From 43b93dcff75fd7f44586546283d6e37b0779d222 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:21:23 +1100 Subject: [PATCH 001/324] Mute org.elasticsearch.reservedstate.service.FileSettingsServiceTests testProcessFileChanges #115280 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index a7f36cdd06d66..6bbccf8bb05bb 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -321,6 +321,9 @@ tests: - class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests method: testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval issue: https://github.com/elastic/elasticsearch/issues/115368 +- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests + method: testProcessFileChanges + issue: https://github.com/elastic/elasticsearch/issues/115280 # Examples: # From ba7d0954efff88295697b7d6c9809b9f8f0ba636 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Wed, 23 Oct 2024 07:57:32 +0200 Subject: [PATCH 002/324] Fix synonyms CI tests timeout (#114641) --- muted-tests.yml | 9 --------- .../test/synonyms/10_synonyms_put.yml | 8 ++++++++ .../test/synonyms/110_synonyms_invalid.yml | 5 +++++ .../test/synonyms/20_synonyms_get.yml | 5 ++++- .../test/synonyms/30_synonyms_delete.yml | 4 ++++ .../test/synonyms/40_synonyms_sets_get.yml | 19 ++++++------------- .../test/synonyms/50_synonym_rule_put.yml | 5 ++++- .../test/synonyms/60_synonym_rule_get.yml | 7 ++++--- .../test/synonyms/70_synonym_rule_delete.yml | 5 +++++ .../test/synonyms/80_synonyms_from_index.yml | 6 +++++- .../90_synonyms_reloading_for_synset.yml | 6 +++++- 11 files changed, 50 insertions(+), 29 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 6bbccf8bb05bb..ccb387986551c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -259,15 +259,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultElserIT method: testInferCreatesDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114503 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/60_synonym_rule_get/Synonym set not found} - issue: https://github.com/elastic/elasticsearch/issues/114432 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/60_synonym_rule_get/Get a synonym rule} - issue: https://github.com/elastic/elasticsearch/issues/114443 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/60_synonym_rule_get/Synonym rule not found} - issue: https://github.com/elastic/elasticsearch/issues/114444 - class: org.elasticsearch.xpack.inference.integration.ModelRegistryIT method: testGetModel issue: https://github.com/elastic/elasticsearch/issues/114657 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml index bcd58f3f7bd64..675b98133ce11 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml @@ -15,6 +15,10 @@ setup: - match: { result: "created" } + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: synonyms.get_synonym: id: test-update-synonyms @@ -58,6 +62,10 @@ setup: - match: { result: "created" } + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: synonyms.get_synonym: id: test-empty-synonyms diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml index d3d0a3bb4df70..4e77e10495109 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml @@ -11,6 +11,11 @@ setup: synonyms_set: synonyms: "foo => bar, baz" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: indices.create: index: test_index diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml index 3494f33466ce4..5e6d4ec2341ad 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml @@ -14,6 +14,10 @@ setup: - synonyms: "test => check" id: "test-id-3" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true --- "Get synonyms set": @@ -31,7 +35,6 @@ setup: id: "test-id-2" - synonyms: "test => check" id: "test-id-3" - --- "Get synonyms set - not found": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml index 351ff4e186d8a..23c907f6a1137 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml @@ -12,6 +12,10 @@ setup: - synonyms: "bye => goodbye" id: "test-id-2" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true --- "Delete synonyms set": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml index 723c41e163eb8..7c145dafd81cd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml @@ -9,6 +9,12 @@ setup: synonyms_set: - synonyms: "hello, hi" - synonyms: "goodbye, bye" + + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: synonyms.put_synonym: id: test-synonyms-1 @@ -23,21 +29,8 @@ setup: body: synonyms_set: - synonyms: "pc, computer" - # set logging to debug for issue: https://github.com/elastic/elasticsearch/issues/102261 - - do: - cluster.put_settings: - body: - persistent: - logger.org.elasticsearch.synonyms: DEBUG --- -teardown: - - do: - cluster.put_settings: - body: - persistent: - logger.org.elasticsearch.synonyms: null ---- "List synonyms set": - do: synonyms.get_synonyms_sets: { } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml index f3711bb0774ca..d8611000fe465 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml @@ -14,7 +14,10 @@ setup: - synonyms: "test => check" id: "test-id-3" - + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true --- "Update a synonyms rule": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml index 2a7c8aff89d8e..0c962b51e08cb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml @@ -13,11 +13,12 @@ setup: id: "test-id-2" - synonyms: "test => check" id: "test-id-3" + + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. - do: cluster.health: - index: .synonyms - timeout: 1m - wait_for_status: green + wait_for_no_initializing_shards: true + --- "Get a synonym rule": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml index a4853b0b6d414..41ab293158a35 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml @@ -14,6 +14,11 @@ setup: - synonyms: "test => check" id: "test-id-3" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + --- "Delete synonym rule": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml index 89ad933370e1c..3aba0f0b4b78b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml @@ -2,7 +2,6 @@ setup: - requires: cluster_features: ["gte_v8.10.0"] reason: Loading synonyms from index is introduced in 8.10.0 - # Create a new synonyms set - do: synonyms.put_synonym: @@ -14,6 +13,11 @@ setup: - synonyms: "bye => goodbye" id: "synonym-rule-2" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + # Create an index with synonym_filter that uses that synonyms set - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml index dc94b36222402..1ceb5b43b8129 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml @@ -3,7 +3,6 @@ - requires: cluster_features: ["gte_v8.10.0"] reason: Reloading analyzers for specific synonym set is introduced in 8.10.0 - # Create synonyms_set1 - do: synonyms.put_synonym: @@ -26,6 +25,11 @@ - synonyms: "bye => goodbye" id: "synonym-rule-2" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + # Create my_index1 with synonym_filter that uses synonyms_set1 - do: indices.create: From 32dee6aaaeb18a8d6d4f0fee8bbf338e8991650d Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 23 Oct 2024 09:30:02 +0200 Subject: [PATCH 003/324] [test] Dynamically pick up the upper bound snapshot index version (#114703) Pick an index version between the minimum compatible and latest known version for snapshot testing. --- .../snapshots/AbstractSnapshotIntegTestCase.java | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 8bc81fef2157d..7a72a7bd0daf0 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.plugins.Plugin; @@ -366,15 +365,9 @@ protected static Settings.Builder indexSettingsNoReplicas(int shards) { /** * Randomly write an empty snapshot of an older version to an empty repository to simulate an older repository metadata format. */ - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - // This used to pick an index version from 7.0.0 to 8.9.0. The minimum now is 8.0.0 but it's not clear what the upper range should be protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) throws Exception { if (randomBoolean() && randomBoolean()) { - initWithSnapshotVersion( - repoName, - repoPath, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_9_0) - ); + initWithSnapshotVersion(repoName, repoPath, IndexVersionUtils.randomVersion()); } } From 530d15029eb8ada2cd7cd76ea5be15adbfc0e639 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 23 Oct 2024 09:30:16 +0200 Subject: [PATCH 004/324] Remove direct cloning of BytesTransportRequests (#114808) All request handlers should be able to read `BytesTransportRequest` to a class than can copied by re-serializing. Direct copying was only necessary by the legacy `JOIN_VALIDATE_ACTION_NAME` request handler. See #89926 --- .../test/transport/MockTransportService.java | 20 ++----------------- 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index fd376fcd07688..18c591166e720 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -39,7 +39,6 @@ import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -50,7 +49,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.ClusterConnectionManager; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; @@ -586,13 +584,8 @@ public void sendRequest( // poor mans request cloning... BytesStreamOutput bStream = new BytesStreamOutput(); request.writeTo(bStream); - final TransportRequest clonedRequest; - if (request instanceof BytesTransportRequest) { - clonedRequest = copyRawBytesForBwC(bStream); - } else { - RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); - clonedRequest = reg.newRequest(bStream.bytes().streamInput()); - } + RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); + final TransportRequest clonedRequest = reg.newRequest(bStream.bytes().streamInput()); assert clonedRequest.getClass().equals(MasterNodeRequestHelper.unwrapTermOverride(request).getClass()) : clonedRequest + " vs " + request; @@ -640,15 +633,6 @@ protected void doRun() throws IOException { } } - // Some request handlers read back a BytesTransportRequest - // into a different class that cannot be re-serialized (i.e. JOIN_VALIDATE_ACTION_NAME), - // in those cases we just copy the raw bytes back to a BytesTransportRequest. - // This is only needed for the BwC for JOIN_VALIDATE_ACTION_NAME and can be removed in the next major - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - private static TransportRequest copyRawBytesForBwC(BytesStreamOutput bStream) throws IOException { - return new BytesTransportRequest(bStream.bytes().streamInput()); - } - @Override public void clearCallback() { synchronized (this) { From aa70c41abaaecd94676a019fee464cf71b453f51 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 23 Oct 2024 09:30:48 +0200 Subject: [PATCH 005/324] [test] Always assume that the old cluster support replication of closed indices (#114314) Support for replicating closed indices was added in #39506 (7.1.0), we can expect the the cluster always supports replication of closed indices in 8.0/9.0 --- .../elasticsearch/upgrades/FullClusterRestartIT.java | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 73f291da15ead..92a704f793dc2 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -1203,15 +1202,8 @@ public void testClosedIndices() throws Exception { closeIndex(index); } - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_INDEXING) // This check can be removed (always assume true) - var originalClusterSupportsReplicationOfClosedIndices = oldClusterHasFeature(RestTestLegacyFeatures.REPLICATION_OF_CLOSED_INDICES); - - if (originalClusterSupportsReplicationOfClosedIndices) { - ensureGreenLongWait(index); - assertClosedIndex(index, true); - } else { - assertClosedIndex(index, false); - } + ensureGreenLongWait(index); + assertClosedIndex(index, true); if (isRunningAgainstOldCluster() == false) { openIndex(index); From 387062eb808f2c8a6c0724d1317b57176a60539d Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 23 Oct 2024 10:20:42 +0200 Subject: [PATCH 006/324] Sometimes delegate to SourceLoader in ValueSourceReaderOperator for required stored fields (#115114) If source is required by a block loader then the StoredFieldsSpec that gets populated should be enhanced by SourceLoader#requiredStoredFields(...) in ValuesSourceReaderOperator. Otherwise in case of synthetic source many stored fields aren't loaded, which causes only a subset of _source to be synthesized. For example when unmapped fields exist or field values that exceed configured ignore above will not appear is _source. This happens when field types fallback to a block loader implementation that uses _source. The required field values are then extracted from the source once loaded. This change also reverts the production code changes introduced via #114903. That change only ensured that _ignored_source field was added to the required list of stored fields. In reality more fields could be required. This change is better fix, since it handles also other cases and the SourceLoader implementation indicates which stored fields are needed. Closes #115076 --- .../extras/MatchOnlyTextFieldMapper.java | 3 +- .../mapper/extras/ScaledFloatFieldMapper.java | 3 +- muted-tests.yml | 12 --- .../mapper/AbstractGeometryFieldMapper.java | 3 +- .../index/mapper/BlockSourceReader.java | 47 ++++------ .../index/mapper/BooleanFieldMapper.java | 2 +- .../index/mapper/DateFieldMapper.java | 3 +- .../index/mapper/KeywordFieldMapper.java | 3 +- .../index/mapper/NumberFieldMapper.java | 65 ++++--------- .../index/mapper/TextFieldMapper.java | 14 +-- .../index/mapper/BlockSourceReaderTests.java | 2 +- .../index/mapper/TextFieldMapperTests.java | 3 +- .../KeywordFieldSyntheticSourceSupport.java | 5 + .../index/mapper/MapperServiceTestCase.java | 5 + .../index/mapper/MapperTestCase.java | 47 +++++++--- ...xtFieldFamilySyntheticSourceTestSetup.java | 20 +--- .../lucene/ValuesSourceReaderOperator.java | 27 ++++-- .../mapper/SemanticTextFieldMapper.java | 3 +- ..._esql_synthetic_source_disabled_fields.yml | 92 +++++++++++++++++-- .../test/51_esql_synthetic_source.yml | 77 ++++++++++++++++ .../unsignedlong/UnsignedLongFieldMapper.java | 3 +- 21 files changed, 276 insertions(+), 163 deletions(-) diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index cd252fcff2376..5904169308fab 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -364,8 +364,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name())); // MatchOnlyText never has norms, so we have to use the field names field BlockSourceReader.LeafIteratorLookup lookup = BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, lookup, sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, lookup); } @Override diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index 1f647cb977cf5..b845545133e19 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -319,8 +319,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.DoublesBlockLoader(valueFetcher, lookup, sourceMode); + return new BlockSourceReader.DoublesBlockLoader(valueFetcher, lookup); } @Override diff --git a/muted-tests.yml b/muted-tests.yml index ccb387986551c..45b1398df7ace 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,18 +282,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114913 -- class: org.elasticsearch.index.mapper.TextFieldMapperTests - method: testBlockLoaderFromRowStrideReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115066 -- class: org.elasticsearch.index.mapper.TextFieldMapperTests - method: testBlockLoaderFromColumnReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115073 -- class: org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapperTests - method: testBlockLoaderFromColumnReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115074 -- class: org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapperTests - method: testBlockLoaderFromRowStrideReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115076 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)} issue: https://github.com/elastic/elasticsearch/issues/115231 diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 3512989c115ee..c38b5beeb55a0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -189,8 +189,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { protected BlockLoader blockLoaderFromSource(BlockLoaderContext blContext) { ValueFetcher fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB); // TODO consider optimization using BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll(), sourceMode); + return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); } protected abstract Object nullValueAsSource(T nullValue); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java index 105943c732a5e..19a1cce746172 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Set; /** * Loads values from {@code _source}. This whole process is very slow and cast-tastic, @@ -30,14 +29,6 @@ * slow. */ public abstract class BlockSourceReader implements BlockLoader.RowStrideReader { - - // _ignored_source is needed when source mode is synthetic. - static final StoredFieldsSpec NEEDS_SOURCE_AND_IGNORED_SOURCE = new StoredFieldsSpec( - true, - false, - Set.of(IgnoredSourceFieldMapper.NAME) - ); - private final ValueFetcher fetcher; private final List ignoredValues = new ArrayList<>(); private final DocIdSetIterator iter; @@ -100,12 +91,10 @@ public interface LeafIteratorLookup { private abstract static class SourceBlockLoader implements BlockLoader { protected final ValueFetcher fetcher; private final LeafIteratorLookup lookup; - private final SourceFieldMapper.Mode sourceMode; - private SourceBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { + private SourceBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { this.fetcher = fetcher; this.lookup = lookup; - this.sourceMode = sourceMode; } @Override @@ -115,7 +104,7 @@ public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) @Override public final StoredFieldsSpec rowStrideStoredFieldSpec() { - return sourceMode == SourceFieldMapper.Mode.SYNTHETIC ? NEEDS_SOURCE_AND_IGNORED_SOURCE : StoredFieldsSpec.NEEDS_SOURCE; + return StoredFieldsSpec.NEEDS_SOURCE; } @Override @@ -151,8 +140,8 @@ public final String toString() { * Load {@code boolean}s from {@code _source}. */ public static class BooleansBlockLoader extends SourceBlockLoader { - public BooleansBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public BooleansBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -191,8 +180,8 @@ public String toString() { * Load {@link BytesRef}s from {@code _source}. */ public static class BytesRefsBlockLoader extends SourceBlockLoader { - public BytesRefsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public BytesRefsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -202,7 +191,7 @@ public final Builder builder(BlockFactory factory, int expectedCount) { @Override protected RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) throws IOException { - return new BytesRefs(fetcher, iter, null); + return new BytesRefs(fetcher, iter); } @Override @@ -212,8 +201,8 @@ protected String name() { } public static class GeometriesBlockLoader extends SourceBlockLoader { - public GeometriesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public GeometriesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -223,7 +212,7 @@ public final Builder builder(BlockFactory factory, int expectedCount) { @Override protected RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) { - return new Geometries(fetcher, iter, null); + return new Geometries(fetcher, iter); } @Override @@ -235,7 +224,7 @@ protected String name() { private static class BytesRefs extends BlockSourceReader { private final BytesRef scratch = new BytesRef(); - BytesRefs(ValueFetcher fetcher, DocIdSetIterator iter, SourceFieldMapper.Mode sourceMode) { + BytesRefs(ValueFetcher fetcher, DocIdSetIterator iter) { super(fetcher, iter); } @@ -252,7 +241,7 @@ public String toString() { private static class Geometries extends BlockSourceReader { - Geometries(ValueFetcher fetcher, DocIdSetIterator iter, SourceFieldMapper.Mode sourceMode) { + Geometries(ValueFetcher fetcher, DocIdSetIterator iter) { super(fetcher, iter); } @@ -275,8 +264,8 @@ public String toString() { * Load {@code double}s from {@code _source}. */ public static class DoublesBlockLoader extends SourceBlockLoader { - public DoublesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public DoublesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -315,8 +304,8 @@ public String toString() { * Load {@code int}s from {@code _source}. */ public static class IntsBlockLoader extends SourceBlockLoader { - public IntsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public IntsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -355,8 +344,8 @@ public String toString() { * Load {@code long}s from {@code _source}. */ public static class LongsBlockLoader extends SourceBlockLoader { - public LongsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public LongsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index c2bf9e18bfeec..5aaaf7dce83c9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -314,7 +314,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isIndexed() || isStored() ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - return new BlockSourceReader.BooleansBlockLoader(fetcher, lookup, blContext.indexSettings().getIndexMappingSourceMode()); + return new BlockSourceReader.BooleansBlockLoader(fetcher, lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index d05f0e477db09..87e4ce5f90479 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -793,8 +793,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name())), lookup, sourceMode); + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name())), lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 802680e7f373e..ecc708bc94614 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -634,8 +634,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { return new BlockStoredFieldsReader.BytesFromBytesRefsBlockLoader(name()); } SourceValueFetcher fetcher = sourceValueFetcher(blContext.sourcePaths(name())); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, sourceBlockLoaderLookup(blContext), sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, sourceBlockLoaderLookup(blContext)); } private BlockSourceReader.LeafIteratorLookup sourceBlockLoaderLookup(BlockLoaderContext blContext) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 3608e8ab261c1..55ed1e10428aa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -462,12 +462,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, FLOAT("float", NumericType.FLOAT) { @@ -650,12 +646,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, DOUBLE("double", NumericType.DOUBLE) { @@ -804,12 +796,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, BYTE("byte", NumericType.BYTE) { @@ -921,12 +909,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } private boolean isOutOfRange(Object value) { @@ -1038,12 +1022,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } private boolean isOutOfRange(Object value) { @@ -1229,12 +1209,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } }, LONG("long", NumericType.LONG) { @@ -1380,12 +1356,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher, lookup); } private boolean isOutOfRange(Object value) { @@ -1663,11 +1635,7 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { abstract BlockLoader blockLoaderFromDocValues(String fieldName); - abstract BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ); + abstract BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup); } public static class NumberFieldType extends SimpleMappedFieldType { @@ -1806,8 +1774,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return type.blockLoaderFromSource(sourceValueFetcher(blContext.sourcePaths(name())), lookup, sourceMode); + return type.blockLoaderFromSource(sourceValueFetcher(blContext.sourcePaths(name())), lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 3f77edc819602..253f70f4fda47 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -1007,20 +1007,8 @@ protected String delegatingTo() { if (isStored()) { return new BlockStoredFieldsReader.BytesFromStringsBlockLoader(name()); } - if (isSyntheticSource && syntheticSourceDelegate == null) { - /* - * When we're in synthetic source mode we don't currently - * support text fields that are not stored and are not children - * of perfect keyword fields. We'd have to load from the parent - * field and then convert the result to a string. In this case, - * even if we would synthesize the source, the current field - * would be missing. - */ - return null; - } SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name())); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, blockReaderDisiLookup(blContext), sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, blockReaderDisiLookup(blContext)); } /** diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java index 286be8d12570d..357ada3ad656d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java @@ -51,7 +51,7 @@ public void testEmptyArray() throws IOException { private void loadBlock(LeafReaderContext ctx, Consumer test) throws IOException { ValueFetcher valueFetcher = SourceValueFetcher.toString(Set.of("field")); BlockSourceReader.LeafIteratorLookup lookup = BlockSourceReader.lookupFromNorms("field"); - BlockLoader loader = new BlockSourceReader.BytesRefsBlockLoader(valueFetcher, lookup, null); + BlockLoader loader = new BlockSourceReader.BytesRefsBlockLoader(valueFetcher, lookup); assertThat(loader.columnAtATimeReader(ctx), nullValue()); BlockLoader.RowStrideReader reader = loader.rowStrideReader(ctx); assertThat(loader.rowStrideStoredFieldSpec(), equalTo(StoredFieldsSpec.NEEDS_SOURCE)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 86914cfe9ced7..c2375e948fda0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -1353,6 +1353,7 @@ private void testBlockLoaderFromParent(boolean columnReader, boolean syntheticSo }; MapperService mapper = createMapperService(syntheticSource ? syntheticSourceMapping(buildFields) : mapping(buildFields)); BlockReaderSupport blockReaderSupport = getSupportedReaders(mapper, "field.sub"); - testBlockLoader(columnReader, example, blockReaderSupport); + var sourceLoader = mapper.mappingLookup().newSourceLoader(SourceFieldMetrics.NOOP); + testBlockLoader(columnReader, example, blockReaderSupport, sourceLoader); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java index 0d05c3d0cd77b..502ffdde62e5a 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java @@ -37,6 +37,11 @@ public class KeywordFieldSyntheticSourceSupport implements MapperTestCase.Synthe this.docValues = useFallbackSyntheticSource == false || ESTestCase.randomBoolean(); } + @Override + public boolean ignoreAbove() { + return ignoreAbove != null; + } + @Override public boolean preservesExactSource() { // We opt in into fallback synthetic source implementation diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 8bc2666bcfe3b..da04f30ff8023 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -179,6 +179,11 @@ public final MapperService createMapperService(XContentBuilder mappings) throws return createMapperService(getVersion(), mappings); } + public final MapperService createSytheticSourceMapperService(XContentBuilder mappings) throws IOException { + var settings = Settings.builder().put("index.mapping.source.mode", "synthetic").build(); + return createMapperService(getVersion(), settings, () -> true, mappings); + } + protected IndexVersion getVersion() { return IndexVersion.current(); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index 7669ada750c14..c89c0b2e37dd2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -49,6 +49,7 @@ import org.elasticsearch.script.ScriptFactory; import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import org.elasticsearch.search.lookup.LeafStoredFieldsLookup; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.lookup.Source; @@ -1103,6 +1104,10 @@ default boolean preservesExactSource() { return false; } + default boolean ignoreAbove() { + return false; + } + /** * Examples that should work when source is generated from doc values. */ @@ -1321,15 +1326,12 @@ private BlockLoader getBlockLoader(boolean columnReader) { return mapper.fieldType(loaderFieldName).blockLoader(new MappedFieldType.BlockLoaderContext() { @Override public String indexName() { - return "test_index"; + return mapper.getIndexSettings().getIndex().getName(); } @Override public IndexSettings indexSettings() { - var imd = IndexMetadata.builder(indexName()) - .settings(MapperTestCase.indexSettings(IndexVersion.current(), 1, 1).put(Settings.EMPTY)) - .build(); - return new IndexSettings(imd, Settings.EMPTY); + return mapper.getIndexSettings(); } @Override @@ -1362,9 +1364,19 @@ public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() { private void testBlockLoader(boolean syntheticSource, boolean columnReader) throws IOException { // TODO if we're not using synthetic source use a different sort of example. Or something. - SyntheticSourceExample example = syntheticSourceSupport(false, columnReader).example(5); + var syntheticSourceSupport = syntheticSourceSupport(false, columnReader); + SyntheticSourceExample example = syntheticSourceSupport.example(5); + if (syntheticSource && columnReader == false) { + // The synthetic source testing support can't always handle now the difference between stored and synthetic source mode. + // In case of ignore above, the ignored values are always appended after the valid values + // (both if field has doc values or stored field). While stored source just reads original values (from _source) and there + // is no notion of values that are ignored. + // TODO: fix this by improving block loader support: https://github.com/elastic/elasticsearch/issues/115257 + assumeTrue("inconsistent synthetic source testing support with ignore above", syntheticSourceSupport.ignoreAbove() == false); + } + // TODO: only rely index.mapping.source.mode setting XContentBuilder mapping = syntheticSource ? syntheticSourceFieldMapping(example.mapping) : fieldMapping(example.mapping); - MapperService mapper = createMapperService(mapping); + MapperService mapper = syntheticSource ? createSytheticSourceMapperService(mapping) : createMapperService(mapping); BlockReaderSupport blockReaderSupport = getSupportedReaders(mapper, "field"); if (syntheticSource) { // geo_point and point do not yet support synthetic source @@ -1373,11 +1385,16 @@ private void testBlockLoader(boolean syntheticSource, boolean columnReader) thro blockReaderSupport.syntheticSource ); } - testBlockLoader(columnReader, example, blockReaderSupport); + var sourceLoader = mapper.mappingLookup().newSourceLoader(SourceFieldMetrics.NOOP); + testBlockLoader(columnReader, example, blockReaderSupport, sourceLoader); } - protected final void testBlockLoader(boolean columnReader, SyntheticSourceExample example, BlockReaderSupport blockReaderSupport) - throws IOException { + protected final void testBlockLoader( + boolean columnReader, + SyntheticSourceExample example, + BlockReaderSupport blockReaderSupport, + SourceLoader sourceLoader + ) throws IOException { BlockLoader loader = blockReaderSupport.getBlockLoader(columnReader); Function valuesConvert = loadBlockExpected(blockReaderSupport, columnReader); if (valuesConvert == null) { @@ -1404,9 +1421,15 @@ protected final void testBlockLoader(boolean columnReader, SyntheticSourceExampl return; } } else { + StoredFieldsSpec storedFieldsSpec = loader.rowStrideStoredFieldSpec(); + if (storedFieldsSpec.requiresSource()) { + storedFieldsSpec = storedFieldsSpec.merge( + new StoredFieldsSpec(true, storedFieldsSpec.requiresMetadata(), sourceLoader.requiredStoredFields()) + ); + } BlockLoaderStoredFieldsFromLeafLoader storedFieldsLoader = new BlockLoaderStoredFieldsFromLeafLoader( - StoredFieldLoader.fromSpec(loader.rowStrideStoredFieldSpec()).getLoader(ctx, null), - loader.rowStrideStoredFieldSpec().requiresSource() ? SourceLoader.FROM_STORED_SOURCE.leaf(ctx.reader(), null) : null + StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), + storedFieldsSpec.requiresSource() ? sourceLoader.leaf(ctx.reader(), null) : null ); storedFieldsLoader.advanceTo(0); BlockLoader.Builder builder = loader.builder(TestBlock.factory(ctx.reader().numDocs()), 1); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java index b6a031c9ff906..97ded7f9a06f2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java @@ -51,24 +51,9 @@ public static MapperTestCase.BlockReaderSupport getSupportedReaders(MapperServic } public static Function loadBlockExpected(MapperTestCase.BlockReaderSupport blockReaderSupport, boolean columnReader) { - if (nullLoaderExpected(blockReaderSupport.mapper(), blockReaderSupport.loaderFieldName())) { - return null; - } return v -> ((BytesRef) v).utf8ToString(); } - private static boolean nullLoaderExpected(MapperService mapper, String fieldName) { - MappedFieldType type = mapper.fieldType(fieldName); - if (type instanceof TextFieldMapper.TextFieldType t) { - if (t.isSyntheticSource() == false || t.canUseSyntheticSourceDelegateForQuerying() || t.isStored()) { - return false; - } - String parentField = mapper.mappingLookup().parentField(fieldName); - return parentField == null || nullLoaderExpected(mapper, parentField); - } - return false; - } - public static void validateRoundTripReader(String syntheticSource, DirectoryReader reader, DirectoryReader roundTripReader) { // `reader` here is reader of original document and `roundTripReader` reads document // created from synthetic source. @@ -98,6 +83,11 @@ private static class TextFieldFamilySyntheticSourceSupport implements MapperTest ); } + @Override + public boolean ignoreAbove() { + return keywordMultiFieldSyntheticSourceSupport.ignoreAbove(); + } + @Override public MapperTestCase.SyntheticSourceExample example(int maxValues) { if (store) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index ee747d98c26f8..74affb10eaf20 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -241,6 +241,12 @@ private void loadFromSingleLeaf(Block[] blocks, int shard, int segment, BlockLoa } } + SourceLoader sourceLoader = null; + if (storedFieldsSpec.requiresSource()) { + sourceLoader = shardContexts.get(shard).newSourceLoader.get(); + storedFieldsSpec = storedFieldsSpec.merge(new StoredFieldsSpec(true, false, sourceLoader.requiredStoredFields())); + } + if (rowStrideReaders.isEmpty()) { return; } @@ -259,7 +265,7 @@ private void loadFromSingleLeaf(Block[] blocks, int shard, int segment, BlockLoa } BlockLoaderStoredFieldsFromLeafLoader storedFields = new BlockLoaderStoredFieldsFromLeafLoader( storedFieldLoader.getLoader(ctx, null), - storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null + sourceLoader != null ? sourceLoader.leaf(ctx.reader(), null) : null ); for (int p = 0; p < docs.count(); p++) { int doc = docs.get(p); @@ -381,13 +387,18 @@ private void fieldsMoved(LeafReaderContext ctx, int shard) throws IOException { FieldWork field = fields[f]; rowStride[f] = field.rowStride(ctx); storedFieldsSpec = storedFieldsSpec.merge(field.loader.rowStrideStoredFieldSpec()); - storedFields = new BlockLoaderStoredFieldsFromLeafLoader( - StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), - storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null - ); - if (false == storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { - trackStoredFields(storedFieldsSpec, false); - } + } + SourceLoader sourceLoader = null; + if (storedFieldsSpec.requiresSource()) { + sourceLoader = shardContexts.get(shard).newSourceLoader.get(); + storedFieldsSpec = storedFieldsSpec.merge(new StoredFieldsSpec(true, false, sourceLoader.requiredStoredFields())); + } + storedFields = new BlockLoaderStoredFieldsFromLeafLoader( + StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), + sourceLoader != null ? sourceLoader.leaf(ctx.reader(), null) : null + ); + if (false == storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { + trackStoredFields(storedFieldsSpec, false); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index fb18cfb4959c7..4c07516051287 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -617,8 +617,7 @@ private String generateInvalidQueryInferenceResultsMessage(StringBuilder baseMes @Override public BlockLoader blockLoader(MappedFieldType.BlockLoaderContext blContext) { SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name().concat(".text"))); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll(), sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); } } diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml index 68597afda6c78..bc81d1eb67309 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml @@ -283,7 +283,7 @@ teardown: - match: {values.0.3: "PUT"} - match: {values.0.4: false} - match: {values.0.5: "POINT (-74.006 40.7128)"} - - match: {values.0.6: null} # null is expected, because text fields aren't stored in ignored source + - match: {values.0.6: "Do. Or do not. There is no try."} - match: {values.0.7: 102} - do: @@ -296,10 +296,86 @@ teardown: - match: {columns.0.name: "message"} - match: {columns.0.type: "text"} - # null is expected, because text fields aren't stored in ignored source - - match: {values.0.0: null} - - match: {values.1.0: null} - - match: {values.2.0: null} - - match: {values.3.0: null} - - match: {values.4.0: null} - - match: {values.5.0: null} + - match: {values.0.0: "Do. Or do not. There is no try."} + - match: {values.1.0: "I find your lack of faith disturbing."} + - match: {values.2.0: "Wars not make one great."} + - match: {values.3.0: "No, I am your father."} + - match: {values.4.0: "May the force be with you."} + - match: {values.5.0: "That's no moon. It's a space station."} + +--- +"message field with keyword multi-field with ignore_above": + - do: + indices.create: + index: my-index2 + body: + settings: + index: + mode: logsdb + mappings: + properties: + "@timestamp": + type: date + host.name: + type: keyword + store: false + message: + type: text + store: false + fields: + raw: + type: keyword + ignore_above: 3 + + - do: + bulk: + index: my-index2 + refresh: true + body: + - { "index": { } } + - { "@timestamp": "2024-02-12T10:30:00Z", "host.name": "foo", "message": "No, I am your father." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:31:00Z", "host.name": "bar", "message": "Do. Or do not. There is no try." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:32:00Z", "host.name": "foo", "message": "May the force be with you." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:33:00Z", "host.name": "baz", "message": "I find your lack of faith disturbing." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:34:00Z", "host.name": "baz", "message": "Wars not make one great." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:35:00Z", "host.name": "foo", "message": "That's no moon. It's a space station." } + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | LIMIT 1' + + - match: {columns.0.name: "@timestamp"} + - match: {columns.0.type: "date"} + - match: {columns.1.name: "host.name"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "message"} + - match: {columns.2.type: "text"} + - match: {columns.3.name: "message.raw"} + - match: {columns.3.type: "keyword"} + + - match: {values.0.0: "2024-02-12T10:31:00.000Z"} + - match: {values.0.1: "bar"} + - match: {values.0.2: "Do. Or do not. There is no try."} + # Note that isn't related to synthetic source. For both stored and synthetic source null is returned: +# - match: {values.0.3: "Do. Or do not. There is no try."} + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | KEEP message | LIMIT 10' + + - match: {columns.0.name: "message"} + - match: {columns.0.type: "text"} + + - match: {values.0.0: "Do. Or do not. There is no try."} + - match: {values.1.0: "I find your lack of faith disturbing."} + - match: {values.2.0: "Wars not make one great."} + - match: {values.3.0: "No, I am your father."} + - match: {values.4.0: "May the force be with you."} + - match: {values.5.0: "That's no moon. It's a space station."} diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml index 7e305bda4ef4e..6c840a0cf9d3a 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml @@ -175,3 +175,80 @@ teardown: - match: {values.3.0: "No, I am your father."} - match: {values.4.0: "May the force be with you."} - match: {values.5.0: "That's no moon. It's a space station."} + +--- +"message field with stored keyword multi-field with ignore_above": + - do: + indices.create: + index: my-index2 + body: + settings: + index: + mode: logsdb + mappings: + properties: + "@timestamp": + type: date + host.name: + type: keyword + store: false + message: + type: text + store: false + fields: + raw: + type: keyword + store: true + + - do: + bulk: + index: my-index2 + refresh: true + body: + - { "index": { } } + - { "@timestamp": "2024-02-12T10:30:00Z", "host.name": "foo", "message": "No, I am your father." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:31:00Z", "host.name": "bar", "message": "Do. Or do not. There is no try." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:32:00Z", "host.name": "foo", "message": "May the force be with you." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:33:00Z", "host.name": "baz", "message": "I find your lack of faith disturbing." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:34:00Z", "host.name": "baz", "message": "Wars not make one great." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:35:00Z", "host.name": "foo", "message": "That's no moon. It's a space station." } + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | LIMIT 1' + + - match: {columns.0.name: "@timestamp"} + - match: {columns.0.type: "date"} + - match: {columns.1.name: "host.name"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "message"} + - match: {columns.2.type: "text"} + - match: {columns.3.name: "message.raw"} + - match: {columns.3.type: "keyword"} + + - match: {values.0.0: "2024-02-12T10:31:00.000Z"} + - match: {values.0.1: "bar"} + - match: {values.0.2: "Do. Or do not. There is no try."} + - match: {values.0.3: "Do. Or do not. There is no try."} + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | KEEP message | LIMIT 10' + + - match: {columns.0.name: "message"} + - match: {columns.0.type: "text"} + + - match: {values.0.0: "Do. Or do not. There is no try."} + - match: {values.1.0: "I find your lack of faith disturbing."} + - match: {values.2.0: "Wars not make one great."} + - match: {values.3.0: "No, I am your father."} + - match: {values.4.0: "May the force be with you."} + - match: {values.5.0: "That's no moon. It's a space station."} + diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index e8fd0da496bbe..b43d87c17e644 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -339,8 +339,7 @@ protected Object parseSourceValue(Object value) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.LongsBlockLoader(valueFetcher, lookup, sourceMode); + return new BlockSourceReader.LongsBlockLoader(valueFetcher, lookup); } @Override From 291ced7b482ab952f420993f43cacac49a2f9a9e Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 23 Oct 2024 10:23:32 +0100 Subject: [PATCH 007/324] Change from Version to BuildVersion in PersistedClusterStateService (#115301) --- .../elasticsearch/env/NodeEnvironmentIT.java | 4 ++-- .../org/elasticsearch/env/BuildVersion.java | 1 - .../env/DefaultBuildVersion.java | 2 +- .../org/elasticsearch/env/NodeMetadata.java | 5 ---- .../env/OverrideNodeVersionCommand.java | 6 ++--- .../gateway/PersistedClusterStateService.java | 24 +++++++------------ .../elasticsearch/env/NodeMetadataTests.java | 8 ------- .../env/OverrideNodeVersionCommandTests.java | 18 +++++++------- .../PersistedClusterStateServiceTests.java | 10 +++++--- 9 files changed, 32 insertions(+), 46 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java index f813932ebe924..ecd5c5af8649f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -123,7 +123,7 @@ public Settings onNodeStopped(String nodeName) { public void testFailsToStartIfDowngraded() { final IllegalStateException illegalStateException = expectThrowsOnRestart( - dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooNewVersion(), dataPaths) + dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooNewBuildVersion(), dataPaths) ); assertThat( illegalStateException.getMessage(), @@ -133,7 +133,7 @@ public void testFailsToStartIfDowngraded() { public void testFailsToStartIfUpgradedTooFar() { final IllegalStateException illegalStateException = expectThrowsOnRestart( - dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooOldVersion(), dataPaths) + dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooOldBuildVersion(), dataPaths) ); assertThat( illegalStateException.getMessage(), diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index 0de346249ccbc..42c45a14977eb 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -59,7 +59,6 @@ public abstract class BuildVersion { public abstract boolean isFutureVersion(); // temporary - // TODO[wrb]: remove from PersistedClusterStateService // TODO[wrb]: remove from security bootstrap checks @Deprecated public Version toVersion() { diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index e0531b5a192a0..dcc5ed3aee3f8 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -72,6 +72,6 @@ public int hashCode() { @Override public String toString() { - return Version.fromId(versionId).toString(); + return version.toString(); } } diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java index 6a72a7e7fcda5..5b2ee39c1b622 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java @@ -42,7 +42,6 @@ public final class NodeMetadata { private final IndexVersion oldestIndexVersion; - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // version should be non-null in the node metadata from v9 onwards private NodeMetadata( final String nodeId, final BuildVersion buildVersion, @@ -112,11 +111,7 @@ public IndexVersion oldestIndexVersion() { return oldestIndexVersion; } - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) public void verifyUpgradeToCurrentVersion() { - // Enable the following assertion for V9: - // assert (nodeVersion.equals(BuildVersion.empty()) == false) : "version is required in the node metadata from v9 onwards"; - if (nodeVersion.onOrAfterMinimumCompatible() == false) { throw new IllegalStateException( "cannot upgrade a node from version [" diff --git a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java index 96158965cddfe..1ddc8d5b26bd9 100644 --- a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java +++ b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java @@ -74,7 +74,7 @@ protected void processDataPaths(Terminal terminal, Path[] paths, OptionSet optio "found [" + nodeMetadata + "] which is compatible with current version [" - + Version.CURRENT + + BuildVersion.current() + "], so there is no need to override the version checks" ); } catch (IllegalStateException e) { @@ -86,10 +86,10 @@ protected void processDataPaths(Terminal terminal, Path[] paths, OptionSet optio (nodeMetadata.nodeVersion().onOrAfterMinimumCompatible() == false ? TOO_OLD_MESSAGE : TOO_NEW_MESSAGE).replace( "V_OLD", nodeMetadata.nodeVersion().toString() - ).replace("V_NEW", nodeMetadata.nodeVersion().toString()).replace("V_CUR", Version.CURRENT.toString()) + ).replace("V_NEW", nodeMetadata.nodeVersion().toString()).replace("V_CUR", BuildVersion.current().toString()) ); - PersistedClusterStateService.overrideVersion(Version.CURRENT, paths); + PersistedClusterStateService.overrideVersion(BuildVersion.current(), paths); terminal.println(SUCCESS_MESSAGE); } diff --git a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java index 0c6cf2c8a0761..92b8686700a05 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java @@ -42,7 +42,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -159,8 +158,6 @@ public class PersistedClusterStateService { public static final int IS_LAST_PAGE = 1; public static final int IS_NOT_LAST_PAGE = 0; private static final int COMMIT_DATA_SIZE = 7; - // We added CLUSTER_UUID_KEY and CLUSTER_UUID_COMMITTED_KEY in 8.8 - private static final int COMMIT_DATA_SIZE_BEFORE_8_8 = 5; private static final MergePolicy NO_MERGE_POLICY = noMergePolicy(); private static final MergePolicy DEFAULT_MERGE_POLICY = defaultMergePolicy(); @@ -350,7 +347,7 @@ public record OnDiskStateMetadata( @Nullable public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { String nodeId = null; - Version version = null; + BuildVersion version = null; IndexVersion oldestIndexVersion = IndexVersions.ZERO; for (final Path dataPath : dataPaths) { final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME); @@ -367,7 +364,7 @@ public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { ); } else if (nodeId == null) { nodeId = thisNodeId; - version = Version.fromId(Integer.parseInt(userData.get(NODE_VERSION_KEY))); + version = BuildVersion.fromVersionId(Integer.parseInt(userData.get(NODE_VERSION_KEY))); if (userData.containsKey(OLDEST_INDEX_VERSION_KEY)) { oldestIndexVersion = IndexVersion.fromId(Integer.parseInt(userData.get(OLDEST_INDEX_VERSION_KEY))); } else { @@ -382,14 +379,13 @@ public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { if (nodeId == null) { return null; } - // TODO: remove use of Version here (ES-7343) - return new NodeMetadata(nodeId, BuildVersion.fromVersionId(version.id()), oldestIndexVersion); + return new NodeMetadata(nodeId, version, oldestIndexVersion); } /** * Overrides the version field for the metadata in the given data path */ - public static void overrideVersion(Version newVersion, Path... dataPaths) throws IOException { + public static void overrideVersion(BuildVersion newVersion, Path... dataPaths) throws IOException { for (final Path dataPath : dataPaths) { final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME); if (Files.exists(indexPath)) { @@ -399,7 +395,7 @@ public static void overrideVersion(Version newVersion, Path... dataPaths) throws try (IndexWriter indexWriter = createIndexWriter(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)), true)) { final Map commitData = new HashMap<>(userData); - commitData.put(NODE_VERSION_KEY, Integer.toString(newVersion.id)); + commitData.put(NODE_VERSION_KEY, Integer.toString(newVersion.id())); commitData.put(OVERRIDDEN_NODE_VERSION_KEY, Boolean.toString(true)); indexWriter.setLiveCommitData(commitData.entrySet()); indexWriter.commit(); @@ -664,11 +660,9 @@ public OnDiskStateMetadata loadOnDiskStateMetadataFromUserData(Map commitData = Maps.newMapWithExpectedSize(COMMIT_DATA_SIZE); commitData.put(CURRENT_TERM_KEY, Long.toString(currentTerm)); commitData.put(LAST_ACCEPTED_VERSION_KEY, Long.toString(lastAcceptedVersion)); - commitData.put(NODE_VERSION_KEY, Integer.toString(Version.CURRENT.id)); + commitData.put(NODE_VERSION_KEY, Integer.toString(BuildVersion.current().id())); commitData.put(OLDEST_INDEX_VERSION_KEY, Integer.toString(oldestIndexVersion.id())); commitData.put(NODE_ID_KEY, nodeId); commitData.put(CLUSTER_UUID_KEY, clusterUUID); diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java index 8bfd4c7c5ac68..22308e15f4845 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java @@ -167,10 +167,6 @@ public void testUpgradeMarksPreviousVersion() { assertThat(nodeMetadata.previousNodeVersion(), equalTo(buildVersion)); } - public static Version tooNewVersion() { - return Version.fromId(between(Version.CURRENT.id + 1, 99999999)); - } - public static IndexVersion tooNewIndexVersion() { return IndexVersion.fromId(between(IndexVersion.current().id() + 1, 99999999)); } @@ -179,10 +175,6 @@ public static BuildVersion tooNewBuildVersion() { return BuildVersion.fromVersionId(between(Version.CURRENT.id() + 1, 99999999)); } - public static Version tooOldVersion() { - return Version.fromId(between(1, Version.CURRENT.minimumCompatibilityVersion().id - 1)); - } - public static BuildVersion tooOldBuildVersion() { return BuildVersion.fromVersionId(between(1, Version.CURRENT.minimumCompatibilityVersion().id - 1)); } diff --git a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java index bf3fc1697aa44..c7614e2d98eed 100644 --- a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java +++ b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java @@ -96,7 +96,9 @@ public void testFailsOnEmptyPath() { } public void testFailsIfUnnecessary() throws IOException { - final Version nodeVersion = Version.fromId(between(Version.CURRENT.minimumCompatibilityVersion().id, Version.CURRENT.id)); + final BuildVersion nodeVersion = BuildVersion.fromVersionId( + between(Version.CURRENT.minimumCompatibilityVersion().id, Version.CURRENT.id) + ); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); final ElasticsearchException elasticsearchException = expectThrows( @@ -107,7 +109,7 @@ public void testFailsIfUnnecessary() throws IOException { elasticsearchException.getMessage(), allOf( containsString("compatible with current version"), - containsString(Version.CURRENT.toString()), + containsString(BuildVersion.current().toString()), containsString(nodeVersion.toString()) ) ); @@ -115,7 +117,7 @@ public void testFailsIfUnnecessary() throws IOException { } public void testWarnsIfTooOld() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooOldVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooOldBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput("n"); @@ -137,11 +139,11 @@ public void testWarnsIfTooOld() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion().toVersion(), equalTo(nodeVersion)); + assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion)); } public void testWarnsIfTooNew() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooNewVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooNewBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput(randomFrom("yy", "Yy", "n", "yes", "true", "N", "no")); @@ -162,11 +164,11 @@ public void testWarnsIfTooNew() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion().toVersion(), equalTo(nodeVersion)); + assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion)); } public void testOverwritesIfTooOld() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooOldVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooOldBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput(randomFrom("y", "Y")); @@ -189,7 +191,7 @@ public void testOverwritesIfTooOld() throws Exception { } public void testOverwritesIfTooNew() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooNewVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooNewBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput(randomFrom("y", "Y")); diff --git a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java index 450d123f551c8..4428a7e078510 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java @@ -54,6 +54,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -1414,14 +1415,17 @@ public void testOverrideLuceneVersion() throws IOException { assertThat(clusterState.metadata().version(), equalTo(version)); } + @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) + BuildVersion overrideVersion = BuildVersion.fromVersionId(Version.V_8_0_0.id); + NodeMetadata prevMetadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); assertEquals(BuildVersion.current(), prevMetadata.nodeVersion()); - PersistedClusterStateService.overrideVersion(Version.V_8_0_0, persistedClusterStateService.getDataPaths()); + PersistedClusterStateService.overrideVersion(overrideVersion, persistedClusterStateService.getDataPaths()); NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); - assertEquals(BuildVersion.fromVersionId(Version.V_8_0_0.id()), metadata.nodeVersion()); + assertEquals(overrideVersion, metadata.nodeVersion()); for (Path p : persistedClusterStateService.getDataPaths()) { NodeMetadata individualMetadata = PersistedClusterStateService.nodeMetadata(p); - assertEquals(BuildVersion.fromVersionId(Version.V_8_0_0.id()), individualMetadata.nodeVersion()); + assertEquals(overrideVersion, individualMetadata.nodeVersion()); } } } From 0f7ddd5c9878e03f6f0ce6ac6bce58c609e25ff5 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 23 Oct 2024 13:34:37 +0100 Subject: [PATCH 008/324] [ML] New names for the default inference endpoints (#115395) The new names are .elser-2-elasticsearch and .multilingual-e5-small-elasticsearch --- .../services/elasticsearch/ElasticsearchInternalService.java | 4 ++-- .../elasticsearch/ElasticsearchInternalServiceTests.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 49919fda9f89d..6732e5719b897 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -85,8 +85,8 @@ public class ElasticsearchInternalService extends BaseElasticsearchInternalServi ); public static final int EMBEDDING_MAX_BATCH_SIZE = 10; - public static final String DEFAULT_ELSER_ID = ".elser-2"; - public static final String DEFAULT_E5_ID = ".multi-e5-small"; + public static final String DEFAULT_ELSER_ID = ".elser-2-elasticsearch"; + public static final String DEFAULT_E5_ID = ".multilingual-e5-small-elasticsearch"; private static final Logger logger = LogManager.getLogger(ElasticsearchInternalService.class); private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ElasticsearchInternalService.class); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index b82b8a08f2175..5ec66687752a8 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -1561,8 +1561,8 @@ public void testEmbeddingTypeFromTaskTypeAndSettings() { public void testIsDefaultId() { var service = createService(mock(Client.class)); - assertTrue(service.isDefaultId(".elser-2")); - assertTrue(service.isDefaultId(".multi-e5-small")); + assertTrue(service.isDefaultId(".elser-2-elasticsearch")); + assertTrue(service.isDefaultId(".multilingual-e5-small-elasticsearch")); assertFalse(service.isDefaultId("foo")); } From 91a5a2e6a1eefa64bfbd39db62cb50a478f022fb Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Wed, 23 Oct 2024 15:45:09 +0300 Subject: [PATCH 009/324] Unmute SearchWithMinCompatibleSearchNodeIT tests muted for 7.17 (#115386) --- muted-tests.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 45b1398df7ace..c248729b539fd 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -23,12 +23,6 @@ tests: - class: org.elasticsearch.index.store.FsDirectoryFactoryTests method: testPreload issue: https://github.com/elastic/elasticsearch/issues/110211 -- class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT - method: testMinVersionAsNewVersion - issue: https://github.com/elastic/elasticsearch/issues/95384 -- class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT - method: testCcsMinimizeRoundtripsIsFalse - issue: https://github.com/elastic/elasticsearch/issues/101974 - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" From 9e95cbd86b3e2009d679c0bdbacb35f1a8cc7e27 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 24 Oct 2024 00:00:13 +1100 Subject: [PATCH 010/324] Mute org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT test {yaml=ingest/80_ingest_simulate/Test mapping addition works with legacy templates} #115412 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c248729b539fd..93714e098f677 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -297,6 +297,9 @@ tests: - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testProcessFileChanges issue: https://github.com/elastic/elasticsearch/issues/115280 +- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT + method: test {yaml=ingest/80_ingest_simulate/Test mapping addition works with legacy templates} + issue: https://github.com/elastic/elasticsearch/issues/115412 # Examples: # From 728ac0c8a790fd9afc2dfc970d5c2e39c36cedc9 Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Wed, 23 Oct 2024 15:07:04 +0200 Subject: [PATCH 011/324] adaptive allocations: reset time interval with zero requests upon starting an allocation (#115400) --- .../AdaptiveAllocationsScaler.java | 9 ++++-- .../AdaptiveAllocationsScalerService.java | 11 ++++++- .../AdaptiveAllocationsScalerTests.java | 29 +++++++++++++++++++ 3 files changed, 46 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java index bbd63e0d3bfe9..0dec99a9b9bb9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java @@ -33,6 +33,7 @@ public class AdaptiveAllocationsScaler { private final String deploymentId; private final KalmanFilter1d requestRateEstimator; private final KalmanFilter1d inferenceTimeEstimator; + private final long scaleToZeroAfterNoRequestsSeconds; private double timeWithoutRequestsSeconds; private int numberOfAllocations; @@ -44,10 +45,11 @@ public class AdaptiveAllocationsScaler { private Double lastMeasuredRequestRate; private Double lastMeasuredInferenceTime; private Long lastMeasuredQueueSize; - private long scaleToZeroAfterNoRequestsSeconds; AdaptiveAllocationsScaler(String deploymentId, int numberOfAllocations, long scaleToZeroAfterNoRequestsSeconds) { this.deploymentId = deploymentId; + this.scaleToZeroAfterNoRequestsSeconds = scaleToZeroAfterNoRequestsSeconds; + // A smoothing factor of 100 roughly means the last 100 measurements have an effect // on the estimated values. The sampling time is 10 seconds, so approximately the // last 15 minutes are taken into account. @@ -67,7 +69,6 @@ public class AdaptiveAllocationsScaler { lastMeasuredRequestRate = null; lastMeasuredInferenceTime = null; lastMeasuredQueueSize = null; - this.scaleToZeroAfterNoRequestsSeconds = scaleToZeroAfterNoRequestsSeconds; } void setMinMaxNumberOfAllocations(Integer minNumberOfAllocations, Integer maxNumberOfAllocations) { @@ -117,6 +118,10 @@ void process(AdaptiveAllocationsScalerService.Stats stats, double timeIntervalSe dynamicsChanged = false; } + void resetTimeWithoutRequests() { + timeWithoutRequestsSeconds = 0; + } + double getLoadLower() { double requestRateLower = Math.max(0.0, requestRateEstimator.lower()); double inferenceTimeLower = Math.max(0.0, inferenceTimeEstimator.hasValue() ? inferenceTimeEstimator.lower() : 1.0); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java index 770e890512935..16ec3ee9b468c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java @@ -188,7 +188,10 @@ Collection observeDouble(Function Date: Wed, 23 Oct 2024 09:16:13 -0400 Subject: [PATCH 012/324] [ML] Increase default queue_capacity to 10_000 and decrease max queue_capacity to 100_000 (#115041) * Increase default queue capacity and decrease max queue capacity * Update docs/changelog/115041.yaml * Update tests to match new constants --- docs/changelog/115041.yaml | 6 ++++++ .../ml/action/StartTrainedModelDeploymentAction.java | 4 ++-- .../StartTrainedModelDeploymentRequestTests.java | 10 +++++----- 3 files changed, 13 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/115041.yaml diff --git a/docs/changelog/115041.yaml b/docs/changelog/115041.yaml new file mode 100644 index 0000000000000..f4c047c1569ec --- /dev/null +++ b/docs/changelog/115041.yaml @@ -0,0 +1,6 @@ +pr: 115041 +summary: Increase default `queue_capacity` to 10_000 and decrease max `queue_capacity` + to 100_000 +area: Machine Learning +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java index ca789fee7b744..b298d486c9e03 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java @@ -71,7 +71,7 @@ public class StartTrainedModelDeploymentAction extends ActionType implements ToXCon /** * If the queue is created then we can OOM when we create the queue. */ - private static final int MAX_QUEUE_CAPACITY = 1_000_000; + private static final int MAX_QUEUE_CAPACITY = 100_000; public static final ParseField MODEL_ID = new ParseField("model_id"); public static final ParseField DEPLOYMENT_ID = new ParseField("deployment_id"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java index 730d994fc5e35..46fc8a36c2c2b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java @@ -67,7 +67,7 @@ public static Request createRandom() { request.setNumberOfAllocations(randomIntBetween(1, 8)); } if (randomBoolean()) { - request.setQueueCapacity(randomIntBetween(1, 1000000)); + request.setQueueCapacity(randomIntBetween(1, 100_000)); } if (randomBoolean()) { request.setPriority(randomFrom(Priority.values()).toString()); @@ -168,7 +168,7 @@ public void testValidate_GivenQueueCapacityIsNegative() { public void testValidate_GivenQueueCapacityIsAtLimit() { Request request = createRandom(); - request.setQueueCapacity(1_000_000); + request.setQueueCapacity(100_000); ActionRequestValidationException e = request.validate(); @@ -177,12 +177,12 @@ public void testValidate_GivenQueueCapacityIsAtLimit() { public void testValidate_GivenQueueCapacityIsOverLimit() { Request request = createRandom(); - request.setQueueCapacity(1_000_001); + request.setQueueCapacity(100_001); ActionRequestValidationException e = request.validate(); assertThat(e, is(not(nullValue()))); - assertThat(e.getMessage(), containsString("[queue_capacity] must be less than 1000000")); + assertThat(e.getMessage(), containsString("[queue_capacity] must be less than 100000")); } public void testValidate_GivenTimeoutIsNegative() { @@ -234,6 +234,6 @@ public void testDefaults() { assertThat(request.getNumberOfAllocations(), nullValue()); assertThat(request.computeNumberOfAllocations(), equalTo(1)); assertThat(request.getThreadsPerAllocation(), equalTo(1)); - assertThat(request.getQueueCapacity(), equalTo(1024)); + assertThat(request.getQueueCapacity(), equalTo(10_000)); } } From 43a6b3592eda425338ddecd357f11956710e2ca6 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Wed, 23 Oct 2024 16:29:03 +0300 Subject: [PATCH 013/324] Unmuting RankDocsRetrieverBuilderTests testRewrite (#115403) --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 93714e098f677..f59ca0c213279 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -242,9 +242,6 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/114376 -- class: org.elasticsearch.search.retriever.RankDocsRetrieverBuilderTests - method: testRewrite - issue: https://github.com/elastic/elasticsearch/issues/114467 - class: org.elasticsearch.packaging.test.DockerTests method: test022InstallPluginsFromLocalArchive issue: https://github.com/elastic/elasticsearch/issues/111063 From 98d53352160d6d2c397ea74567ebc690b701973f Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Wed, 23 Oct 2024 15:29:53 +0200 Subject: [PATCH 014/324] ESQL: Disable pushdown of WHERE past STATS (#115308) Fix https://github.com/elastic/elasticsearch/issues/115281 Let's disable the faulty optimization for now and re-introduce it later, correctly. --- docs/changelog/115308.yaml | 6 +++ .../src/main/resources/stats.csv-spec | 27 +++++++++++++ .../xpack/esql/action/EsqlCapabilities.java | 7 +++- .../logical/PushDownAndCombineFilters.java | 15 ++----- .../optimizer/LogicalPlanOptimizerTests.java | 40 +++++++++++-------- .../PushDownAndCombineFiltersTests.java | 1 + 6 files changed, 67 insertions(+), 29 deletions(-) create mode 100644 docs/changelog/115308.yaml diff --git a/docs/changelog/115308.yaml b/docs/changelog/115308.yaml new file mode 100644 index 0000000000000..163f0232a3e58 --- /dev/null +++ b/docs/changelog/115308.yaml @@ -0,0 +1,6 @@ +pr: 115308 +summary: "ESQL: Disable pushdown of WHERE past STATS" +area: ES|QL +type: bug +issues: + - 115281 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index ac4351413129e..6d4c596e8d7de 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -2291,6 +2291,33 @@ m:integer |a:double |x:integer 74999 |48249.0 |0 ; +statsWithFilterOnGroups +required_capability: fix_filter_pushdown_past_stats +FROM employees +| STATS v = VALUES(emp_no) by job_positions | WHERE job_positions == "Accountant" | MV_EXPAND v | SORT v +; + +v:integer | job_positions:keyword + 10001 | Accountant + 10012 | Accountant + 10016 | Accountant + 10023 | Accountant + 10025 | Accountant + 10028 | Accountant + 10034 | Accountant + 10037 | Accountant + 10044 | Accountant + 10045 | Accountant + 10050 | Accountant + 10051 | Accountant + 10066 | Accountant + 10081 | Accountant + 10085 | Accountant + 10089 | Accountant + 10092 | Accountant + 10094 | Accountant +; + statsWithFiltering required_capability: per_agg_filtering diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 94211e4726a2c..5157a80022c39 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -411,7 +411,12 @@ public enum Cap { /** * Support for semantic_text field mapping */ - SEMANTIC_TEXT_TYPE(EsqlCorePlugin.SEMANTIC_TEXT_FEATURE_FLAG); + SEMANTIC_TEXT_TYPE(EsqlCorePlugin.SEMANTIC_TEXT_FEATURE_FLAG), + /** + * Fix for an optimization that caused wrong results + * https://github.com/elastic/elasticsearch/issues/115281 + */ + FIX_FILTER_PUSHDOWN_PAST_STATS; private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java index ed09d0bc16754..15e49c22a44db 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java @@ -15,8 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Filter; @@ -38,18 +36,13 @@ protected LogicalPlan rule(Filter filter) { LogicalPlan child = filter.child(); Expression condition = filter.condition(); + // TODO: Push down past STATS if the filter is only on the groups; but take into account how `STATS ... BY field` handles + // multi-values: It seems to be equivalent to `EVAL field = MV_DEDUPE(field) | MV_EXPAND(field) | STATS ... BY field`, where the + // last `STATS ... BY field` can assume that `field` is single-valued (to be checked more thoroughly). + // https://github.com/elastic/elasticsearch/issues/115311 if (child instanceof Filter f) { // combine nodes into a single Filter with updated ANDed condition plan = f.with(Predicates.combineAnd(List.of(f.condition(), condition))); - } else if (child instanceof Aggregate agg) { // TODO: re-evaluate along with multi-value support - // Only push [parts of] a filter past an agg if these/it operates on agg's grouping[s], not output. - plan = maybePushDownPastUnary( - filter, - agg, - e -> e instanceof Attribute && agg.output().contains(e) && agg.groupings().contains(e) == false - || e instanceof AggregateFunction, - NO_OP - ); } else if (child instanceof Eval eval) { // Don't push if Filter (still) contains references to Eval's fields. // Account for simple aliases in the Eval, though - these shouldn't stop us. diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 8d7c1997f78e3..ff7675504d6ff 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -738,6 +738,7 @@ public void testMultipleCombineLimits() { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/115311") public void testSelectivelyPushDownFilterPastRefAgg() { // expected plan: "from test | where emp_no > 1 and emp_no < 3 | stats x = count(1) by emp_no | where x > 7" LogicalPlan plan = optimizedPlan(""" @@ -790,6 +791,7 @@ public void testNoPushDownOrFilterPastAgg() { assertTrue(stats.child() instanceof EsRelation); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/115311") public void testSelectivePushDownComplexFilterPastAgg() { // expected plan: from test | emp_no > 0 | stats x = count(1) by emp_no | where emp_no < 3 or x > 9 LogicalPlan plan = optimizedPlan(""" @@ -1393,13 +1395,15 @@ public void testPushDownLimitThroughMultipleSort_AfterMvExpand2() { } /** + * TODO: Push down the filter correctly https://github.com/elastic/elasticsearch/issues/115311 + * * Expected * Limit[5[INTEGER]] - * \_Aggregate[[first_name{f}#232],[MAX(salary{f}#233) AS max_s, first_name{f}#232]] - * \_Filter[ISNOTNULL(first_name{f}#232)] - * \_MvExpand[first_name{f}#232] - * \_TopN[[Order[emp_no{f}#231,ASC,LAST]],50[INTEGER]] - * \_EsRelation[employees][emp_no{f}#231, first_name{f}#232, salary{f}#233] + * \_Filter[ISNOTNULL(first_name{r}#23)] + * \_Aggregate[STANDARD,[first_name{r}#23],[MAX(salary{f}#18,true[BOOLEAN]) AS max_s, first_name{r}#23]] + * \_MvExpand[first_name{f}#14,first_name{r}#23] + * \_TopN[[Order[emp_no{f}#13,ASC,LAST]],50[INTEGER]] + * \_EsRelation[test][_meta_field{f}#19, emp_no{f}#13, first_name{f}#14, ..] */ public void testDontPushDownLimitPastAggregate_AndMvExpand() { LogicalPlan plan = optimizedPlan(""" @@ -1413,10 +1417,10 @@ public void testDontPushDownLimitPastAggregate_AndMvExpand() { | limit 5"""); var limit = as(plan, Limit.class); + var filter = as(limit.child(), Filter.class); assertThat(limit.limit().fold(), equalTo(5)); - var agg = as(limit.child(), Aggregate.class); - var filter = as(agg.child(), Filter.class); - var mvExp = as(filter.child(), MvExpand.class); + var agg = as(filter.child(), Aggregate.class); + var mvExp = as(agg.child(), MvExpand.class); var topN = as(mvExp.child(), TopN.class); assertThat(topN.limit().fold(), equalTo(50)); assertThat(orderNames(topN), contains("emp_no")); @@ -1424,14 +1428,16 @@ public void testDontPushDownLimitPastAggregate_AndMvExpand() { } /** + * TODO: Push down the filter correctly https://github.com/elastic/elasticsearch/issues/115311 + * * Expected * Limit[5[INTEGER]] - * \_Aggregate[[first_name{f}#262],[MAX(salary{f}#263) AS max_s, first_name{f}#262]] - * \_Filter[ISNOTNULL(first_name{f}#262)] - * \_Limit[50[INTEGER]] - * \_MvExpand[first_name{f}#262] - * \_Limit[50[INTEGER]] - * \_EsRelation[employees][emp_no{f}#261, first_name{f}#262, salary{f}#263] + * \_Filter[ISNOTNULL(first_name{r}#22)] + * \_Aggregate[STANDARD,[first_name{r}#22],[MAX(salary{f}#17,true[BOOLEAN]) AS max_s, first_name{r}#22]] + * \_Limit[50[INTEGER]] + * \_MvExpand[first_name{f}#13,first_name{r}#22] + * \_Limit[50[INTEGER]] + * \_EsRelation[test][_meta_field{f}#18, emp_no{f}#12, first_name{f}#13, ..] */ public void testPushDown_TheRightLimit_PastMvExpand() { LogicalPlan plan = optimizedPlan(""" @@ -1445,9 +1451,9 @@ public void testPushDown_TheRightLimit_PastMvExpand() { var limit = as(plan, Limit.class); assertThat(limit.limit().fold(), equalTo(5)); - var agg = as(limit.child(), Aggregate.class); - var filter = as(agg.child(), Filter.class); - limit = as(filter.child(), Limit.class); + var filter = as(limit.child(), Filter.class); + var agg = as(filter.child(), Aggregate.class); + limit = as(agg.child(), Limit.class); assertThat(limit.limit().fold(), equalTo(50)); var mvExp = as(limit.child(), MvExpand.class); limit = as(mvExp.child(), Limit.class); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java index 49a738f4f4fa3..e159e5ed0bd7d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java @@ -213,6 +213,7 @@ public void testPushDownLikeRlikeFilter() { // from ... | where a > 1 | stats count(1) by b | where count(1) >= 3 and b < 2 // => ... | where a > 1 and b < 2 | stats count(1) by b | where count(1) >= 3 + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/115311") public void testSelectivelyPushDownFilterPastFunctionAgg() { EsRelation relation = relation(); GreaterThan conditionA = greaterThanOf(getFieldAttribute("a"), ONE); From 1ca39789e27ce4ca4c1f4e88112aa24a87d96649 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Wed, 23 Oct 2024 17:40:18 +0300 Subject: [PATCH 015/324] Updating error handling for compound retrievers (#115277) --- .../retriever/CompoundRetrieverBuilder.java | 19 +++++- .../xpack/rank/rrf/RRFRetrieverBuilderIT.java | 63 ++++++++++++++++--- 2 files changed, 74 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java index 7373bc5b75049..b15798db95b6f 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java @@ -11,6 +11,8 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.search.MultiSearchRequest; @@ -20,6 +22,7 @@ import org.elasticsearch.action.search.TransportMultiSearchAction; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.StoredFieldsContext; @@ -121,10 +124,17 @@ public final RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOExceptio public void onResponse(MultiSearchResponse items) { List topDocs = new ArrayList<>(); List failures = new ArrayList<>(); + // capture the max status code returned by any of the responses + int statusCode = RestStatus.OK.getStatus(); + List retrieversWithFailures = new ArrayList<>(); for (int i = 0; i < items.getResponses().length; i++) { var item = items.getResponses()[i]; if (item.isFailure()) { failures.add(item.getFailure()); + retrieversWithFailures.add(innerRetrievers.get(i).retriever().getName()); + if (ExceptionsHelper.status(item.getFailure()).getStatus() > statusCode) { + statusCode = ExceptionsHelper.status(item.getFailure()).getStatus(); + } } else { assert item.getResponse() != null; var rankDocs = getRankDocs(item.getResponse()); @@ -133,7 +143,14 @@ public void onResponse(MultiSearchResponse items) { } } if (false == failures.isEmpty()) { - IllegalStateException ex = new IllegalStateException("Search failed - some nested retrievers returned errors."); + assert statusCode != RestStatus.OK.getStatus(); + final String errMessage = "[" + + getName() + + "] search failed - retrievers '" + + retrieversWithFailures + + "' returned errors. " + + "All failures are attached as suppressed exceptions."; + Exception ex = new ElasticsearchStatusException(errMessage, RestStatus.fromCode(statusCode)); failures.forEach(ex::addSuppressed); listener.onFailure(ex); } else { diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java index c5978219d94d3..37e1807d138aa 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.xpack.rank.rrf; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -18,6 +20,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -47,7 +50,6 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -589,11 +591,11 @@ public void testRRFExplainWithAnotherNestedRRF() { }); } - public void testRRFInnerRetrieverSearchError() { + public void testRRFInnerRetrieverAll4xxSearchErrors() { final int rankWindowSize = 100; final int rankConstant = 10; SearchSourceBuilder source = new SearchSourceBuilder(); - // this will throw an error during evaluation + // this will throw a 4xx error during evaluation StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder( QueryBuilders.constantScoreQuery(QueryBuilders.rangeQuery(VECTOR_FIELD).gte(10)) ); @@ -615,10 +617,57 @@ public void testRRFInnerRetrieverSearchError() { ) ); SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); - Exception ex = expectThrows(IllegalStateException.class, req::get); - assertThat(ex, instanceOf(IllegalStateException.class)); - assertThat(ex.getMessage(), containsString("Search failed - some nested retrievers returned errors")); - assertThat(ex.getSuppressed().length, greaterThan(0)); + Exception ex = expectThrows(ElasticsearchStatusException.class, req::get); + assertThat(ex, instanceOf(ElasticsearchStatusException.class)); + assertThat( + ex.getMessage(), + containsString( + "[rrf] search failed - retrievers '[standard]' returned errors. All failures are attached as suppressed exceptions." + ) + ); + assertThat(ExceptionsHelper.status(ex), equalTo(RestStatus.BAD_REQUEST)); + assertThat(ex.getSuppressed().length, equalTo(1)); + assertThat(ex.getSuppressed()[0].getCause().getCause(), instanceOf(IllegalArgumentException.class)); + } + + public void testRRFInnerRetrieverMultipleErrorsOne5xx() { + final int rankWindowSize = 100; + final int rankConstant = 10; + SearchSourceBuilder source = new SearchSourceBuilder(); + // this will throw a 4xx error during evaluation + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder( + QueryBuilders.constantScoreQuery(QueryBuilders.rangeQuery(VECTOR_FIELD).gte(10)) + ); + // this will throw a 5xx error + TestRetrieverBuilder testRetrieverBuilder = new TestRetrieverBuilder("val") { + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + searchSourceBuilder.aggregation(AggregationBuilders.avg("some_invalid_param")); + } + }; + source.retriever( + new RRFRetrieverBuilder( + Arrays.asList( + new CompoundRetrieverBuilder.RetrieverSource(standard0, null), + new CompoundRetrieverBuilder.RetrieverSource(testRetrieverBuilder, null) + ), + rankWindowSize, + rankConstant + ) + ); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + Exception ex = expectThrows(ElasticsearchStatusException.class, req::get); + assertThat(ex, instanceOf(ElasticsearchStatusException.class)); + assertThat( + ex.getMessage(), + containsString( + "[rrf] search failed - retrievers '[standard, test]' returned errors. All failures are attached as suppressed exceptions." + ) + ); + assertThat(ExceptionsHelper.status(ex), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); + assertThat(ex.getSuppressed().length, equalTo(2)); + assertThat(ex.getSuppressed()[0].getCause().getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(ex.getSuppressed()[1].getCause().getCause(), instanceOf(IllegalStateException.class)); } public void testRRFInnerRetrieverErrorWhenExtractingToSource() { From c6bd53f21b0a0fde6f51c97545543dbd2a2f7c65 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Wed, 23 Oct 2024 17:17:38 +0200 Subject: [PATCH 016/324] Fix `FileSettingsRoleMappingUpgradeIT` assertions (#115422) Fixes some faulty assertions in an upgrade test. Test failures only manifest on the 8.16 branch since 9.x does not qualify for these upgrade tests, and the change is not backported to 8.17 yet (unrelated CI failures). I validated this works by running it locally from the 8.16 branch. Resolves: https://github.com/elastic/elasticsearch/issues/115410 Resolves: https://github.com/elastic/elasticsearch/issues/115411 --- .../FileSettingsRoleMappingUpgradeIT.java | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java index b3d4dfc68d399..834d97f755dfb 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java @@ -25,12 +25,10 @@ import java.io.IOException; import java.util.List; -import java.util.Map; import java.util.function.Supplier; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -104,15 +102,17 @@ public void testRoleMappingsAppliedOnUpgrade() throws IOException { // the nodes have all been upgraded. Check they re-processed the role mappings in the settings file on // upgrade Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); - List roleMappings = new XContentTestUtils.JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))).get( - "metadata.role_mappings.role_mappings" + List clusterStateRoleMappings = new XContentTestUtils.JsonMapView( + entityAsMap(client().performRequest(clusterStateRequest)) + ).get("metadata.role_mappings.role_mappings"); + assertThat(clusterStateRoleMappings, is(not(nullValue()))); + assertThat(clusterStateRoleMappings.size(), equalTo(1)); + + assertThat( + entityAsMap(client().performRequest(new Request("GET", "/_security/role_mapping"))).keySet(), + // TODO change this to `contains` once the clean-up migration work is merged + hasItem("everyone_kibana-read-only-operator-mapping") ); - assertThat(roleMappings, is(not(nullValue()))); - assertThat(roleMappings.size(), equalTo(1)); - assertThat(roleMappings, is(instanceOf(Map.class))); - @SuppressWarnings("unchecked") - Map roleMapping = (Map) roleMappings; - assertThat(roleMapping.keySet(), contains("everyone_kibana-read-only-operator-mapping")); } } } From 1c38f87db49e7ea1bc0f3e80d2fe5561b3038caf Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Wed, 23 Oct 2024 11:24:55 -0400 Subject: [PATCH 017/324] Refactor PipelineConfiguration#getVersion (#115423) --- .../ingest/PipelineConfiguration.java | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index de172d86b810d..7406ee8837264 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -99,18 +99,13 @@ BytesReference getConfig() { } public Integer getVersion() { - var configMap = getConfigAsMap(); - if (configMap.containsKey("version")) { - Object o = configMap.get("version"); - if (o == null) { - return null; - } else if (o instanceof Number number) { - return number.intValue(); - } else { - throw new IllegalStateException("unexpected version type [" + o.getClass().getName() + "]"); - } - } else { + Object o = getConfigAsMap().get("version"); + if (o == null) { return null; + } else if (o instanceof Number number) { + return number.intValue(); + } else { + throw new IllegalStateException("unexpected version type [" + o.getClass().getName() + "]"); } } From 4bbedb8aedb27b338f11a692e421e0a212f39f45 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Wed, 23 Oct 2024 18:03:58 +0200 Subject: [PATCH 018/324] Fix file settings service test on windows (#115234) Fix unit test on windows: it looks like the replace-existing flag is necessary to avoid AccessDeniedExceptions like this [example failure](https://gradle-enterprise.elastic.co/s/4tjgx5vzblv36/tests/task/:server:test/details/org.elasticsearch.reservedstate.service.FileSettingsServiceTests/testProcessFileChanges?top-execution=1). Resolves: https://github.com/elastic/elasticsearch/issues/115280 --- .../service/FileSettingsServiceTests.java | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index 8ee2754427dda..c0657b5888ad2 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -250,7 +250,7 @@ public void testProcessFileChanges() throws Exception { fileSettingsService.start(); fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); // second file change; contents still don't matter - writeTestFile(fileSettingsService.watchedFile(), "{}"); + overwriteTestFile(fileSettingsService.watchedFile(), "{}"); // wait for listener to be called (once for initial processing, once for subsequent update) assertTrue(latch.await(20, TimeUnit.SECONDS)); @@ -355,6 +355,12 @@ public void testHandleSnapshotRestoreResetsMetadata() throws Exception { private void writeTestFile(Path path, String contents) throws IOException { Path tempFilePath = createTempFile(); Files.writeString(tempFilePath, contents); - Files.move(tempFilePath, path, StandardCopyOption.ATOMIC_MOVE); + Files.move(tempFilePath, path, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); + } + + private void overwriteTestFile(Path path, String contents) throws IOException { + Path tempFilePath = createTempFile(); + Files.writeString(tempFilePath, contents); + Files.move(tempFilePath, path, StandardCopyOption.REPLACE_EXISTING); } } From e581ae3482624983eeb4e201299735ed4ebc1066 Mon Sep 17 00:00:00 2001 From: shainaraskas <58563081+shainaraskas@users.noreply.github.com> Date: Wed, 23 Oct 2024 12:57:59 -0400 Subject: [PATCH 019/324] Reorder docs sidebar (#115360) --- docs/reference/index.asciidoc | 62 ++++++++++++------- .../release-notes/highlights.asciidoc | 11 ++-- 2 files changed, 46 insertions(+), 27 deletions(-) diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 7e207146e38e3..18052cfb64e8f 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -6,7 +6,7 @@ include::links.asciidoc[] include::landing-page.asciidoc[] -include::release-notes/highlights.asciidoc[] +// overview / install include::intro.asciidoc[] @@ -14,33 +14,37 @@ include::quickstart/index.asciidoc[] include::setup.asciidoc[] -include::upgrade.asciidoc[] +// search solution -include::index-modules.asciidoc[] +include::search/search-your-data/search-your-data.asciidoc[] -include::mapping.asciidoc[] +include::reranking/index.asciidoc[] -include::analysis.asciidoc[] +// data management + +include::index-modules.asciidoc[] include::indices/index-templates.asciidoc[] -include::data-streams/data-streams.asciidoc[] +include::alias.asciidoc[] -include::ingest.asciidoc[] +include::mapping.asciidoc[] -include::alias.asciidoc[] +include::analysis.asciidoc[] -include::search/search-your-data/search-your-data.asciidoc[] +include::ingest.asciidoc[] -include::reranking/index.asciidoc[] +include::connector/docs/index.asciidoc[] -include::query-dsl.asciidoc[] +include::data-streams/data-streams.asciidoc[] -include::aggregations.asciidoc[] +include::data-management.asciidoc[] -include::geospatial-analysis.asciidoc[] +include::data-rollup-transform.asciidoc[] -include::connector/docs/index.asciidoc[] +// analysis tools + +include::query-dsl.asciidoc[] include::eql/eql.asciidoc[] @@ -50,34 +54,48 @@ include::sql/index.asciidoc[] include::scripting.asciidoc[] -include::data-management.asciidoc[] +include::aggregations.asciidoc[] -include::autoscaling/index.asciidoc[] +include::geospatial-analysis.asciidoc[] + +include::watcher/index.asciidoc[] + +// cluster management include::monitoring/index.asciidoc[] -include::data-rollup-transform.asciidoc[] +include::security/index.asciidoc[] + +// production tasks include::high-availability.asciidoc[] +include::how-to.asciidoc[] + +include::autoscaling/index.asciidoc[] + include::snapshot-restore/index.asciidoc[] -include::security/index.asciidoc[] +// reference -include::watcher/index.asciidoc[] +include::rest-api/index.asciidoc[] include::commands/index.asciidoc[] -include::how-to.asciidoc[] - include::troubleshooting.asciidoc[] -include::rest-api/index.asciidoc[] +// upgrades + +include::upgrade.asciidoc[] include::migration/index.asciidoc[] +include::release-notes/highlights.asciidoc[] + include::release-notes.asciidoc[] include::dependencies-versions.asciidoc[] +// etc + include::redirects.asciidoc[] \ No newline at end of file diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 81d46b5773877..c3f6fb43f2ffd 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -1,5 +1,6 @@ +[chapter] [[release-highlights]] -== What's new in {minor-version} += What's new in {minor-version} coming::[{minor-version}] @@ -37,7 +38,7 @@ endif::[] [discrete] [[esql_inlinestats]] -=== ESQL: INLINESTATS +== ESQL: INLINESTATS This adds the `INLINESTATS` command to ESQL which performs a STATS and then enriches the results into the output stream. So, this query: @@ -62,7 +63,7 @@ Produces output like: [discrete] [[always_allow_rebalancing_by_default]] -=== Always allow rebalancing by default +== Always allow rebalancing by default In earlier versions of {es} the `cluster.routing.allocation.allow_rebalance` setting defaults to `indices_all_active` which blocks all rebalancing moves while the cluster is in `yellow` or `red` health. This was appropriate for the legacy allocator which might do too many rebalancing moves otherwise. Today's allocator has @@ -74,7 +75,7 @@ version 8.16 `allow_rebalance` setting defaults to `always` unless the legacy al [discrete] [[add_global_retention_in_data_stream_lifecycle]] -=== Add global retention in data stream lifecycle +== Add global retention in data stream lifecycle Data stream lifecycle now supports configuring retention on a cluster level, namely global retention. Global retention \nallows us to configure two different retentions: @@ -88,7 +89,7 @@ data stream lifecycle and it allows any data stream \ndata to be deleted after t [discrete] [[enable_zstandard_compression_for_indices_with_index_codec_set_to_best_compression]] -=== Enable ZStandard compression for indices with index.codec set to best_compression +== Enable ZStandard compression for indices with index.codec set to best_compression Before DEFLATE compression was used to compress stored fields in indices with index.codec index setting set to best_compression, with this change ZStandard is used as compression algorithm to stored fields for indices with index.codec index setting set to best_compression. The usage ZStandard results in less storage usage with a From 06a3e1902102a43e4ef9685c22d71c10d4bb280c Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 23 Oct 2024 10:28:17 -0700 Subject: [PATCH 020/324] Remove LongGCDisruption scheme (#115046) Long GC disruption relies on Thread.resume, which is removed in JDK 23. Tests that use it predate more modern disruption tests. This commit removes gc disruption and the master disruption tests. Note that tests relying on this scheme have already not been running since JDK 20 first deprecated Thread.resume. --- .../discovery/MasterDisruptionIT.java | 44 --- .../discovery/StableMasterDisruptionIT.java | 272 -------------- .../IntermittentLongGCDisruption.java | 109 ------ .../test/disruption/LongGCDisruption.java | 350 ------------------ .../disruption/LongGCDisruptionTests.java | 257 ------------- 5 files changed, 1032 deletions(-) delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java delete mode 100644 test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 214fc47222f3a..bf81200509691 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -21,21 +21,15 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.BlockMasterServiceOnMaster; -import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; -import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.xcontent.XContentType; -import java.util.ArrayList; -import java.util.HashSet; import java.util.List; -import java.util.Set; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; /** * Tests relating to the loss of the master. @@ -43,44 +37,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { - /** - * Test that cluster recovers from a long GC on master that causes other nodes to elect a new one - */ - public void testMasterNodeGCs() throws Exception { - List nodes = startCluster(3); - // NOTE: this assume must happen after starting the cluster, so that cleanup will have something to cleanup. - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - - String oldMasterNode = internalCluster().getMasterName(); - // a very long GC, but it's OK as we remove the disruption when it has had an effect - SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption(random(), oldMasterNode, 100, 200, 30000, 60000); - internalCluster().setDisruptionScheme(masterNodeDisruption); - masterNodeDisruption.startDisrupting(); - - Set oldNonMasterNodesSet = new HashSet<>(nodes); - oldNonMasterNodesSet.remove(oldMasterNode); - - List oldNonMasterNodes = new ArrayList<>(oldNonMasterNodesSet); - - logger.info("waiting for nodes to de-elect master [{}]", oldMasterNode); - for (String node : oldNonMasterNodesSet) { - assertDifferentMaster(node, oldMasterNode); - } - - logger.info("waiting for nodes to elect a new master"); - ensureStableCluster(2, oldNonMasterNodes.get(0)); - - // restore GC - masterNodeDisruption.stopDisrupting(); - final TimeValue waitTime = new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis()); - ensureStableCluster(3, waitTime, false, oldNonMasterNodes.get(0)); - - // make sure all nodes agree on master - String newMaster = internalCluster().getMasterName(); - assertThat(newMaster, not(equalTo(oldMasterNode))); - assertMaster(newMaster, nodes); - } - /** * This test isolates the master from rest of the cluster, waits for a new master to be elected, restores the partition * and verifies that all node agree on the new cluster state diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index 32c602791cca4..48db23635220c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -14,33 +14,26 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.FollowersChecker; import org.elasticsearch.cluster.coordination.LeaderChecker; import org.elasticsearch.cluster.coordination.MasterHistoryService; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.Tuple; import org.elasticsearch.health.GetHealthAction; import org.elasticsearch.health.HealthStatus; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkLinkDisruptionType; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; -import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContent; @@ -50,17 +43,12 @@ import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; -import java.util.Objects; import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static java.util.Collections.singleton; @@ -227,266 +215,6 @@ private void testFollowerCheckerAfterMasterReelection(NetworkLinkDisruptionType ensureStableCluster(3); } - /** - * Tests that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes that already are - * following another elected master node. These nodes should reject this cluster state and prevent them from following the stale master. - */ - public void testStaleMasterNotHijackingMajority() throws Exception { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - final List nodes = internalCluster().startNodes( - 3, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .build() - ); - ensureStableCluster(3); - - // Save the current master node as old master node, because that node will get frozen - final String oldMasterNode = internalCluster().getMasterName(); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); - - // Save the majority side - final List majoritySide = new ArrayList<>(nodes); - majoritySide.remove(oldMasterNode); - - // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: - final Map>> masters = Collections.synchronizedMap(new HashMap<>()); - for (final String node : majoritySide) { - masters.put(node, new ArrayList<>()); - internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); - if (Objects.equals(previousMaster, currentMaster) == false) { - logger.info( - "--> node {} received new cluster state: {} \n and had previous cluster state: {}", - node, - event.state(), - event.previousState() - ); - String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); - } - }); - } - - final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); - } - }); - - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("--> freezing node [{}]", oldMasterNode); - masterNodeDisruption.startDisrupting(); - - // Wait for majority side to elect a new master - assertBusy(() -> { - for (final Map.Entry>> entry : masters.entrySet()) { - final List> transitions = entry.getValue(); - assertTrue(entry.getKey() + ": " + transitions, transitions.stream().anyMatch(transition -> transition.v2() != null)); - } - }); - - // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, but will be queued and - // once the old master node un-freezes it gets executed. The old master node will send this update + the cluster state where it is - // flagged as master to the other nodes that follow the new master. These nodes should ignore this update. - internalCluster().getInstance(ClusterService.class, oldMasterNode) - .submitUnbatchedStateUpdateTask("sneaky-update", new ClusterStateUpdateTask(Priority.IMMEDIATE) { - @Override - public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).build(); - } - - @Override - public void onFailure(Exception e) { - logger.warn("failure [sneaky-update]", e); - } - }); - - // Save the new elected master node - final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); - logger.info("--> new detected master node [{}]", newMasterNode); - - // Stop disruption - logger.info("--> unfreezing node [{}]", oldMasterNode); - masterNodeDisruption.stopDisrupting(); - - oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); - logger.info("--> [{}] stepped down as master", oldMasterNode); - ensureStableCluster(3); - - assertThat(masters.size(), equalTo(2)); - for (Map.Entry>> entry : masters.entrySet()) { - String nodeName = entry.getKey(); - List> transitions = entry.getValue(); - assertTrue( - "[" + nodeName + "] should not apply state from old master [" + oldMasterNode + "] but it did: " + transitions, - transitions.stream().noneMatch(t -> oldMasterNode.equals(t.v2())) - ); - } - assertGreenMasterStability(internalCluster().client()); - } - - /** - * This helper method creates a 3-node cluster where all nodes are master-eligible, and then simulates a long GC on the master node 5 - * times (forcing another node to be elected master 5 times). It then asserts that the master stability health indicator status is - * YELLOW, and that expectedMasterStabilitySymptomSubstring is contained in the symptom. - * @param expectedMasterStabilitySymptomSubstring A string to expect in the master stability health indicator symptom - * @throws Exception - */ - public void testRepeatedMasterChanges(String expectedMasterStabilitySymptomSubstring) throws Exception { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - final List nodes = internalCluster().startNodes( - 3, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.IDENTITY_CHANGES_THRESHOLD_SETTING.getKey(), 1) - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 100) - .build() - ); - ensureStableCluster(3); - String firstMaster = internalCluster().getMasterName(); - // Force the master to change 2 times: - for (int i = 0; i < 2; i++) { - // Save the current master node as old master node, because that node will get frozen - final String oldMasterNode = internalCluster().getMasterName(); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); - - // Save the majority side - final List majoritySide = new ArrayList<>(nodes); - majoritySide.remove(oldMasterNode); - - // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: - final Map>> masters = Collections.synchronizedMap(new HashMap<>()); - for (final String node : majoritySide) { - masters.put(node, new ArrayList<>()); - internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); - if (Objects.equals(previousMaster, currentMaster) == false) { - logger.info( - "--> node {} received new cluster state: {} \n and had previous cluster state: {}", - node, - event.state(), - event.previousState() - ); - String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); - } - }); - } - - final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); - } - }); - internalCluster().clearDisruptionScheme(); - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("--> freezing node [{}]", oldMasterNode); - masterNodeDisruption.startDisrupting(); - - // Wait for majority side to elect a new master - assertBusy(() -> { - for (final Map.Entry>> entry : masters.entrySet()) { - final List> transitions = entry.getValue(); - assertTrue(entry.getKey() + ": " + transitions, transitions.stream().anyMatch(transition -> transition.v2() != null)); - } - }); - - // Save the new elected master node - final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); - logger.info("--> new detected master node [{}]", newMasterNode); - - // Stop disruption - logger.info("--> unfreezing node [{}]", oldMasterNode); - masterNodeDisruption.stopDisrupting(); - - oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); - logger.info("--> [{}] stepped down as master", oldMasterNode); - ensureStableCluster(3); - - assertThat(masters.size(), equalTo(2)); - } - List nodeNamesExceptFirstMaster = Arrays.stream(internalCluster().getNodeNames()) - .filter(name -> name.equals(firstMaster) == false) - .toList(); - /* - * It is possible that the first node that became master got re-elected repeatedly. And since it was in a simulated GC when the - * other node(s) were master, it only saw itself as master. So we want to check with another node. - */ - Client client = internalCluster().client(randomFrom(nodeNamesExceptFirstMaster)); - assertMasterStability(client, HealthStatus.YELLOW, containsString(expectedMasterStabilitySymptomSubstring)); - } - - public void testRepeatedNullMasterRecognizedAsGreenIfMasterDoesNotKnowItIsUnstable() throws Exception { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - /* - * In this test we have a single master-eligible node. We pause it repeatedly (simulating a long GC pause for example) so that - * other nodes decide it is no longer the master. However since there is no other master-eligible node, another node is never - * elected master. And the master node never recognizes that it had a problem. So when we run the master stability check on one - * of the data nodes, it will see that there is a problem (the master has gone null repeatedly), but when it checks with the - * master, the master says everything is fine. So we expect a GREEN status. - */ - final List masterNodes = internalCluster().startMasterOnlyNodes( - 1, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) - .build() - ); - int nullTransitionsThreshold = 1; - final List dataNodes = internalCluster().startDataOnlyNodes( - 2, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), nullTransitionsThreshold) - .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(60, TimeUnit.SECONDS)) - .build() - ); - ensureStableCluster(3); - for (int i = 0; i < nullTransitionsThreshold + 1; i++) { - final String masterNode = masterNodes.get(0); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), masterNode); - - final CountDownLatch dataNodeMasterSteppedDown = new CountDownLatch(2); - internalCluster().getInstance(ClusterService.class, dataNodes.get(0)).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - dataNodeMasterSteppedDown.countDown(); - } - }); - internalCluster().getInstance(ClusterService.class, dataNodes.get(1)).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - dataNodeMasterSteppedDown.countDown(); - } - }); - internalCluster().clearDisruptionScheme(); - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("--> freezing node [{}]", masterNode); - masterNodeDisruption.startDisrupting(); - dataNodeMasterSteppedDown.await(30, TimeUnit.SECONDS); - // Stop disruption - logger.info("--> unfreezing node [{}]", masterNode); - masterNodeDisruption.stopDisrupting(); - ensureStableCluster(3, TimeValue.timeValueSeconds(30), false, randomFrom(dataNodes)); - } - assertGreenMasterStability(internalCluster().client(randomFrom(dataNodes))); - } - public void testNoMasterEligibleNodes() throws Exception { /* * In this test we have a single master-eligible node. We then stop the master. We set the master lookup threshold very low on the diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java deleted file mode 100644 index 9e2f8c931c84a..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ -package org.elasticsearch.test.disruption; - -import org.elasticsearch.core.TimeValue; - -import java.util.HashSet; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Simulates irregular long gc intervals. - */ -public class IntermittentLongGCDisruption extends LongGCDisruption { - - volatile boolean disrupting; - volatile Thread worker; - - final long intervalBetweenDelaysMin; - final long intervalBetweenDelaysMax; - final long delayDurationMin; - final long delayDurationMax; - - public IntermittentLongGCDisruption( - Random random, - String disruptedNode, - long intervalBetweenDelaysMin, - long intervalBetweenDelaysMax, - long delayDurationMin, - long delayDurationMax - ) { - super(random, disruptedNode); - this.intervalBetweenDelaysMin = intervalBetweenDelaysMin; - this.intervalBetweenDelaysMax = intervalBetweenDelaysMax; - this.delayDurationMin = delayDurationMin; - this.delayDurationMax = delayDurationMax; - } - - static final AtomicInteger thread_ids = new AtomicInteger(); - - @Override - public void startDisrupting() { - disrupting = true; - worker = new Thread(new BackgroundWorker(), "long_gc_simulation_" + thread_ids.incrementAndGet()); - worker.setDaemon(true); - worker.start(); - } - - @Override - public void stopDisrupting() { - if (worker == null) { - return; - } - logger.info("stopping long GCs on [{}]", disruptedNode); - disrupting = false; - worker.interrupt(); - try { - worker.join(2 * (intervalBetweenDelaysMax + delayDurationMax)); - } catch (InterruptedException e) { - logger.info("background thread failed to stop"); - } - worker = null; - } - - private void simulateLongGC(final TimeValue duration) throws InterruptedException { - logger.info("node [{}] goes into GC for for [{}]", disruptedNode, duration); - final Set nodeThreads = new HashSet<>(); - try { - while (suspendThreads(nodeThreads)) - ; - if (nodeThreads.isEmpty() == false) { - Thread.sleep(duration.millis()); - } - } finally { - logger.info("node [{}] resumes from GC", disruptedNode); - resumeThreads(nodeThreads); - } - } - - class BackgroundWorker implements Runnable { - - @Override - public void run() { - while (disrupting) { - try { - TimeValue duration = new TimeValue(delayDurationMin + random.nextInt((int) (delayDurationMax - delayDurationMin))); - simulateLongGC(duration); - - duration = new TimeValue( - intervalBetweenDelaysMin + random.nextInt((int) (intervalBetweenDelaysMax - intervalBetweenDelaysMin)) - ); - if (disrupting) { - Thread.sleep(duration.millis()); - } - } catch (InterruptedException e) {} catch (Exception e) { - logger.error("error in background worker", e); - } - } - } - } - -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java deleted file mode 100644 index dce9e2600d0a6..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.test.disruption; - -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.test.InternalTestCluster; - -import java.lang.management.ManagementFactory; -import java.lang.management.ThreadInfo; -import java.lang.management.ThreadMXBean; -import java.util.Arrays; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; -import java.util.regex.Pattern; -import java.util.stream.Collectors; - -/** - * Suspends all threads on the specified node in order to simulate a long gc. - */ -public class LongGCDisruption extends SingleNodeDisruption { - - private static final Pattern[] unsafeClasses = new Pattern[] { - // logging has shared JVM locks; we may suspend a thread and block other nodes from doing their thing - Pattern.compile("logging\\.log4j"), - // security manager is shared across all nodes and it uses synchronized maps internally - Pattern.compile("java\\.lang\\.SecurityManager"), - // SecureRandom instance from SecureRandomHolder class is shared by all nodes - Pattern.compile("java\\.security\\.SecureRandom"), - // Lucene's WindowsFS is shared across nodes and contains some coarse synchronization - Pattern.compile("org\\.apache\\.lucene\\.tests\\.mockfile\\.WindowsFS") }; - - private static final ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); - - protected final String disruptedNode; - private Set suspendedThreads; - private Thread blockDetectionThread; - - private final AtomicBoolean sawSlowSuspendBug = new AtomicBoolean(false); - - public LongGCDisruption(Random random, String disruptedNode) { - super(random); - this.disruptedNode = disruptedNode; - } - - /** - * Checks if during disruption we ran into a known JVM issue that makes {@link Thread#suspend()} calls block for multiple seconds - * was observed. - * @see JDK-8218446 - * @return true if during thread suspending a call to {@link Thread#suspend()} took more than 3s - */ - public boolean sawSlowSuspendBug() { - return sawSlowSuspendBug.get(); - } - - @Override - public synchronized void startDisrupting() { - if (suspendedThreads == null) { - boolean success = false; - try { - suspendedThreads = ConcurrentHashMap.newKeySet(); - - final String currentThreadName = Thread.currentThread().getName(); - assert isDisruptedNodeThread(currentThreadName) == false - : "current thread match pattern. thread name: " + currentThreadName + ", node: " + disruptedNode; - // we spawn a background thread to protect against deadlock which can happen - // if there are shared resources between caller thread and suspended threads - // see unsafeClasses to how to avoid that - final AtomicReference suspendingError = new AtomicReference<>(); - final Thread suspendingThread = new Thread(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - suspendingError.set(e); - } - - @Override - protected void doRun() throws Exception { - // keep trying to suspend threads, until no new threads are discovered. - while (suspendThreads(suspendedThreads)) { - if (Thread.interrupted()) { - return; - } - } - } - }); - suspendingThread.setName(currentThreadName + "[LongGCDisruption][threadSuspender]"); - suspendingThread.start(); - try { - suspendingThread.join(getSuspendingTimeoutInMillis()); - } catch (InterruptedException e) { - suspendingThread.interrupt(); // best effort to signal suspending - throw new RuntimeException(e); - } - if (suspendingError.get() != null) { - throw new RuntimeException("unknown error while suspending threads", suspendingError.get()); - } - if (suspendingThread.isAlive()) { - logger.warn( - """ - failed to suspend node [{}]'s threads within [{}] millis. Suspending thread stack trace: - {} - Threads that weren't suspended: - {}""", - disruptedNode, - getSuspendingTimeoutInMillis(), - stackTrace(suspendingThread.getStackTrace()), - suspendedThreads.stream() - .map(t -> t.getName() + "\n----\n" + stackTrace(t.getStackTrace())) - .collect(Collectors.joining("\n")) - ); - suspendingThread.interrupt(); // best effort; - try { - /* - * We need to join on the suspending thread in case it has suspended a thread that is in a critical section and - * needs to be resumed. - */ - suspendingThread.join(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - throw new RuntimeException("suspending node threads took too long"); - } - // block detection checks if other threads are blocked waiting on an object that is held by one - // of the threads that was suspended - if (isBlockDetectionSupported()) { - blockDetectionThread = new Thread(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - if (e instanceof InterruptedException == false) { - throw new AssertionError("unexpected exception in blockDetectionThread", e); - } - } - - @Override - protected void doRun() throws Exception { - while (Thread.currentThread().isInterrupted() == false) { - ThreadInfo[] threadInfos = threadBean.dumpAllThreads(true, true); - for (ThreadInfo threadInfo : threadInfos) { - if (isDisruptedNodeThread(threadInfo.getThreadName()) == false - && threadInfo.getLockOwnerName() != null - && isDisruptedNodeThread(threadInfo.getLockOwnerName())) { - - // find ThreadInfo object of the blocking thread (if available) - ThreadInfo blockingThreadInfo = null; - for (ThreadInfo otherThreadInfo : threadInfos) { - if (otherThreadInfo.getThreadId() == threadInfo.getLockOwnerId()) { - blockingThreadInfo = otherThreadInfo; - break; - } - } - onBlockDetected(threadInfo, blockingThreadInfo); - } - } - Thread.sleep(getBlockDetectionIntervalInMillis()); - } - } - }); - blockDetectionThread.setName(currentThreadName + "[LongGCDisruption][blockDetection]"); - blockDetectionThread.start(); - } - success = true; - } finally { - if (success == false) { - stopBlockDetection(); - // resume threads if failed - resumeThreads(suspendedThreads); - suspendedThreads = null; - } - } - } else { - throw new IllegalStateException("can't disrupt twice, call stopDisrupting() first"); - } - } - - public boolean isDisruptedNodeThread(String threadName) { - return threadName.contains("[" + disruptedNode + "]"); - } - - private static String stackTrace(StackTraceElement[] stackTraceElements) { - return Arrays.stream(stackTraceElements).map(Object::toString).collect(Collectors.joining("\n")); - } - - @Override - public synchronized void stopDisrupting() { - stopBlockDetection(); - if (suspendedThreads != null) { - resumeThreads(suspendedThreads); - suspendedThreads = null; - } - } - - private void stopBlockDetection() { - if (blockDetectionThread != null) { - try { - blockDetectionThread.interrupt(); // best effort - blockDetectionThread.join(getSuspendingTimeoutInMillis()); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - blockDetectionThread = null; - } - } - - @Override - public void removeAndEnsureHealthy(InternalTestCluster cluster) { - removeFromCluster(cluster); - ensureNodeCount(cluster); - } - - @Override - public TimeValue expectedTimeToHeal() { - return TimeValue.timeValueMillis(0); - } - - /** - * resolves all threads belonging to given node and suspends them if their current stack trace - * is "safe". Threads are added to nodeThreads if suspended. - * - * returns true if some live threads were found. The caller is expected to call this method - * until no more "live" are found. - */ - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally - @SuppressForbidden(reason = "suspends/resumes threads intentionally") - protected boolean suspendThreads(Set nodeThreads) { - Thread[] allThreads = null; - while (allThreads == null) { - allThreads = new Thread[Thread.activeCount()]; - if (Thread.enumerate(allThreads) > allThreads.length) { - // we didn't make enough space, retry - allThreads = null; - } - } - boolean liveThreadsFound = false; - for (Thread thread : allThreads) { - if (thread == null) { - continue; - } - String threadName = thread.getName(); - if (isDisruptedNodeThread(threadName)) { - if (thread.isAlive() && nodeThreads.add(thread)) { - liveThreadsFound = true; - logger.trace("suspending thread [{}]", threadName); - // we assume it is not safe to suspend the thread - boolean safe = false; - try { - /* - * At the bottom of this try-block we will know whether or not it is safe to suspend the thread; we start by - * assuming that it is safe. - */ - boolean definitelySafe = true; - final long startTime = System.nanoTime(); - thread.suspend(); - if (System.nanoTime() - startTime > TimeUnit.SECONDS.toNanos(3L)) { - sawSlowSuspendBug.set(true); - } - // double check the thread is not in a shared resource like logging; if so, let it go and come back - safe: for (StackTraceElement stackElement : thread.getStackTrace()) { - String className = stackElement.getClassName(); - for (Pattern unsafePattern : getUnsafeClasses()) { - if (unsafePattern.matcher(className).find()) { - // it is definitely not safe to suspend the thread - definitelySafe = false; - break safe; - } - } - } - safe = definitelySafe; - } finally { - if (safe == false) { - /* - * Do not log before resuming as we might be interrupted while logging in which case we will throw an - * interrupted exception and never resume the suspended thread that is in a critical section. Also, logging - * before resuming makes for confusing log messages if we never hit the resume. - */ - thread.resume(); - logger.trace("resumed thread [{}] as it is in a critical section", threadName); - nodeThreads.remove(thread); - } - } - } - } - } - return liveThreadsFound; - } - - // for testing - protected Pattern[] getUnsafeClasses() { - return unsafeClasses; - } - - // for testing - protected long getSuspendingTimeoutInMillis() { - return TimeValue.timeValueSeconds(30).getMillis(); - } - - public boolean isBlockDetectionSupported() { - return threadBean.isObjectMonitorUsageSupported() && threadBean.isSynchronizerUsageSupported(); - } - - // for testing - protected long getBlockDetectionIntervalInMillis() { - return 3000L; - } - - // for testing - protected void onBlockDetected(ThreadInfo blockedThread, @Nullable ThreadInfo blockingThread) { - String blockedThreadStackTrace = stackTrace(blockedThread.getStackTrace()); - String blockingThreadStackTrace = blockingThread != null ? stackTrace(blockingThread.getStackTrace()) : "not available"; - throw new AssertionError( - "Thread [" - + blockedThread.getThreadName() - + "] is blocked waiting on the resource [" - + blockedThread.getLockInfo() - + "] held by the suspended thread [" - + blockedThread.getLockOwnerName() - + "] of the disrupted node [" - + disruptedNode - + "].\n" - + "Please add this occurrence to the unsafeClasses list in [" - + LongGCDisruption.class.getName() - + "].\n" - + "Stack trace of blocked thread: " - + blockedThreadStackTrace - + "\n" - + "Stack trace of blocking thread: " - + blockingThreadStackTrace - ); - } - - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally - @SuppressForbidden(reason = "suspends/resumes threads intentionally") - protected void resumeThreads(Set threads) { - for (Thread thread : threads) { - thread.resume(); - } - } -} diff --git a/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java b/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java deleted file mode 100644 index 72ecba8d502f1..0000000000000 --- a/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ -package org.elasticsearch.test.disruption; - -import org.elasticsearch.core.Nullable; -import org.elasticsearch.test.ESTestCase; -import org.junit.BeforeClass; - -import java.lang.management.ThreadInfo; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReentrantLock; -import java.util.regex.Pattern; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; - -public class LongGCDisruptionTests extends ESTestCase { - - static class LockedExecutor { - ReentrantLock lock = new ReentrantLock(); - - public void executeLocked(Runnable r) { - lock.lock(); - try { - r.run(); - } finally { - lock.unlock(); - } - } - } - - @BeforeClass - public static void ignoreJdk20Plus() { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - } - - public void testBlockingTimeout() throws Exception { - final String nodeName = "test_node"; - LongGCDisruption disruption = new LongGCDisruption(random(), nodeName) { - @Override - protected Pattern[] getUnsafeClasses() { - return new Pattern[] { Pattern.compile(LockedExecutor.class.getSimpleName()) }; - } - - @Override - protected long getSuspendingTimeoutInMillis() { - return 100; - } - }; - final AtomicBoolean stop = new AtomicBoolean(); - final CountDownLatch underLock = new CountDownLatch(1); - final CountDownLatch pauseUnderLock = new CountDownLatch(1); - final LockedExecutor lockedExecutor = new LockedExecutor(); - final AtomicLong ops = new AtomicLong(); - final Thread[] threads = new Thread[10]; - try { - for (int i = 0; i < 10; i++) { - // at least one locked and one none lock thread - final boolean lockedExec = (i < 9 && randomBoolean()) || i == 0; - threads[i] = new Thread(() -> { - while (stop.get() == false) { - if (lockedExec) { - lockedExecutor.executeLocked(() -> { - try { - underLock.countDown(); - ops.incrementAndGet(); - pauseUnderLock.await(); - } catch (InterruptedException e) { - - } - }); - } else { - ops.incrementAndGet(); - } - } - }); - threads[i].setName("[" + nodeName + "][" + i + "]"); - threads[i].start(); - } - // make sure some threads are under lock - underLock.await(); - RuntimeException e = expectThrows(RuntimeException.class, disruption::startDisrupting); - assertThat(e.getMessage(), containsString("suspending node threads took too long")); - } finally { - stop.set(true); - pauseUnderLock.countDown(); - for (final Thread thread : threads) { - thread.join(); - } - } - } - - /** - * Checks that a GC disruption never blocks threads while they are doing something "unsafe" - * but does keep retrying until all threads can be safely paused - */ - public void testNotBlockingUnsafeStackTraces() throws Exception { - final String nodeName = "test_node"; - LongGCDisruption disruption = new LongGCDisruption(random(), nodeName) { - @Override - protected Pattern[] getUnsafeClasses() { - return new Pattern[] { Pattern.compile(LockedExecutor.class.getSimpleName()) }; - } - }; - final AtomicBoolean stop = new AtomicBoolean(); - final LockedExecutor lockedExecutor = new LockedExecutor(); - final AtomicLong ops = new AtomicLong(); - final Thread[] threads = new Thread[5]; - final Runnable yieldAndIncrement = () -> { - Thread.yield(); // give some chance to catch this stack trace - ops.incrementAndGet(); - }; - try { - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - for (int iter = 0; stop.get() == false; iter++) { - if (iter % 2 == 0) { - lockedExecutor.executeLocked(yieldAndIncrement); - } else { - yieldAndIncrement.run(); - } - } - }); - threads[i].setName("[" + nodeName + "][" + i + "]"); - threads[i].start(); - } - // make sure some threads are under lock - try { - disruption.startDisrupting(); - } catch (RuntimeException e) { - if (e.getMessage().contains("suspending node threads took too long") && disruption.sawSlowSuspendBug()) { - return; - } - throw new AssertionError(e); - } - long first = ops.get(); - assertThat(lockedExecutor.lock.isLocked(), equalTo(false)); // no threads should own the lock - Thread.sleep(100); - assertThat(ops.get(), equalTo(first)); - disruption.stopDisrupting(); - assertBusy(() -> assertThat(ops.get(), greaterThan(first))); - } finally { - disruption.stopDisrupting(); - stop.set(true); - for (final Thread thread : threads) { - thread.join(); - } - } - } - - public void testBlockDetection() throws Exception { - final String disruptedNodeName = "disrupted_node"; - final String blockedNodeName = "blocked_node"; - CountDownLatch waitForBlockDetectionResult = new CountDownLatch(1); - AtomicReference blockDetectionResult = new AtomicReference<>(); - LongGCDisruption disruption = new LongGCDisruption(random(), disruptedNodeName) { - @Override - protected Pattern[] getUnsafeClasses() { - return new Pattern[0]; - } - - @Override - protected void onBlockDetected(ThreadInfo blockedThread, @Nullable ThreadInfo blockingThread) { - blockDetectionResult.set(blockedThread); - waitForBlockDetectionResult.countDown(); - } - - @Override - protected long getBlockDetectionIntervalInMillis() { - return 10L; - } - }; - if (disruption.isBlockDetectionSupported() == false) { - return; - } - final AtomicBoolean stop = new AtomicBoolean(); - final CountDownLatch underLock = new CountDownLatch(1); - final CountDownLatch pauseUnderLock = new CountDownLatch(1); - final LockedExecutor lockedExecutor = new LockedExecutor(); - final AtomicLong ops = new AtomicLong(); - final List threads = new ArrayList<>(); - try { - for (int i = 0; i < 5; i++) { - // at least one locked and one none lock thread - final boolean lockedExec = (i < 4 && randomBoolean()) || i == 0; - Thread thread = new Thread(() -> { - while (stop.get() == false) { - if (lockedExec) { - lockedExecutor.executeLocked(() -> { - try { - underLock.countDown(); - ops.incrementAndGet(); - pauseUnderLock.await(); - } catch (InterruptedException e) { - - } - }); - } else { - ops.incrementAndGet(); - } - } - }); - - thread.setName("[" + disruptedNodeName + "][" + i + "]"); - threads.add(thread); - thread.start(); - } - - for (int i = 0; i < 5; i++) { - // at least one locked and one none lock thread - final boolean lockedExec = (i < 4 && randomBoolean()) || i == 0; - Thread thread = new Thread(() -> { - while (stop.get() == false) { - if (lockedExec) { - lockedExecutor.executeLocked(() -> { ops.incrementAndGet(); }); - } else { - ops.incrementAndGet(); - } - } - }); - thread.setName("[" + blockedNodeName + "][" + i + "]"); - threads.add(thread); - thread.start(); - } - // make sure some threads of test_node are under lock - underLock.await(); - disruption.startDisrupting(); - assertTrue(waitForBlockDetectionResult.await(30, TimeUnit.SECONDS)); - disruption.stopDisrupting(); - - ThreadInfo threadInfo = blockDetectionResult.get(); - assertNotNull(threadInfo); - assertThat(threadInfo.getThreadName(), containsString("[" + blockedNodeName + "]")); - assertThat(threadInfo.getLockOwnerName(), containsString("[" + disruptedNodeName + "]")); - assertThat(threadInfo.getLockInfo().getClassName(), containsString(ReentrantLock.class.getName())); - } finally { - stop.set(true); - pauseUnderLock.countDown(); - for (final Thread thread : threads) { - thread.join(); - } - } - } -} From 7544c88c128e1a5b27e2291c7bcf1a461ecac6bd Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 23 Oct 2024 10:29:05 -0700 Subject: [PATCH 021/324] Consolidate @Before of rolling upgrade tests (#114677) Multiple @Before methods in junit are run in random order. This commit cosolidates the @Before methods of ParameterizedRollingUpgradeTestCase since the code has interdependencies. closes #114330 --- .../upgrades/ParameterizedRollingUpgradeTestCase.java | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java index e7cff5cca5a92..a20981a119d8f 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java @@ -55,14 +55,13 @@ public static Iterable parameters() { protected abstract ElasticsearchCluster getUpgradeCluster(); @Before - public void extractOldClusterFeatures() { + public void upgradeNode() throws Exception { + // extract old cluster features if (isOldCluster() && oldClusterTestFeatureService == null) { oldClusterTestFeatureService = testFeatureService; } - } - @Before - public void extractOldIndexVersion() throws Exception { + // extract old index version if (oldIndexVersion == null && upgradedNodes.isEmpty()) { IndexVersion indexVersion = null; // these should all be the same version @@ -93,13 +92,11 @@ public void extractOldIndexVersion() throws Exception { assertThat("Index version could not be read", indexVersion, notNullValue()); oldIndexVersion = indexVersion; } - } - @Before - public void upgradeNode() throws Exception { // Skip remaining tests if upgrade failed assumeFalse("Cluster upgrade failed", upgradeFailed); + // finally, upgrade node if (upgradedNodes.size() < requestedUpgradedNodes) { closeClients(); // we might be running a specific upgrade test by itself - check previous nodes too From b31e5c9609f65dd09800d8caaceb4c00881a8a8e Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Wed, 23 Oct 2024 13:35:50 -0400 Subject: [PATCH 022/324] Refactor the download_database_on_pipeline_creation checks (#115421) --- .../geoip/GeoIpDownloaderTaskExecutor.java | 4 ++-- .../ingest/geoip/GeoIpProcessor.java | 16 +++++++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index e4150005ed1ae..61ca050d91c13 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -309,14 +309,14 @@ private static boolean hasAtLeastOneGeoipProcessor(Map processor { final Map processorConfig = (Map) processor.get(GEOIP_TYPE); if (processorConfig != null) { - return downloadDatabaseOnPipelineCreation(GEOIP_TYPE, processorConfig, null) == downloadDatabaseOnPipelineCreation; + return downloadDatabaseOnPipelineCreation(processorConfig) == downloadDatabaseOnPipelineCreation; } } { final Map processorConfig = (Map) processor.get(IP_LOCATION_TYPE); if (processorConfig != null) { - return downloadDatabaseOnPipelineCreation(IP_LOCATION_TYPE, processorConfig, null) == downloadDatabaseOnPipelineCreation; + return downloadDatabaseOnPipelineCreation(processorConfig) == downloadDatabaseOnPipelineCreation; } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index 9508bf0346058..f99f8dbe2fdd0 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -238,9 +238,8 @@ public Processor create( boolean ignoreMissing = readBooleanProperty(type, processorTag, config, "ignore_missing", false); boolean firstOnly = readBooleanProperty(type, processorTag, config, "first_only", true); - // Validating the download_database_on_pipeline_creation even if the result - // is not used directly by the factory. - downloadDatabaseOnPipelineCreation(type, config, processorTag); + // validate (and consume) the download_database_on_pipeline_creation property even though the result is not used by the factory + readBooleanProperty(type, processorTag, config, "download_database_on_pipeline_creation", true); // noop, should be removed in 9.0 Object value = config.remove("fallback_to_default_databases"); @@ -319,8 +318,15 @@ public Processor create( ); } - public static boolean downloadDatabaseOnPipelineCreation(String type, Map config, String processorTag) { - return readBooleanProperty(type, processorTag, config, "download_database_on_pipeline_creation", true); + /** + * Get the value of the "download_database_on_pipeline_creation" property from a processor's config map. + *

+ * As with the actual property definition, the default value of the property is 'true'. Unlike the actual + * property definition, this method doesn't consume (that is, config.remove) the property from + * the config map. + */ + public static boolean downloadDatabaseOnPipelineCreation(Map config) { + return (boolean) config.getOrDefault("download_database_on_pipeline_creation", true); } } From 254cedc988f3bb3b6c731cb1c2cb33c4f8bc018a Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Wed, 23 Oct 2024 20:36:44 +0300 Subject: [PATCH 023/324] Separate tests for snapshot and release versions (#115402) --- .../xpack/esql/action/EsqlCapabilities.java | 12 ++- .../rest-api-spec/test/esql/60_usage.yml | 82 ++++++++++++++++++- 2 files changed, 92 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 5157a80022c39..5e336e6759b1e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -416,7 +416,17 @@ public enum Cap { * Fix for an optimization that caused wrong results * https://github.com/elastic/elasticsearch/issues/115281 */ - FIX_FILTER_PUSHDOWN_PAST_STATS; + FIX_FILTER_PUSHDOWN_PAST_STATS, + + /** + * This enables 60_usage.yml "Basic ESQL usage....snapshot" version test. See also the next capability. + */ + SNAPSHOT_TEST_FOR_TELEMETRY(Build.current().isSnapshot()), + + /** + * This enables 60_usage.yml "Basic ESQL usage....non-snapshot" version test. See also the previous capability. + */ + NON_SNAPSHOT_TEST_FOR_TELEMETRY(Build.current().isSnapshot() == false); private final boolean enabled; diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml index 7d1a4e123299b..b51bbdc4d2f87 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml @@ -23,7 +23,86 @@ setup: type: integer --- -"Basic ESQL usage output (telemetry)": +"Basic ESQL usage output (telemetry) snapshot version": + - requires: + test_runner_features: [capabilities] + capabilities: + - method: POST + path: /_query + parameters: [] + capabilities: [ snapshot_test_for_telemetry ] + reason: "Test that should only be executed on snapshot versions" + + - do: {xpack.usage: {}} + - match: { esql.available: true } + - match: { esql.enabled: true } + - length: { esql.features: 15 } + - set: {esql.features.dissect: dissect_counter} + - set: {esql.features.drop: drop_counter} + - set: {esql.features.eval: eval_counter} + - set: {esql.features.enrich: enrich_counter} + - set: {esql.features.from: from_counter} + - set: {esql.features.grok: grok_counter} + - set: {esql.features.keep: keep_counter} + - set: {esql.features.limit: limit_counter} + - set: {esql.features.mv_expand: mv_expand_counter} + - set: {esql.features.rename: rename_counter} + - set: {esql.features.row: row_counter} + - set: {esql.features.show: show_counter} + - set: {esql.features.sort: sort_counter} + - set: {esql.features.stats: stats_counter} + - set: {esql.features.where: where_counter} + - length: { esql.queries: 3 } + - set: {esql.queries.rest.total: rest_total_counter} + - set: {esql.queries.rest.failed: rest_failed_counter} + - set: {esql.queries.kibana.total: kibana_total_counter} + - set: {esql.queries.kibana.failed: kibana_failed_counter} + - set: {esql.queries._all.total: all_total_counter} + - set: {esql.queries._all.failed: all_failed_counter} + - set: {esql.functions.max: functions_max} + - set: {esql.functions.min: functions_min} + - set: {esql.functions.cos: functions_cos} + - set: {esql.functions.to_long: functions_to_long} + - set: {esql.functions.coalesce: functions_coalesce} + + - do: + esql.query: + body: + query: 'from test | where data > 2 and to_long(data) > 2 | sort count desc | limit 5 | stats m = max(data)' + + - do: {xpack.usage: {}} + - match: { esql.available: true } + - match: { esql.enabled: true } + - match: {esql.features.dissect: $dissect_counter} + - match: {esql.features.eval: $eval_counter} + - match: {esql.features.grok: $grok_counter} + - gt: {esql.features.limit: $limit_counter} + - gt: {esql.features.sort: $sort_counter} + - gt: {esql.features.stats: $stats_counter} + - gt: {esql.features.where: $where_counter} + - gt: {esql.queries.rest.total: $rest_total_counter} + - match: {esql.queries.rest.failed: $rest_failed_counter} + - match: {esql.queries.kibana.total: $kibana_total_counter} + - match: {esql.queries.kibana.failed: $kibana_failed_counter} + - gt: {esql.queries._all.total: $all_total_counter} + - match: {esql.queries._all.failed: $all_failed_counter} + - gt: {esql.functions.max: $functions_max} + - match: {esql.functions.min: $functions_min} + - match: {esql.functions.cos: $functions_cos} + - gt: {esql.functions.to_long: $functions_to_long} + - match: {esql.functions.coalesce: $functions_coalesce} + - length: {esql.functions: 117} # check the "sister" test below for a likely update to the same esql.functions length check + +--- +"Basic ESQL usage output (telemetry) non-snapshot version": + - requires: + test_runner_features: [capabilities] + capabilities: + - method: POST + path: /_query + parameters: [] + capabilities: [ non_snapshot_test_for_telemetry ] + reason: "Test that should only be executed on release versions" - do: {xpack.usage: {}} - match: { esql.available: true } @@ -83,3 +162,4 @@ setup: - match: {esql.functions.cos: $functions_cos} - gt: {esql.functions.to_long: $functions_to_long} - match: {esql.functions.coalesce: $functions_coalesce} + - length: {esql.functions: 115} # check the "sister" test above for a likely update to the same esql.functions length check From c851c25568f22455f75f8cbf99eb9b8a7b3f7302 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Wed, 23 Oct 2024 13:41:34 -0400 Subject: [PATCH 024/324] Refactor InferenceProcessorInfoExtractor to avoid ConfigurationUtils (#115425) --- .../InferenceProcessorInfoExtractor.java | 45 +++++++++---------- 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java index e61342d281c90..83f7832645270 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java @@ -10,9 +10,7 @@ import org.apache.lucene.util.Counter; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestMetadata; -import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.transport.Transports; import java.util.HashMap; @@ -24,6 +22,7 @@ import java.util.function.Consumer; import static org.elasticsearch.inference.InferenceResults.MODEL_ID_RESULTS_FIELD; +import static org.elasticsearch.ingest.Pipeline.ON_FAILURE_KEY; import static org.elasticsearch.ingest.Pipeline.PROCESSORS_KEY; /** @@ -53,16 +52,10 @@ public static int countInferenceProcessors(ClusterState state) { Counter counter = Counter.newCounter(); ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map configMap = configuration.getConfigAsMap(); - List> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List> processorConfigs = (List>) configMap.get(PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { - addModelsAndPipelines( - entry.getKey(), - pipelineId, - (Map) entry.getValue(), - pam -> counter.addAndGet(1), - 0 - ); + addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> counter.addAndGet(1), 0); } } }); @@ -73,7 +66,6 @@ public static int countInferenceProcessors(ClusterState state) { * @param ingestMetadata The ingestMetadata of current ClusterState * @return The set of model IDs referenced by inference processors */ - @SuppressWarnings("unchecked") public static Set getModelIdsFromInferenceProcessors(IngestMetadata ingestMetadata) { if (ingestMetadata == null) { return Set.of(); @@ -82,7 +74,7 @@ public static Set getModelIdsFromInferenceProcessors(IngestMetadata inge Set modelIds = new LinkedHashSet<>(); ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map configMap = configuration.getConfigAsMap(); - List> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> modelIds.add(pam.modelIdOrAlias()), 0); @@ -96,7 +88,6 @@ public static Set getModelIdsFromInferenceProcessors(IngestMetadata inge * @param state Current cluster state * @return a map from Model or Deployment IDs or Aliases to each pipeline referencing them. */ - @SuppressWarnings("unchecked") public static Map> pipelineIdsByResource(ClusterState state, Set ids) { assert Transports.assertNotTransportThread("non-trivial nested loops over cluster state structures"); Map> pipelineIdsByModelIds = new HashMap<>(); @@ -110,7 +101,7 @@ public static Map> pipelineIdsByResource(ClusterState state, } ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map configMap = configuration.getConfigAsMap(); - List> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> { @@ -128,7 +119,6 @@ public static Map> pipelineIdsByResource(ClusterState state, * @param state Current {@link ClusterState} * @return a map from Model or Deployment IDs or Aliases to each pipeline referencing them. */ - @SuppressWarnings("unchecked") public static Set pipelineIdsForResource(ClusterState state, Set ids) { assert Transports.assertNotTransportThread("non-trivial nested loops over cluster state structures"); Set pipelineIds = new HashSet<>(); @@ -142,7 +132,7 @@ public static Set pipelineIdsForResource(ClusterState state, Set } ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map configMap = configuration.getConfigAsMap(); - List> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> { @@ -188,7 +178,7 @@ private static void addModelsAndPipelines( addModelsAndPipelines( innerProcessorWithName.getKey(), pipelineId, - (Map) innerProcessorWithName.getValue(), + innerProcessorWithName.getValue(), handler, level + 1 ); @@ -196,13 +186,8 @@ private static void addModelsAndPipelines( } return; } - if (processorDefinition instanceof Map definitionMap && definitionMap.containsKey(Pipeline.ON_FAILURE_KEY)) { - List> onFailureConfigs = ConfigurationUtils.readList( - null, - null, - (Map) definitionMap, - Pipeline.ON_FAILURE_KEY - ); + if (processorDefinition instanceof Map definitionMap && definitionMap.containsKey(ON_FAILURE_KEY)) { + List> onFailureConfigs = readList(definitionMap, ON_FAILURE_KEY); onFailureConfigs.stream() .flatMap(map -> map.entrySet().stream()) .forEach(entry -> addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), handler, level + 1)); @@ -211,4 +196,16 @@ private static void addModelsAndPipelines( private record PipelineAndModel(String pipelineId, String modelIdOrAlias) {} + /** + * A local alternative to ConfigurationUtils.readList(...) that reads list properties out of the processor configuration map, + * but doesn't rely on mutating the configuration map. + */ + @SuppressWarnings("unchecked") + private static List> readList(Map processorConfig, String key) { + Object val = processorConfig.get(key); + if (val == null) { + throw new IllegalArgumentException("Missing required property [" + key + "]"); + } + return (List>) val; + } } From b6d078908ca7dce354d24f3bcf8db5d604488c14 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 23 Oct 2024 18:42:02 +0100 Subject: [PATCH 025/324] [ML] Add pathc transport version for change to Get Inference Request (#115250) --- .../src/main/java/org/elasticsearch/TransportVersions.java | 1 + .../core/inference/action/GetInferenceModelAction.java | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 7e06004e47cfb..6d9bf2ac52f2d 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -174,6 +174,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_PER_AGGREGATE_FILTER = def(8_770_00_0); public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0); public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); + public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1); public static final TransportVersion REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_773_00_0); public static final TransportVersion REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_774_00_0); public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java index 6e06133509644..ea0462d0f103e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java @@ -62,7 +62,8 @@ public Request(StreamInput in) throws IOException { super(in); this.inferenceEntityId = in.readString(); this.taskType = TaskType.fromStream(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ) + || in.getTransportVersion().isPatchFrom(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16)) { this.persistDefaultConfig = in.readBoolean(); } else { this.persistDefaultConfig = PERSIST_DEFAULT_CONFIGS; @@ -87,7 +88,8 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(inferenceEntityId); taskType.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ) + || out.getTransportVersion().isPatchFrom(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16)) { out.writeBoolean(this.persistDefaultConfig); } } From 57532e7b7fb1348c8abf50b225932cddea5937a9 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 23 Oct 2024 19:44:04 +0200 Subject: [PATCH 026/324] [test] Unmute FsDirectoryFactoryTests#testStoreDirectory (#115440) Resolve #110210 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index f59ca0c213279..19e8416c396c3 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -17,9 +17,6 @@ tests: - class: "org.elasticsearch.xpack.deprecation.DeprecationHttpIT" issue: "https://github.com/elastic/elasticsearch/issues/108628" method: "testDeprecatedSettingsReturnWarnings" -- class: org.elasticsearch.index.store.FsDirectoryFactoryTests - method: testStoreDirectory - issue: https://github.com/elastic/elasticsearch/issues/110210 - class: org.elasticsearch.index.store.FsDirectoryFactoryTests method: testPreload issue: https://github.com/elastic/elasticsearch/issues/110211 From e0acb56086a27c247867fa4b464973ebefef5230 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Wed, 23 Oct 2024 14:02:47 -0400 Subject: [PATCH 027/324] [ML] Mitigate IOSession timeouts (#115414) We are seeing exceptions ~0.03% of the time in our integration tests: ``` org.apache.http.ConnectionClosedException: Connection closed unexpectedly ``` The `contentDecoder` does not always fully consume the body within `SimpleInputBuffer.consumeContent`. When we return back to Apache, the rest of the body is never delivered, and the IOSession eventually times out and gets cleaned up. During that cleanup process, Apache calls our Consumer with the above exception. If we read 0 bytes and return back immediately, Apache has a better chance to load the rest of the body/footer, and it will call `consumeContent` again. This reduces the exception rate down to ~0.001%. Fix #114105 Fix #114232 Fix #114327 Fix #114385 --- docs/changelog/115414.yaml | 9 +++++++ muted-tests.yml | 12 --------- .../http/StreamingHttpResultPublisher.java | 26 +++++++++---------- .../services/InferenceEventsAssertion.java | 10 ++++--- .../anthropic/AnthropicServiceTests.java | 2 -- .../AzureAiStudioServiceTests.java | 2 -- .../azureopenai/AzureOpenAiServiceTests.java | 2 -- .../services/cohere/CohereServiceTests.java | 2 -- .../services/openai/OpenAiServiceTests.java | 2 -- 9 files changed, 27 insertions(+), 40 deletions(-) create mode 100644 docs/changelog/115414.yaml diff --git a/docs/changelog/115414.yaml b/docs/changelog/115414.yaml new file mode 100644 index 0000000000000..7475b765bb30e --- /dev/null +++ b/docs/changelog/115414.yaml @@ -0,0 +1,9 @@ +pr: 115414 +summary: Mitigate IOSession timeouts +area: Machine Learning +type: bug +issues: + - 114385 + - 114327 + - 114105 + - 114232 diff --git a/muted-tests.yml b/muted-tests.yml index 19e8416c396c3..cce16a07e647a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -215,24 +215,12 @@ tests: - class: org.elasticsearch.xpack.inference.TextEmbeddingCrudIT method: testPutE5Small_withPlatformSpecificVariant issue: https://github.com/elastic/elasticsearch/issues/113950 -- class: org.elasticsearch.xpack.inference.services.openai.OpenAiServiceTests - method: testInfer_StreamRequest_ErrorResponse - issue: https://github.com/elastic/elasticsearch/issues/114105 - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testGet issue: https://github.com/elastic/elasticsearch/issues/114135 - class: org.elasticsearch.xpack.ilm.ExplainLifecycleIT method: testStepInfoPreservedOnAutoRetry issue: https://github.com/elastic/elasticsearch/issues/114220 -- class: org.elasticsearch.xpack.inference.services.openai.OpenAiServiceTests - method: testInfer_StreamRequest - issue: https://github.com/elastic/elasticsearch/issues/114232 -- class: org.elasticsearch.xpack.inference.services.cohere.CohereServiceTests - method: testInfer_StreamRequest_ErrorResponse - issue: https://github.com/elastic/elasticsearch/issues/114327 -- class: org.elasticsearch.xpack.inference.services.cohere.CohereServiceTests - method: testInfer_StreamRequest - issue: https://github.com/elastic/elasticsearch/issues/114385 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/114412 diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java index bf74ca86a969a..0b2268a448c8a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java @@ -96,11 +96,10 @@ public void consumeContent(ContentDecoder contentDecoder, IOControl ioControl) t try { var consumed = inputBuffer.consumeContent(contentDecoder); - var allBytes = new byte[consumed]; - inputBuffer.read(allBytes); - - // we can have empty bytes, don't bother sending them - if (allBytes.length > 0) { + // we could have read 0 bytes if the body was delayed getting in, we need to return out so apache can load the body/footer + if (consumed > 0) { + var allBytes = new byte[consumed]; + inputBuffer.read(allBytes); queue.offer(() -> { subscriber.onNext(new HttpResult(response, allBytes)); var currentBytesInQueue = bytesInQueue.updateAndGet(current -> Long.max(0, current - allBytes.length)); @@ -111,18 +110,17 @@ public void consumeContent(ContentDecoder contentDecoder, IOControl ioControl) t } } }); - } - // always check if totalByteSize > the configured setting in case the settings change - if (bytesInQueue.accumulateAndGet(allBytes.length, Long::sum) >= settings.getMaxResponseSize().getBytes()) { - pauseProducer(ioControl); - } + // always check if totalByteSize > the configured setting in case the settings change + if (bytesInQueue.accumulateAndGet(allBytes.length, Long::sum) >= settings.getMaxResponseSize().getBytes()) { + pauseProducer(ioControl); + } - // always run in case we're waking up from a pause and need to start a new thread - taskRunner.requestNextRun(); + taskRunner.requestNextRun(); - if (listenerCalled.compareAndSet(false, true)) { - listener.onResponse(this); + if (listenerCalled.compareAndSet(false, true)) { + listener.onResponse(this); + } } } finally { inputBuffer.reset(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java index f23ea2aa414b2..7cfd231be39f3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.inference.InferenceServiceResults; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentFactory; import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; @@ -26,6 +25,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; +import static org.elasticsearch.test.ESTestCase.fail; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.CoreMatchers.is; @@ -47,7 +47,9 @@ public InferenceEventsAssertion hasFinishedStream() { } public InferenceEventsAssertion hasNoErrors() { - MatcherAssert.assertThat("Expected no errors from stream.", error, Matchers.nullValue()); + if (error != null) { + fail(error, "Expected no errors from stream."); + } return this; } @@ -66,7 +68,7 @@ public InferenceEventsAssertion hasErrorWithStatusCode(int statusCode) { } t = t.getCause(); } - ESTestCase.fail(error, "Expected an underlying ElasticsearchStatusException."); + fail(error, "Expected an underlying ElasticsearchStatusException."); return this; } @@ -79,7 +81,7 @@ public InferenceEventsAssertion hasErrorContaining(String message) { } t = t.getCause(); } - ESTestCase.fail(error, "Expected exception to contain string: " + message); + fail(error, "Expected exception to contain string: " + message); return this; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java index 8adf75b4c0a81..48277112d9306 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java @@ -532,7 +532,6 @@ public void testInfer_SendsCompletionRequest() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {"type": "message_start", "message": {"model": "claude, probably"}} @@ -578,7 +577,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ data: {"type": "error", "error": {"type": "request_too_large", "message": "blah"}} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index 44b0d17d9b448..e85edf573ba96 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -1308,7 +1308,6 @@ public void testInfer_UnauthorisedResponse() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {\ @@ -1364,7 +1363,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException, URISy } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index 900b666c0b8fb..3408fc358cac0 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -1425,7 +1425,6 @@ private void testChunkedInfer(AzureOpenAiEmbeddingsModel model) throws IOExcepti } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {\ @@ -1484,7 +1483,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException, URISy } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index cf114db45619f..758c38166778b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -1635,7 +1635,6 @@ public void testDefaultSimilarity() { assertEquals(SimilarityMeasure.DOT_PRODUCT, CohereService.defaultSimilarity()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ {"event_type":"text-generation", "text":"hello"} @@ -1669,7 +1668,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { "event_type":"stream-end", "finish_reason":"ERROR", "response":{ "text": "how dare you" } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index beba9b1a92477..cf1438b334478 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -1007,7 +1007,6 @@ public void testInfer_SendsRequest() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {\ @@ -1057,7 +1056,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { From 9eea83c45f1c800626d83067a9ecc9c1c9ef7e27 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 24 Oct 2024 05:08:01 +1100 Subject: [PATCH 028/324] Mute org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT testFileSettingsReprocessedOnRestartWithoutVersionChange #115450 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index cce16a07e647a..bd0145611237b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT method: test {yaml=ingest/80_ingest_simulate/Test mapping addition works with legacy templates} issue: https://github.com/elastic/elasticsearch/issues/115412 +- class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT + method: testFileSettingsReprocessedOnRestartWithoutVersionChange + issue: https://github.com/elastic/elasticsearch/issues/115450 # Examples: # From 176015d59b71e633822cec21042c6f07c4a7b415 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 23 Oct 2024 12:07:10 -0700 Subject: [PATCH 029/324] Temporarily disable buildkite upload on Windows agents (#115449) --- .../gradle/internal/ElasticsearchBuildCompletePlugin.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java index 25ad5bcf89581..7d9537feaea56 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -15,6 +15,7 @@ import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream; import org.apache.commons.io.IOUtils; +import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -61,7 +62,7 @@ public void apply(Project target) { ? System.getenv("BUILD_NUMBER") : System.getenv("BUILDKITE_BUILD_NUMBER"); String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST"); - if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) { + if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false && OS.current() != OS.WINDOWS) { File targetFile = calculateTargetFile(target, buildNumber); File projectDir = target.getProjectDir(); File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/"); From 60678a1a9caf69177c588f7fc3c4585013e027f2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 23 Oct 2024 15:10:50 -0400 Subject: [PATCH 030/324] ESQL: Fix filtered grouping on ords (#115312) This fixes filtered aggs when they are grouped on a field with ordinals. This looks like: ``` | STATS max = max(salary) WHERE salary > 0 BY job_positions ``` when the `job_positions` field is a keyword field with doc values. In that case we use a faster group-by-segment-ordinals algorithm that needs to be able to merge the results of aggregators from multiple segments. This previously failed with a `ClassCastException` because of a mistake. Also! the group-by-segment-ordinals algorithm wasn't properly releasing the closure used to add inputs, causing a breaker size leak. This wasn't really leaking memory, but leaking *tracking* of memory. Closes #114897 --- docs/changelog/115312.yaml | 6 + .../FilteredGroupingAggregatorFunction.java | 2 +- .../operator/OrdinalsGroupingOperator.java | 4 +- .../FilteredAggregatorFunctionTests.java | 1 - ...lteredGroupingAggregatorFunctionTests.java | 39 +++++- .../GroupingAggregatorFunctionTestCase.java | 7 +- .../src/main/resources/stats.csv-spec | 126 ++++++++++++++++++ .../xpack/esql/action/EsqlCapabilities.java | 5 + 8 files changed, 183 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/115312.yaml diff --git a/docs/changelog/115312.yaml b/docs/changelog/115312.yaml new file mode 100644 index 0000000000000..acf6bbc69c36c --- /dev/null +++ b/docs/changelog/115312.yaml @@ -0,0 +1,6 @@ +pr: 115312 +summary: "ESQL: Fix filtered grouping on ords" +area: ES|QL +type: bug +issues: + - 114897 diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java index 3e38b6d6fe9fa..8d3dbf3164c47 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java @@ -97,7 +97,7 @@ public void addIntermediateInput(int positionOffset, IntVector groupIdVector, Pa @Override public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { - next.addIntermediateRowInput(groupId, input, position); + next.addIntermediateRowInput(groupId, ((FilteredGroupingAggregatorFunction) input).next(), position); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java index 5e0e625abb914..7cf47bc7fed1c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java @@ -372,8 +372,8 @@ static final class OrdinalSegmentAggregator implements Releasable, SeenGroupIds } void addInput(IntVector docs, Page page) { + GroupingAggregatorFunction.AddInput[] prepared = new GroupingAggregatorFunction.AddInput[aggregators.size()]; try { - GroupingAggregatorFunction.AddInput[] prepared = new GroupingAggregatorFunction.AddInput[aggregators.size()]; for (int i = 0; i < prepared.length; i++) { prepared[i] = aggregators.get(i).prepareProcessPage(this, page); } @@ -392,7 +392,7 @@ void addInput(IntVector docs, Page page) { } catch (IOException e) { throw new UncheckedIOException(e); } finally { - page.releaseBlocks(); + Releasables.close(page::releaseBlocks, Releasables.wrap(prepared)); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java index da2c3502144db..35ecced470e01 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java @@ -27,7 +27,6 @@ public class FilteredAggregatorFunctionTests extends AggregatorFunctionTestCase { private final List unclosed = Collections.synchronizedList(new ArrayList<>()); - // TODO some version of this test that applies across all aggs @Override protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { return new FilteredAggregatorFunctionSupplier( diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java index 87cb99bd0709f..26971dc927cd1 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java @@ -11,12 +11,14 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.LongIntBlockSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.Tuple; import org.junit.After; @@ -31,7 +33,6 @@ public class FilteredGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { private final List unclosed = Collections.synchronizedList(new ArrayList<>()); - // TODO some version of this test that applies across all aggs @Override protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { return new FilteredAggregatorFunctionSupplier( @@ -104,6 +105,42 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { ); } + /** + * Tests {@link GroupingAggregator#addIntermediateRow} by building results using the traditional + * add mechanism and using {@link GroupingAggregator#addIntermediateRow} then asserting that they + * produce the same output. + */ + public void testAddIntermediateRowInput() { + DriverContext ctx = driverContext(); + AggregatorFunctionSupplier supplier = aggregatorFunction(channels(AggregatorMode.SINGLE)); + Block[] results = new Block[2]; + try ( + GroupingAggregatorFunction main = supplier.groupingAggregator(ctx); + GroupingAggregatorFunction leaf = supplier.groupingAggregator(ctx); + SourceOperator source = simpleInput(ctx.blockFactory(), 10); + ) { + Page p; + while ((p = source.getOutput()) != null) { + try ( + IntVector group = ctx.blockFactory().newConstantIntVector(0, p.getPositionCount()); + GroupingAggregatorFunction.AddInput addInput = leaf.prepareProcessPage(null, p) + ) { + addInput.add(0, group); + } finally { + p.releaseBlocks(); + } + } + main.addIntermediateRowInput(0, leaf, 0); + try (IntVector selected = ctx.blockFactory().newConstantIntVector(0, 1)) { + main.evaluateFinal(results, 0, selected, ctx); + leaf.evaluateFinal(results, 1, selected, ctx); + } + assertThat(results[0], equalTo(results[1])); + } finally { + Releasables.close(results); + } + } + @After public void checkUnclosed() { for (Exception tracker : unclosed) { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java index 9414e076a26e6..cb190dfffafb9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java @@ -89,14 +89,17 @@ protected final Operator.OperatorFactory simpleWithMode(AggregatorMode mode) { return simpleWithMode(mode, Function.identity()); } + protected List channels(AggregatorMode mode) { + return mode.isInputPartial() ? range(1, 1 + aggregatorIntermediateBlockCount()).boxed().toList() : List.of(1); + } + private Operator.OperatorFactory simpleWithMode( AggregatorMode mode, Function wrap ) { - List channels = mode.isInputPartial() ? range(1, 1 + aggregatorIntermediateBlockCount()).boxed().toList() : List.of(1); int emitChunkSize = between(100, 200); - AggregatorFunctionSupplier supplier = wrap.apply(aggregatorFunction(channels)); + AggregatorFunctionSupplier supplier = wrap.apply(aggregatorFunction(channels(mode))); if (randomBoolean()) { supplier = chunkGroups(emitChunkSize, supplier); } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 6d4c596e8d7de..2dc21a86e6394 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -2529,3 +2529,129 @@ FROM employees | eval x = [1,2,3], y = 5 + 6 | stats m = max(y) by y+1 m:integer | y+1:integer 11 | 12 ; + +filterIsAlwaysTrue +required_capability: per_agg_filtering +FROM employees +| STATS max = max(salary) WHERE salary > 0 +; + +max:integer +74999 +; + +filterIsAlwaysFalse +required_capability: per_agg_filtering +FROM employees +| STATS max = max(salary) WHERE first_name == "" +; + +max:integer +null +; + +filterSometimesMatches +required_capability: per_agg_filtering +FROM employees +| STATS max = max(salary) WHERE first_name IS NULL +; + +max:integer +70011 +; + +groupingFilterIsAlwaysTrue +required_capability: per_agg_filtering +FROM employees +| MV_EXPAND job_positions +| STATS max = max(salary) WHERE salary > 0 BY job_positions = SUBSTRING(job_positions, 1, 1) +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +74970 | A +58121 | B +74999 | D +58715 | H +; + +groupingFilterIsAlwaysFalse +required_capability: per_agg_filtering +FROM employees +| MV_EXPAND job_positions +| STATS max = max(salary) WHERE first_name == "" BY job_positions = SUBSTRING(job_positions, 1, 1) +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +null | A +null | B +null | D +null | H +; + +groupingFilterSometimesMatches +required_capability: per_agg_filtering +FROM employees +| MV_EXPAND job_positions +| STATS max = max(salary) WHERE first_name IS NULL BY job_positions = SUBSTRING(job_positions, 1, 1) +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +62233 | A +39878 | B +67492 | D +null | H +; + +groupingByOrdinalsFilterIsAlwaysTrue +required_capability: per_agg_filtering +required_capability: per_agg_filtering_ords +FROM employees +| STATS max = max(salary) WHERE salary > 0 BY job_positions +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +74970 | Accountant +69904 | Architect +58121 | Business Analyst +74999 | Data Scientist +; + +groupingByOrdinalsFilterIsAlwaysFalse +required_capability: per_agg_filtering +required_capability: per_agg_filtering_ords +FROM employees +| STATS max = max(salary) WHERE first_name == "" BY job_positions +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +null | Accountant +null | Architect +null | Business Analyst +null | Data Scientist +; + +groupingByOrdinalsFilterSometimesMatches +required_capability: per_agg_filtering +required_capability: per_agg_filtering_ords +FROM employees +| STATS max = max(salary) WHERE first_name IS NULL BY job_positions +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +39878 | Accountant +62233 | Architect +39878 | Business Analyst +67492 | Data Scientist +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 5e336e6759b1e..dfca6ab2bf814 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -399,6 +399,11 @@ public enum Cap { */ PER_AGG_FILTERING, + /** + * Fix {@link #PER_AGG_FILTERING} grouped by ordinals. + */ + PER_AGG_FILTERING_ORDS, + /** * Fix for https://github.com/elastic/elasticsearch/issues/114714 */ From c9f995ad92145733900465e8383ab0f38882cda7 Mon Sep 17 00:00:00 2001 From: Ankita Kumar Date: Wed, 23 Oct 2024 16:17:40 -0400 Subject: [PATCH 031/324] Log reindexing failures (#112676) Wait for reindexing tasks to finish during shutdown for an amount of time defined by settings. Also log the number of reindexing tasks still in flight after the wait. --- .../index/reindex/ReindexNodeShutdownIT.java | 139 +++++++++++++ .../common/settings/ClusterSettings.java | 3 + .../java/org/elasticsearch/node/Node.java | 119 +---------- .../elasticsearch/node/NodeConstruction.java | 3 + .../node/ShutdownPrepareService.java | 184 ++++++++++++++++++ 5 files changed, 332 insertions(+), 116 deletions(-) create mode 100644 modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java create mode 100644 server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java new file mode 100644 index 0000000000000..4a001bb2d0969 --- /dev/null +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.node.ShutdownPrepareService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.TransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.node.ShutdownPrepareService.MAXIMUM_REINDEXING_TIMEOUT_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; + +/** + * Test that a wait added during shutdown is necessary for a large reindexing task to complete. + * The test works as follows: + * 1. Start a large (reasonably long running) reindexing request on the coordinator-only node. + * 2. Check that the reindexing task appears on the coordinating node + * 3. With a 10s timeout value for MAXIMUM_REINDEXING_TIMEOUT_SETTING, + * wait for the reindexing task to complete before closing the node + * 4. Confirm that the reindexing task succeeds with the wait (it will fail without it) + */ +@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST) +public class ReindexNodeShutdownIT extends ESIntegTestCase { + + protected static final String INDEX = "reindex-shutdown-index"; + protected static final String DEST_INDEX = "dest-index"; + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(ReindexPlugin.class); + } + + protected ReindexRequestBuilder reindex(String nodeName) { + return new ReindexRequestBuilder(internalCluster().client(nodeName)); + } + + public void testReindexWithShutdown() throws Exception { + final String masterNodeName = internalCluster().startMasterOnlyNode(); + final String dataNodeName = internalCluster().startDataOnlyNode(); + + final Settings COORD_SETTINGS = Settings.builder() + .put(MAXIMUM_REINDEXING_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(10)) + .build(); + final String coordNodeName = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + + ensureStableCluster(3); + + int numDocs = 20000; + createIndex(numDocs); + createReindexTaskAndShutdown(coordNodeName); + checkDestinationIndex(dataNodeName, numDocs); + } + + private void createIndex(int numDocs) { + // INDEX will be created on the dataNode + createIndex(INDEX); + + logger.debug("setting up [{}] docs", numDocs); + indexRandom( + true, + false, + true, + IntStream.range(0, numDocs) + .mapToObj(i -> prepareIndex(INDEX).setId(String.valueOf(i)).setSource("n", i)) + .collect(Collectors.toList()) + ); + + // Checks that the all documents have been indexed and correctly counted + assertHitCount(prepareSearch(INDEX).setSize(0).setTrackTotalHits(true), numDocs); + } + + private void createReindexTaskAndShutdown(final String coordNodeName) throws Exception { + AbstractBulkByScrollRequestBuilder builder = reindex(coordNodeName).source(INDEX).destination(DEST_INDEX); + AbstractBulkByScrollRequest reindexRequest = builder.request(); + ShutdownPrepareService shutdownPrepareService = internalCluster().getInstance(ShutdownPrepareService.class, coordNodeName); + + TaskManager taskManager = internalCluster().getInstance(TransportService.class, coordNodeName).getTaskManager(); + + // Now execute the reindex action... + ActionListener reindexListener = new ActionListener() { + @Override + public void onResponse(BulkByScrollResponse bulkByScrollResponse) { + assertNull(bulkByScrollResponse.getReasonCancelled()); + logger.debug(bulkByScrollResponse.toString()); + } + + @Override + public void onFailure(Exception e) { + logger.debug("Encounterd " + e.toString()); + fail(e, "Encounterd " + e.toString()); + } + }; + internalCluster().client(coordNodeName).execute(ReindexAction.INSTANCE, reindexRequest, reindexListener); + + // Check for reindex task to appear in the tasks list and Immediately stop coordinating node + waitForTask(ReindexAction.INSTANCE.name(), coordNodeName); + shutdownPrepareService.prepareForShutdown(taskManager); + internalCluster().stopNode(coordNodeName); + } + + // Make sure all documents from the source index have been reindexed into the destination index + private void checkDestinationIndex(String dataNodeName, int numDocs) throws Exception { + assertTrue(indexExists(DEST_INDEX)); + flushAndRefresh(DEST_INDEX); + assertBusy(() -> { assertHitCount(prepareSearch(DEST_INDEX).setSize(0).setTrackTotalHits(true), numDocs); }); + } + + private static void waitForTask(String actionName, String nodeName) throws Exception { + assertBusy(() -> { + ListTasksResponse tasks = clusterAdmin().prepareListTasks(nodeName).setActions(actionName).setDetailed(true).get(); + tasks.rethrowFailures("Find my task"); + for (TaskInfo taskInfo : tasks.getTasks()) { + // Skip tasks with a parent because those are children of the task we want + if (taskInfo.parentTaskId().isSet() == false) return; + } + fail("Couldn't find task after waiting, tasks=" + tasks.getTasks()); + }, 10, TimeUnit.SECONDS); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 8cbacccb915ac..7bb78eabc8727 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -108,6 +108,7 @@ import org.elasticsearch.monitor.process.ProcessService; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeRoleSettings; +import org.elasticsearch.node.ShutdownPrepareService; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.plugins.PluginsService; @@ -456,6 +457,8 @@ public void apply(Settings value, Settings current, Settings previous) { Environment.PATH_SHARED_DATA_SETTING, NodeEnvironment.NODE_ID_SEED_SETTING, Node.INITIAL_STATE_TIMEOUT_SETTING, + ShutdownPrepareService.MAXIMUM_SHUTDOWN_TIMEOUT_SETTING, + ShutdownPrepareService.MAXIMUM_REINDEXING_TIMEOUT_SETTING, DiscoveryModule.DISCOVERY_TYPE_SETTING, DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING, DiscoveryModule.ELECTION_STRATEGY_SETTING, diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 32a65302922a8..e30f76fdd9414 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -13,10 +13,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.action.search.TransportSearchAction; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.RefCountingListener; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.client.internal.Client; @@ -82,7 +78,6 @@ import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.TaskCancellationService; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.tasks.TaskResultsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterPortSettings; @@ -106,18 +101,12 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.function.BiConsumer; import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.Collectors; import javax.net.ssl.SNIHostName; -import static org.elasticsearch.core.Strings.format; - /** * A node represent a node within a cluster ({@code cluster.name}). The {@link #client()} can be used * in order to use a {@link Client} to perform actions/operations against the cluster. @@ -161,12 +150,6 @@ public class Node implements Closeable { Property.NodeScope ); - public static final Setting MAXIMUM_SHUTDOWN_TIMEOUT_SETTING = Setting.positiveTimeSetting( - "node.maximum_shutdown_grace_period", - TimeValue.ZERO, - Setting.Property.NodeScope - ); - private final Lifecycle lifecycle = new Lifecycle(); /** @@ -187,6 +170,7 @@ public class Node implements Closeable { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; private final TerminationHandler terminationHandler; + // for testing final NamedWriteableRegistry namedWriteableRegistry; final NamedXContentRegistry namedXContentRegistry; @@ -606,105 +590,8 @@ public synchronized void close() throws IOException { * logic should use Node Shutdown, see {@link org.elasticsearch.cluster.metadata.NodesShutdownMetadata}. */ public void prepareForClose() { - final var maxTimeout = MAXIMUM_SHUTDOWN_TIMEOUT_SETTING.get(this.settings()); - - record Stopper(String name, SubscribableListener listener) { - boolean isIncomplete() { - return listener().isDone() == false; - } - } - - final var stoppers = new ArrayList(); - final var allStoppersFuture = new PlainActionFuture(); - try (var listeners = new RefCountingListener(allStoppersFuture)) { - final BiConsumer stopperRunner = (name, action) -> { - final var stopper = new Stopper(name, new SubscribableListener<>()); - stoppers.add(stopper); - stopper.listener().addListener(listeners.acquire()); - new Thread(() -> { - try { - action.run(); - } catch (Exception ex) { - logger.warn("unexpected exception in shutdown task [" + stopper.name() + "]", ex); - } finally { - stopper.listener().onResponse(null); - } - }, stopper.name()).start(); - }; - - stopperRunner.accept("http-server-transport-stop", injector.getInstance(HttpServerTransport.class)::close); - stopperRunner.accept("async-search-stop", () -> awaitSearchTasksComplete(maxTimeout)); - if (terminationHandler != null) { - stopperRunner.accept("termination-handler-stop", terminationHandler::handleTermination); - } - } - - final Supplier incompleteStoppersDescriber = () -> stoppers.stream() - .filter(Stopper::isIncomplete) - .map(Stopper::name) - .collect(Collectors.joining(", ", "[", "]")); - - try { - if (TimeValue.ZERO.equals(maxTimeout)) { - allStoppersFuture.get(); - } else { - allStoppersFuture.get(maxTimeout.millis(), TimeUnit.MILLISECONDS); - } - } catch (ExecutionException e) { - assert false : e; // listeners are never completed exceptionally - logger.warn("failed during graceful shutdown tasks", e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("interrupted while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get(), e); - } catch (TimeoutException e) { - logger.warn("timed out while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get()); - } - } - - private void awaitSearchTasksComplete(TimeValue asyncSearchTimeout) { - TaskManager taskManager = injector.getInstance(TransportService.class).getTaskManager(); - long millisWaited = 0; - while (true) { - long searchTasksRemaining = taskManager.getTasks() - .values() - .stream() - .filter(task -> TransportSearchAction.TYPE.name().equals(task.getAction())) - .count(); - if (searchTasksRemaining == 0) { - logger.debug("all search tasks complete"); - return; - } else { - // Let the system work on those searches for a while. We're on a dedicated thread to manage app shutdown, so we - // literally just want to wait and not take up resources on this thread for now. Poll period chosen to allow short - // response times, but checking the tasks list is relatively expensive, and we don't want to waste CPU time we could - // be spending on finishing those searches. - final TimeValue pollPeriod = TimeValue.timeValueMillis(500); - millisWaited += pollPeriod.millis(); - if (TimeValue.ZERO.equals(asyncSearchTimeout) == false && millisWaited >= asyncSearchTimeout.millis()) { - logger.warn( - format( - "timed out after waiting [%s] for [%d] search tasks to finish", - asyncSearchTimeout.toString(), - searchTasksRemaining - ) - ); - return; - } - logger.debug(format("waiting for [%s] search tasks to finish, next poll in [%s]", searchTasksRemaining, pollPeriod)); - try { - Thread.sleep(pollPeriod.millis()); - } catch (InterruptedException ex) { - logger.warn( - format( - "interrupted while waiting [%s] for [%d] search tasks to finish", - asyncSearchTimeout.toString(), - searchTasksRemaining - ) - ); - return; - } - } - } + injector.getInstance(ShutdownPrepareService.class) + .prepareForShutdown(injector.getInstance(TransportService.class).getTaskManager()); } /** diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 8e66486329577..7e3991c1df1f4 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -1099,6 +1099,8 @@ private void construct( telemetryProvider.getTracer() ); + final ShutdownPrepareService shutdownPrepareService = new ShutdownPrepareService(settings, httpServerTransport, terminationHandler); + modules.add( loadPersistentTasksService( settingsModule, @@ -1200,6 +1202,7 @@ private void construct( b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions); b.bind(DataStreamAutoShardingService.class).toInstance(dataStreamAutoShardingService); b.bind(FailureStoreMetrics.class).toInstance(failureStoreMetrics); + b.bind(ShutdownPrepareService.class).toInstance(shutdownPrepareService); }); if (ReadinessService.enabled(environment)) { diff --git a/server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java b/server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java new file mode 100644 index 0000000000000..ab9537053f45d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java @@ -0,0 +1,184 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.node; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.node.internal.TerminationHandler; +import org.elasticsearch.tasks.TaskManager; + +import java.util.ArrayList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.BiConsumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.elasticsearch.core.Strings.format; + +/** + * This class was created to extract out the logic from {@link Node#prepareForClose()} to facilitate testing. + *

+ * Invokes hooks to prepare this node to be closed. This should be called when Elasticsearch receives a request to shut down + * gracefully from the underlying operating system, before system resources are closed. + *

+ * Note that this class is part of infrastructure to react to signals from the operating system - most graceful shutdown + * logic should use Node Shutdown, see {@link org.elasticsearch.cluster.metadata.NodesShutdownMetadata}. + */ +public class ShutdownPrepareService { + + private final Logger logger = LogManager.getLogger(ShutdownPrepareService.class); + private final Settings settings; + private final HttpServerTransport httpServerTransport; + private final TerminationHandler terminationHandler; + private volatile boolean hasBeenShutdown = false; + + public ShutdownPrepareService(Settings settings, HttpServerTransport httpServerTransport, TerminationHandler terminationHandler) { + this.settings = settings; + this.httpServerTransport = httpServerTransport; + this.terminationHandler = terminationHandler; + } + + public static final Setting MAXIMUM_SHUTDOWN_TIMEOUT_SETTING = Setting.positiveTimeSetting( + "node.maximum_shutdown_grace_period", + TimeValue.ZERO, + Setting.Property.NodeScope + ); + + public static final Setting MAXIMUM_REINDEXING_TIMEOUT_SETTING = Setting.positiveTimeSetting( + "node.maximum_reindexing_grace_period", + TimeValue.timeValueSeconds(10), + Setting.Property.NodeScope + ); + + /** + * Invokes hooks to prepare this node to be closed. This should be called when Elasticsearch receives a request to shut down + * gracefully from the underlying operating system, before system resources are closed. This method will block + * until the node is ready to shut down. + *

+ * Note that this class is part of infrastructure to react to signals from the operating system - most graceful shutdown + * logic should use Node Shutdown, see {@link org.elasticsearch.cluster.metadata.NodesShutdownMetadata}. + */ + public void prepareForShutdown(TaskManager taskManager) { + assert hasBeenShutdown == false; + hasBeenShutdown = true; + final var maxTimeout = MAXIMUM_SHUTDOWN_TIMEOUT_SETTING.get(settings); + final var reindexTimeout = MAXIMUM_REINDEXING_TIMEOUT_SETTING.get(settings); + + record Stopper(String name, SubscribableListener listener) { + boolean isIncomplete() { + return listener().isDone() == false; + } + } + + final var stoppers = new ArrayList(); + final var allStoppersFuture = new PlainActionFuture(); + try (var listeners = new RefCountingListener(allStoppersFuture)) { + final BiConsumer stopperRunner = (name, action) -> { + final var stopper = new Stopper(name, new SubscribableListener<>()); + stoppers.add(stopper); + stopper.listener().addListener(listeners.acquire()); + new Thread(() -> { + try { + action.run(); + } catch (Exception ex) { + logger.warn("unexpected exception in shutdown task [" + stopper.name() + "]", ex); + } finally { + stopper.listener().onResponse(null); + } + }, stopper.name()).start(); + }; + + stopperRunner.accept("http-server-transport-stop", httpServerTransport::close); + stopperRunner.accept("async-search-stop", () -> awaitSearchTasksComplete(maxTimeout, taskManager)); + stopperRunner.accept("reindex-stop", () -> awaitReindexTasksComplete(reindexTimeout, taskManager)); + if (terminationHandler != null) { + stopperRunner.accept("termination-handler-stop", terminationHandler::handleTermination); + } + } + + final Supplier incompleteStoppersDescriber = () -> stoppers.stream() + .filter(Stopper::isIncomplete) + .map(Stopper::name) + .collect(Collectors.joining(", ", "[", "]")); + + try { + if (TimeValue.ZERO.equals(maxTimeout)) { + allStoppersFuture.get(); + } else { + allStoppersFuture.get(maxTimeout.millis(), TimeUnit.MILLISECONDS); + } + } catch (ExecutionException e) { + assert false : e; // listeners are never completed exceptionally + logger.warn("failed during graceful shutdown tasks", e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("interrupted while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get(), e); + } catch (TimeoutException e) { + logger.warn("timed out while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get()); + } + } + + private void awaitTasksComplete(TimeValue timeout, String taskName, TaskManager taskManager) { + long millisWaited = 0; + while (true) { + long tasksRemaining = taskManager.getTasks().values().stream().filter(task -> taskName.equals(task.getAction())).count(); + if (tasksRemaining == 0) { + logger.debug("all " + taskName + " tasks complete"); + return; + } else { + // Let the system work on those tasks for a while. We're on a dedicated thread to manage app shutdown, so we + // literally just want to wait and not take up resources on this thread for now. Poll period chosen to allow short + // response times, but checking the tasks list is relatively expensive, and we don't want to waste CPU time we could + // be spending on finishing those tasks. + final TimeValue pollPeriod = TimeValue.timeValueMillis(500); + millisWaited += pollPeriod.millis(); + if (TimeValue.ZERO.equals(timeout) == false && millisWaited >= timeout.millis()) { + logger.warn( + format("timed out after waiting [%s] for [%d] " + taskName + " tasks to finish", timeout.toString(), tasksRemaining) + ); + return; + } + logger.debug(format("waiting for [%s] " + taskName + " tasks to finish, next poll in [%s]", tasksRemaining, pollPeriod)); + try { + Thread.sleep(pollPeriod.millis()); + } catch (InterruptedException ex) { + logger.warn( + format( + "interrupted while waiting [%s] for [%d] " + taskName + " tasks to finish", + timeout.toString(), + tasksRemaining + ) + ); + return; + } + } + } + } + + private void awaitSearchTasksComplete(TimeValue asyncSearchTimeout, TaskManager taskManager) { + awaitTasksComplete(asyncSearchTimeout, TransportSearchAction.NAME, taskManager); + } + + private void awaitReindexTasksComplete(TimeValue asyncReindexTimeout, TaskManager taskManager) { + awaitTasksComplete(asyncReindexTimeout, ReindexAction.NAME, taskManager); + } + +} From f04bf5c3561f19f30f21ba28419c8e7ed6ed7b3a Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Wed, 23 Oct 2024 13:22:26 -0700 Subject: [PATCH 032/324] Apply workaround for synthetic source of object arrays inside nested objects (#115275) --- rest-api-spec/build.gradle | 1 + .../21_synthetic_source_stored.yml | 11 ++- .../index/mapper/DocumentParser.java | 6 +- .../index/mapper/DocumentParserContext.java | 39 +++++--- .../mapper/IgnoredSourceFieldMapper.java | 3 + .../index/mapper/MapperFeatures.java | 3 +- .../mapper/IgnoredSourceFieldMapperTests.java | 88 +++++++++++++++++++ 7 files changed, 132 insertions(+), 19 deletions(-) diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 7525ff2dc12d2..4bd293f0a8641 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -59,4 +59,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.replaceValueInMatch("profile.shards.0.dfs.knn.0.query.0.description", "DocAndScoreQuery[0,...][0.009673266,...],0.009673266", "dfs knn vector profiling with vector_operations_count") task.skipTest("indices.sort/10_basic/Index Sort", "warning does not exist for compatibility") task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") + task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml index eab51427876aa..6a4e92f694220 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -319,8 +319,8 @@ object param - nested object array next to other fields: --- object param - nested object with stored array: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source + cluster_features: ["mapper.ignored_source.always_store_object_arrays_in_nested", "mapper.bwc_workaround_9_0"] + reason: requires fix to object array handling - do: indices.create: @@ -356,8 +356,11 @@ object param - nested object with stored array: sort: name - match: { hits.total.value: 2 } - match: { hits.hits.0._source.name: A } - - match: { hits.hits.0._source.nested_array_regular.0.b.c: [ 10, 100] } - - match: { hits.hits.0._source.nested_array_regular.1.b.c: [ 20, 200] } + # due to a workaround for #115261 + - match: { hits.hits.0._source.nested_array_regular.0.b.0.c: 10 } + - match: { hits.hits.0._source.nested_array_regular.0.b.1.c: 100 } + - match: { hits.hits.0._source.nested_array_regular.1.b.0.c: 20 } + - match: { hits.hits.0._source.nested_array_regular.1.b.1.c: 200 } - match: { hits.hits.1._source.name: B } - match: { hits.hits.1._source.nested_array_stored.0.b.0.c: 10 } - match: { hits.hits.1._source.nested_array_stored.0.b.1.c: 100 } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index bac987a3df96d..1ed0a117ddd89 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -810,8 +810,10 @@ private static void parseNonDynamicArray( boolean objectWithFallbackSyntheticSource = false; if (mapper instanceof ObjectMapper objectMapper) { mode = getSourceKeepMode(context, objectMapper.sourceKeepMode()); - objectWithFallbackSyntheticSource = (mode == Mapper.SourceKeepMode.ALL - || (mode == Mapper.SourceKeepMode.ARRAYS && objectMapper instanceof NestedObjectMapper == false)); + objectWithFallbackSyntheticSource = mode == Mapper.SourceKeepMode.ALL + // Inside nested objects we always store object arrays as a workaround for #115261. + || ((context.inNestedScope() || mode == Mapper.SourceKeepMode.ARRAYS) + && objectMapper instanceof NestedObjectMapper == false); } boolean fieldWithFallbackSyntheticSource = false; boolean fieldWithStoredArraySource = false; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index ef87ce52fbabf..3b1f1a6d2809a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -104,6 +104,16 @@ public int get() { } } + /** + * Defines the scope parser is currently in. + * This is used for synthetic source related logic during parsing. + */ + private enum Scope { + SINGLETON, + ARRAY, + NESTED + } + private final MappingLookup mappingLookup; private final MappingParserContext mappingParserContext; private final SourceToParse sourceToParse; @@ -112,7 +122,7 @@ public int get() { private final List ignoredFieldValues; private final List ignoredFieldsMissingValues; private boolean inArrayScopeEnabled; - private boolean inArrayScope; + private Scope currentScope; private final Map> dynamicMappers; private final DynamicMapperSize dynamicMappersSize; @@ -145,7 +155,7 @@ private DocumentParserContext( List ignoredFieldValues, List ignoredFieldsWithNoSource, boolean inArrayScopeEnabled, - boolean inArrayScope, + Scope currentScope, Map> dynamicMappers, Map dynamicObjectMappers, Map> dynamicRuntimeFields, @@ -167,7 +177,7 @@ private DocumentParserContext( this.ignoredFieldValues = ignoredFieldValues; this.ignoredFieldsMissingValues = ignoredFieldsWithNoSource; this.inArrayScopeEnabled = inArrayScopeEnabled; - this.inArrayScope = inArrayScope; + this.currentScope = currentScope; this.dynamicMappers = dynamicMappers; this.dynamicObjectMappers = dynamicObjectMappers; this.dynamicRuntimeFields = dynamicRuntimeFields; @@ -192,7 +202,7 @@ private DocumentParserContext(ObjectMapper parent, ObjectMapper.Dynamic dynamic, in.ignoredFieldValues, in.ignoredFieldsMissingValues, in.inArrayScopeEnabled, - in.inArrayScope, + in.currentScope, in.dynamicMappers, in.dynamicObjectMappers, in.dynamicRuntimeFields, @@ -224,7 +234,7 @@ protected DocumentParserContext( new ArrayList<>(), new ArrayList<>(), mappingParserContext.getIndexSettings().isSyntheticSourceSecondDocParsingPassEnabled(), - false, + Scope.SINGLETON, new HashMap<>(), new HashMap<>(), new HashMap<>(), @@ -335,7 +345,7 @@ public final void deduplicateIgnoredFieldValues(final Set fullNames) { public final DocumentParserContext addIgnoredFieldFromContext(IgnoredSourceFieldMapper.NameValue ignoredFieldWithNoSource) throws IOException { if (canAddIgnoredField()) { - if (inArrayScope) { + if (currentScope == Scope.ARRAY) { // The field is an array within an array, store all sub-array elements. ignoredFieldsMissingValues.add(ignoredFieldWithNoSource); return cloneWithRecordedSource(); @@ -379,10 +389,10 @@ public final DocumentParserContext maybeCloneForArray(Mapper mapper) throws IOEx if (canAddIgnoredField() && mapper instanceof ObjectMapper && mapper instanceof NestedObjectMapper == false - && inArrayScope == false + && currentScope != Scope.ARRAY && inArrayScopeEnabled) { DocumentParserContext subcontext = switchParser(parser()); - subcontext.inArrayScope = true; + subcontext.currentScope = Scope.ARRAY; return subcontext; } return this; @@ -673,6 +683,10 @@ public boolean isWithinCopyTo() { return false; } + public boolean inNestedScope() { + return currentScope == Scope.NESTED; + } + public final DocumentParserContext createChildContext(ObjectMapper parent) { return new Wrapper(parent, this); } @@ -716,10 +730,11 @@ public LuceneDocument doc() { return document; } }; - // Disable tracking array scopes for ignored source, as it would be added to the parent doc. - // Nested documents are added to preserve object structure within arrays of objects, so the use - // of ignored source for arrays inside them should be mostly redundant. - cloned.inArrayScope = false; + + cloned.currentScope = Scope.NESTED; + // Disable using second parsing pass since it currently can not determine which parts + // of source belong to which nested document. + // See #115261. cloned.inArrayScopeEnabled = false; return cloned; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index 296c2c5311d9a..70d73fc2ffb9a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -58,6 +58,9 @@ public class IgnoredSourceFieldMapper extends MetadataFieldMapper { static final NodeFeature TRACK_IGNORED_SOURCE = new NodeFeature("mapper.track_ignored_source"); static final NodeFeature DONT_EXPAND_DOTS_IN_IGNORED_SOURCE = new NodeFeature("mapper.ignored_source.dont_expand_dots"); + static final NodeFeature ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS = new NodeFeature( + "mapper.ignored_source.always_store_object_arrays_in_nested" + ); /* Setting to disable encoding and writing values for this field. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 31c89b2fc8ad4..026c7c98d7aeb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -62,7 +62,8 @@ public Set getTestFeatures() { return Set.of( RangeFieldMapper.DATE_RANGE_INDEXING_FIX, IgnoredSourceFieldMapper.DONT_EXPAND_DOTS_IN_IGNORED_SOURCE, - SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION + SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION, + IgnoredSourceFieldMapper.ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS ); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index 934744ef3ef96..7a4ce8bcb03fa 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -962,6 +962,94 @@ public void testArrayWithNestedObjects() throws IOException { {"path":{"to":[{"id":[1,20,3]},{"id":10},{"id":0}]}}""", syntheticSource); } + public void testObjectArrayWithinNestedObjects() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").startObject("properties"); + { + b.startObject("to").field("type", "nested").startObject("properties"); + { + b.startObject("obj").startObject("properties"); + { + b.startObject("id").field("type", "integer").field("synthetic_source_keep", "arrays").endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + })).documentMapper(); + + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startObject("to"); + { + b.startArray("obj"); + { + b.startObject().array("id", 1, 20, 3).endObject(); + b.startObject().field("id", 10).endObject(); + } + b.endArray(); + } + b.endObject(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"to":{"obj":[{"id":[1,20,3]},{"id":10}]}}}""", syntheticSource); + } + + public void testObjectArrayWithinNestedObjectsArray() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").startObject("properties"); + { + b.startObject("to").field("type", "nested").startObject("properties"); + { + b.startObject("obj").startObject("properties"); + { + b.startObject("id").field("type", "integer").field("synthetic_source_keep", "arrays").endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + })).documentMapper(); + + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startArray("to"); + { + b.startObject(); + { + b.startArray("obj"); + { + b.startObject().array("id", 1, 20, 3).endObject(); + b.startObject().field("id", 10).endObject(); + } + b.endArray(); + } + b.endObject(); + b.startObject(); + { + b.startArray("obj"); + { + b.startObject().array("id", 200, 300, 500).endObject(); + b.startObject().field("id", 100).endObject(); + } + b.endArray(); + } + b.endObject(); + } + b.endArray(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"to":[{"obj":[{"id":[1,20,3]},{"id":10}]},{"obj":[{"id":[200,300,500]},{"id":100}]}]}}""", syntheticSource); + } + public void testArrayWithinArray() throws IOException { DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { b.startObject("path"); From d8bcbb6bede44334719d1879bf8603425e29a731 Mon Sep 17 00:00:00 2001 From: Paul Tavares <56442535+paul-tavares@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:29:34 -0400 Subject: [PATCH 033/324] [Security Solution] Add `create_index` to `kibana_system` role for Elastic Defend indices (#115241) Adds create_index privilege to the kibana_system role for Elastic Defend internal indices Indices: ``` .logs-endpoint.heartbeat-* .logs-endpoint.diagnostic.collection-* .logs-endpoint.action.responses-* ``` --- docs/changelog/115241.yaml | 6 ++++ .../KibanaOwnedReservedRoleDescriptors.java | 19 +++++++---- .../authz/store/ReservedRolesStoreTests.java | 34 +++++++++---------- 3 files changed, 35 insertions(+), 24 deletions(-) create mode 100644 docs/changelog/115241.yaml diff --git a/docs/changelog/115241.yaml b/docs/changelog/115241.yaml new file mode 100644 index 0000000000000..b7119d7f6aaeb --- /dev/null +++ b/docs/changelog/115241.yaml @@ -0,0 +1,6 @@ +pr: 115241 +summary: "[Security Solution] Add `create_index` to `kibana_system` role for index/DS\ + \ `.logs-endpoint.action.responses-*`" +area: Authorization +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 0028508e87f32..5fb753ab55aab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -152,8 +152,11 @@ static RoleDescriptor kibanaSystem(String name) { // Data telemetry reads mappings, metadata and stats of indices RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("view_index_metadata", "monitor").build(), // Endpoint diagnostic information. Kibana reads from these indices to send - // telemetry - RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.diagnostic.collection-*").privileges("read").build(), + // telemetry and also creates the index when policies are first created + RoleDescriptor.IndicesPrivileges.builder() + .indices(".logs-endpoint.diagnostic.collection-*") + .privileges("read", "create_index") + .build(), // Fleet secrets. Kibana can only write to this index. RoleDescriptor.IndicesPrivileges.builder() .indices(".fleet-secrets*") @@ -277,17 +280,19 @@ static RoleDescriptor kibanaSystem(String name) { ) .build(), // Endpoint specific action responses. Kibana reads and writes (for third party - // agents) to the index - // to display action responses to the user. + // agents) to the index to display action responses to the user. + // `create_index`: is necessary in order to ensure that the DOT datastream index is + // created by Kibana in order to avoid errors on the Elastic Defend side when streaming + // documents to it. RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-endpoint.action.responses-*") - .privileges("auto_configure", "read", "write") + .privileges("auto_configure", "read", "write", "create_index") .build(), // Endpoint specific actions. Kibana reads and writes to this index to track new // actions and display them. RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-endpoint.actions-*") - .privileges("auto_configure", "read", "write") + .privileges("auto_configure", "read", "write", "create_index") .build(), // Legacy Osquery manager specific action responses. Kibana reads from these to // display responses to the user. @@ -475,7 +480,7 @@ static RoleDescriptor kibanaSystem(String name) { RoleDescriptor.IndicesPrivileges.builder().indices(".slo-observability.*").privileges("all").build(), // Endpoint heartbeat. Kibana reads from these to determine metering/billing for // endpoints. - RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.heartbeat-*").privileges("read").build(), + RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.heartbeat-*").privileges("read", "create_index").build(), // For connectors telemetry. Will be removed once we switched to connectors API RoleDescriptor.IndicesPrivileges.builder().indices(".elastic-connectors*").privileges("read").build() }, null, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 26b306d6f1334..a71ac6a9b51fd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -801,7 +801,7 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(mockIndexAbstraction(index)), - is(false) + is(true) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); @@ -949,7 +949,7 @@ public void testKibanaSystemRole() { ); }); - // read-only index for Endpoint and Osquery manager specific action responses + // Elastic Defend internal index for response actions results Arrays.asList(".logs-endpoint.action.responses-" + randomAlphaOfLength(randomIntBetween(0, 13))).forEach((index) -> { final IndexAbstraction indexAbstraction = mockIndexAbstraction(index); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(indexAbstraction), is(false)); @@ -959,10 +959,7 @@ public void testKibanaSystemRole() { is(false) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1069,10 +1066,7 @@ public void testKibanaSystemRole() { is(false) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1097,10 +1091,7 @@ public void testKibanaSystemRole() { is(false) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1319,12 +1310,21 @@ public void testKibanaSystemRole() { final boolean isAlsoAutoCreateIndex = indexName.startsWith(".logs-endpoint.actions-") || indexName.startsWith(".logs-endpoint.action.responses-"); + + final boolean isAlsoCreateIndex = indexName.startsWith(".logs-endpoint.actions-") + || indexName.startsWith(".logs-endpoint.action.responses-") + || indexName.startsWith(".logs-endpoint.diagnostic.collection-") + || indexName.startsWith(".logs-endpoint.heartbeat-"); + assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) + is(isAlsoCreateIndex) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(indexAbstraction), is(isAlsoCreateIndex)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(CreateDataStreamAction.NAME).test(indexAbstraction), + is(isAlsoCreateIndex) ); - assertThat(kibanaRole.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(indexAbstraction), is(isAlsoAutoCreateIndex)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateDataStreamAction.NAME).test(indexAbstraction), is(false)); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(isAlsoAutoCreateIndex) From 0fc0922ff0aade7189fadda137bb5aa8a0474997 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 23 Oct 2024 22:10:45 +0100 Subject: [PATCH 034/324] [ML] Fix NPE in Get Deployment Stats (#115404) If a node has been removed from the cluster and the trained model assignment has not been updated the GET stats action can have an inconsistent view where it thinks a model is deployed on the removed node. The bug only affected nodes with failed deployments. --- docs/changelog/115404.yaml | 5 +++ .../TransportGetDeploymentStatsAction.java | 2 +- ...ransportGetDeploymentStatsActionTests.java | 39 +++++++++++++++++++ 3 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115404.yaml diff --git a/docs/changelog/115404.yaml b/docs/changelog/115404.yaml new file mode 100644 index 0000000000000..e443b152955f3 --- /dev/null +++ b/docs/changelog/115404.yaml @@ -0,0 +1,5 @@ +pr: 115404 +summary: Fix NPE in Get Deployment Stats +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 980cdc09252cb..9ebc510af4f4d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -220,7 +220,7 @@ static GetDeploymentStatsAction.Response addFailedRoutes( // add nodes from the failures that were not in the task responses for (var nodeRoutingState : nodeToRoutingStates.entrySet()) { - if (visitedNodes.contains(nodeRoutingState.getKey()) == false) { + if ((visitedNodes.contains(nodeRoutingState.getKey()) == false) && nodes.nodeExists(nodeRoutingState.getKey())) { updatedNodeStats.add( AssignmentStats.NodeStats.forNotStartedState( nodes.get(nodeRoutingState.getKey()), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java index 4a66be4a773f5..2490cd8d5ab21 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java @@ -148,6 +148,45 @@ public void testAddFailedRoutes_TaskResultIsOverwritten() throws UnknownHostExce assertEquals(RoutingState.FAILED, results.get(0).getNodeStats().get(1).getRoutingState().getState()); } + public void testAddFailedRoutes_MissingNode() throws UnknownHostException { + DiscoveryNodes nodes = buildNodes("node1", "node2"); + var missingNode = DiscoveryNodeUtils.create( + "node3", + new TransportAddress(InetAddress.getByAddress(new byte[] { (byte) 192, (byte) 168, (byte) 0, (byte) 1 }), 9203) + ); + + List nodeStatsList = new ArrayList<>(); + nodeStatsList.add(AssignmentStatsTests.randomNodeStats(nodes.get("node1"))); + nodeStatsList.add(AssignmentStatsTests.randomNodeStats(nodes.get("node2"))); + + var model1 = new AssignmentStats( + "model1", + "deployment1", + randomBoolean() ? null : randomIntBetween(1, 8), + randomBoolean() ? null : randomIntBetween(1, 8), + null, + randomBoolean() ? null : randomIntBetween(1, 10000), + randomBoolean() ? null : ByteSizeValue.ofBytes(randomLongBetween(1, 1000000)), + Instant.now(), + nodeStatsList, + randomFrom(Priority.values()) + ); + var response = new GetDeploymentStatsAction.Response(Collections.emptyList(), Collections.emptyList(), List.of(model1), 1); + + // failed state for node 3 conflicts + Map> badRoutes = new HashMap<>(); + Map nodeRoutes = new HashMap<>(); + nodeRoutes.put("node3", new RoutingInfo(1, 1, RoutingState.FAILED, "failed on node3")); + badRoutes.put(createAssignment("model1"), nodeRoutes); + + var modified = TransportGetDeploymentStatsAction.addFailedRoutes(response, badRoutes, nodes); + List results = modified.getStats().results(); + assertThat(results, hasSize(1)); + assertThat(results.get(0).getNodeStats(), hasSize(2)); // 3 + assertEquals("node1", results.get(0).getNodeStats().get(0).getNode().getId()); + assertEquals("node2", results.get(0).getNodeStats().get(1).getNode().getId()); + } + private DiscoveryNodes buildNodes(String... nodeIds) throws UnknownHostException { InetAddress inetAddress = InetAddress.getByAddress(new byte[] { (byte) 192, (byte) 168, (byte) 0, (byte) 1 }); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); From f2b146ed1c19f3801224c39c67a49800d980bca9 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 23 Oct 2024 17:38:10 -0400 Subject: [PATCH 035/324] ESQL: Fix test muting (#115448) (#115466) Fix the test muting on the test for grapheme clusters - it should only allow the test if we're on the 20+ jvm. Closes #114536 --- .../src/test/java/org/elasticsearch/xpack/esql/CsvTests.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 63233f0c46a0d..3119fd4b52153 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -259,6 +259,10 @@ public final void test() throws Throwable { testCase.requiredCapabilities, everyItem(in(EsqlCapabilities.capabilities(true))) ); + assumeTrue( + "Capability not supported in this build", + EsqlCapabilities.capabilities(false).containsAll(testCase.requiredCapabilities) + ); } else { for (EsqlCapabilities.Cap c : EsqlCapabilities.Cap.values()) { if (false == c.isEnabled()) { From 2f64d2037a23de4d9d0da3efb7dca182e066ef48 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 00:42:58 +0200 Subject: [PATCH 036/324] [test] Unmute FsDirectoryFactoryTests#testPreload (#115438) Resolve #110211 --- muted-tests.yml | 3 --- .../org/elasticsearch/index/store/FsDirectoryFactoryTests.java | 2 -- 2 files changed, 5 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index bd0145611237b..8b9c3cc6ce712 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -17,9 +17,6 @@ tests: - class: "org.elasticsearch.xpack.deprecation.DeprecationHttpIT" issue: "https://github.com/elastic/elasticsearch/issues/108628" method: "testDeprecatedSettingsReturnWarnings" -- class: org.elasticsearch.index.store.FsDirectoryFactoryTests - method: testPreload - issue: https://github.com/elastic/elasticsearch/issues/110211 - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" diff --git a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java index 38e6ca0be0647..b0a14515f2fbc 100644 --- a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java @@ -115,8 +115,6 @@ private void doTestPreload(String... preload) throws IOException { var func = fsDirectoryFactory.preLoadFuncMap.get(mmapDirectory); assertNotEquals(fsDirectoryFactory.preLoadFuncMap.get(mmapDirectory), MMapDirectory.ALL_FILES); assertNotEquals(fsDirectoryFactory.preLoadFuncMap.get(mmapDirectory), MMapDirectory.NO_FILES); - assertTrue(func.test("foo.dvd", newIOContext(random()))); - assertTrue(func.test("foo.tmp", newIOContext(random()))); for (String ext : preload) { assertTrue("ext: " + ext, func.test("foo." + ext, newIOContext(random()))); } From 92ecd36a031de094cda642f14000bea545b01740 Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Wed, 23 Oct 2024 22:00:48 -0400 Subject: [PATCH 037/324] [ES|QL] Simplify syntax of named parameter for identifier and pattern (#115061) * simplify syntax of named parameter for identifier and pattern --- docs/changelog/115061.yaml | 5 + .../xpack/esql/qa/rest/RestEsqlTestCase.java | 23 ++-- .../xpack/esql/action/EsqlCapabilities.java | 12 +- .../xpack/esql/action/RequestXContent.java | 105 +++++++++--------- .../esql/action/EsqlQueryRequestTests.java | 76 +++++++------ .../xpack/esql/analysis/AnalyzerTests.java | 8 +- .../esql/parser/StatementParserTests.java | 8 +- .../rest-api-spec/test/esql/10_basic.yml | 4 +- 8 files changed, 122 insertions(+), 119 deletions(-) create mode 100644 docs/changelog/115061.yaml diff --git a/docs/changelog/115061.yaml b/docs/changelog/115061.yaml new file mode 100644 index 0000000000000..7d40d5ae2629e --- /dev/null +++ b/docs/changelog/115061.yaml @@ -0,0 +1,5 @@ +pr: 115061 +summary: "[ES|QL] Simplify syntax of named parameter for identifier and pattern" +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 2a50988e9e35e..8c52a24231a41 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -672,7 +672,7 @@ public void testErrorMessageForArrayValuesInParams() throws IOException { public void testNamedParamsForIdentifierAndIdentifierPatterns() throws IOException { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); bulkLoadTestData(10); // positive @@ -684,12 +684,9 @@ public void testNamedParamsForIdentifierAndIdentifierPatterns() throws IOExcepti ) ) .params( - "[{\"n1\" : {\"value\" : \"integer\" , \"kind\" : \"identifier\"}}," - + "{\"n2\" : {\"value\" : \"short\" , \"kind\" : \"identifier\"}}, " - + "{\"n3\" : {\"value\" : \"double\" , \"kind\" : \"identifier\"}}," - + "{\"n4\" : {\"value\" : \"boolean\" , \"kind\" : \"identifier\"}}, " - + "{\"n5\" : {\"value\" : \"xx*\" , \"kind\" : \"pattern\"}}, " - + "{\"fn1\" : {\"value\" : \"max\" , \"kind\" : \"identifier\"}}]" + "[{\"n1\" : {\"identifier\" : \"integer\"}}, {\"n2\" : {\"identifier\" : \"short\"}}, " + + "{\"n3\" : {\"identifier\" : \"double\"}}, {\"n4\" : {\"identifier\" : \"boolean\"}}, " + + "{\"n5\" : {\"pattern\" : \"xx*\"}}, {\"fn1\" : {\"identifier\" : \"max\"}}]" ); Map result = runEsql(query); Map colA = Map.of("name", "boolean", "type", "boolean"); @@ -728,10 +725,7 @@ public void testNamedParamsForIdentifierAndIdentifierPatterns() throws IOExcepti ResponseException.class, () -> runEsqlSync( requestObjectBuilder().query(format(null, "from {} | {}", testIndexName(), command.getKey())) - .params( - "[{\"n1\" : {\"value\" : \"integer\" , \"kind\" : \"identifier\"}}," - + "{\"n2\" : {\"value\" : \"short\" , \"kind\" : \"identifier\"}}]" - ) + .params("[{\"n1\" : {\"identifier\" : \"integer\"}}, {\"n2\" : {\"identifier\" : \"short\"}}]") ) ); error = re.getMessage(); @@ -751,9 +745,8 @@ public void testNamedParamsForIdentifierAndIdentifierPatterns() throws IOExcepti () -> runEsqlSync( requestObjectBuilder().query(format(null, "from {} | {}", testIndexName(), command.getKey())) .params( - "[{\"n1\" : {\"value\" : \"`n1`\" , \"kind\" : \"identifier\"}}," - + "{\"n2\" : {\"value\" : \"`n2`\" , \"kind\" : \"identifier\"}}, " - + "{\"n3\" : {\"value\" : \"`n3`\" , \"kind\" : \"identifier\"}}]" + "[{\"n1\" : {\"identifier\" : \"`n1`\"}}, {\"n2\" : {\"identifier\" : \"`n2`\"}}, " + + "{\"n3\" : {\"identifier\" : \"`n3`\"}}]" ) ) ); @@ -781,7 +774,7 @@ public void testNamedParamsForIdentifierAndIdentifierPatterns() throws IOExcepti ResponseException.class, () -> runEsqlSync( requestObjectBuilder().query(format(null, "from {} | ?cmd {}", testIndexName(), command.getValue())) - .params("[{\"cmd\" : {\"value\" : \"" + command.getKey() + "\", \"kind\" : \"identifier\"}}]") + .params("[{\"cmd\" : {\"identifier\" : \"" + command.getKey() + "\"}}]") ) ); error = re.getMessage(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index dfca6ab2bf814..f22ad07a4c6f6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -384,11 +384,6 @@ public enum Cap { */ DATE_DIFF_YEAR_CALENDARIAL, - /** - * Support named parameters for field names. - */ - NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES(Build.current().isSnapshot()), - /** * Fix sorting not allowed on _source and counters. */ @@ -431,7 +426,12 @@ public enum Cap { /** * This enables 60_usage.yml "Basic ESQL usage....non-snapshot" version test. See also the previous capability. */ - NON_SNAPSHOT_TEST_FOR_TELEMETRY(Build.current().isSnapshot() == false); + NON_SNAPSHOT_TEST_FOR_TELEMETRY(Build.current().isSnapshot() == false), + + /** + * Support simplified syntax for named parameters for field and function names. + */ + NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX(Build.current().isSnapshot()); private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java index 71aface993ab9..d8904288523a7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.xcontent.ObjectParser; @@ -90,19 +89,6 @@ String fields() { private static final ObjectParser SYNC_PARSER = objectParserSync(EsqlQueryRequest::syncEsqlQueryRequest); private static final ObjectParser ASYNC_PARSER = objectParserAsync(EsqlQueryRequest::asyncEsqlQueryRequest); - private enum ParamParsingKey { - VALUE, - KIND - } - - private static final Map paramParsingKeys = Maps.newMapWithExpectedSize(ParamParsingKey.values().length); - - static { - for (ParamParsingKey e : ParamParsingKey.values()) { - paramParsingKeys.put(e.name(), e); - } - } - /** Parses a synchronous request. */ static EsqlQueryRequest parseSync(XContentParser parser) { return SYNC_PARSER.apply(parser, null); @@ -180,25 +166,21 @@ private static QueryParams parseParams(XContentParser p) throws IOException { ); } for (Map.Entry entry : param.fields.entrySet()) { - ParserUtils.ParamClassification classification; + ParserUtils.ParamClassification classification = null; + paramValue = null; String paramName = entry.getKey(); checkParamNameValidity(paramName, errors, loc); - if (EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() - && entry.getValue() instanceof Map values) {// parameter specified as key:value pairs - Map paramElements = Maps.newMapWithExpectedSize(2); - for (Object keyName : values.keySet()) { - ParamParsingKey paramType = checkParamValueKeysValidity(keyName.toString(), errors, loc); - if (paramType != null) { - paramElements.put(paramType, values.get(keyName)); + if (EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() + && entry.getValue() instanceof Map value) {// parameter specified as a key:value pair + checkParamValueSize(paramName, value, loc, errors); + for (Object keyName : value.keySet()) { + classification = getParamClassification(keyName.toString(), errors, loc); + if (classification != null) { + paramValue = value.get(keyName); + checkParamValueValidity(classification, paramValue, loc, errors); } } - paramValue = paramElements.get(ParamParsingKey.VALUE); - if (paramValue == null && values.size() > 1) { // require non-null value for identifier and pattern - errors.add(new XContentParseException(loc, "[" + entry + "] does not have a value specified")); - } - - classification = getClassificationForParam(paramElements, loc, errors); } else {// parameter specifies as a value only paramValue = entry.getValue(); classification = VALUE; @@ -280,12 +262,45 @@ private static void checkParamNameValidity(String name, List paramValue, + XContentLocation loc, + List errors + ) { + if (paramValue.size() == 1) { + return; + } + String errorMessage; + if (paramValue.isEmpty()) { + errorMessage = " has no valid param attribute"; + } else { + errorMessage = " has multiple param attributes [" + + paramValue.keySet().stream().map(Object::toString).collect(Collectors.joining(", ")) + + "]"; + } + errors.add( + new XContentParseException( + loc, + "[" + + paramName + + "]" + + errorMessage + + ", only one of " + + Arrays.stream(ParserUtils.ParamClassification.values()) + .map(ParserUtils.ParamClassification::name) + .collect(Collectors.joining(", ")) + + " can be defined in a param" + ) + ); + } + + private static ParserUtils.ParamClassification getParamClassification( String paramKeyName, List errors, XContentLocation loc ) { - ParamParsingKey paramType = paramParsingKeys.get(paramKeyName.toUpperCase(Locale.ROOT)); + ParserUtils.ParamClassification paramType = paramClassifications.get(paramKeyName.toUpperCase(Locale.ROOT)); if (paramType == null) { errors.add( new XContentParseException( @@ -293,38 +308,21 @@ private static ParamParsingKey checkParamValueKeysValidity( "[" + paramKeyName + "] is not a valid param attribute, a valid attribute is any of " - + Arrays.stream(ParamParsingKey.values()).map(ParamParsingKey::name).collect(Collectors.joining(", ")) + + Arrays.stream(ParserUtils.ParamClassification.values()) + .map(ParserUtils.ParamClassification::name) + .collect(Collectors.joining(", ")) ) ); } return paramType; } - private static ParserUtils.ParamClassification getClassificationForParam( - Map paramElements, + private static void checkParamValueValidity( + ParserUtils.ParamClassification classification, + Object value, XContentLocation loc, List errors ) { - Object value = paramElements.get(ParamParsingKey.VALUE); - Object kind = paramElements.get(ParamParsingKey.KIND); - ParserUtils.ParamClassification classification = VALUE; - if (kind != null) { - classification = paramClassifications.get(kind.toString().toUpperCase(Locale.ROOT)); - if (classification == null) { - errors.add( - new XContentParseException( - loc, - "[" - + kind - + "] is not a valid param kind, a valid kind is any of " - + Arrays.stream(ParserUtils.ParamClassification.values()) - .map(ParserUtils.ParamClassification::name) - .collect(Collectors.joining(", ")) - ) - ); - } - } - // If a param is an "identifier" or a "pattern", validate it is a string. // If a param is a "pattern", validate it contains *. if (classification == IDENTIFIER || classification == PATTERN) { @@ -345,6 +343,5 @@ private static ParserUtils.ParamClassification getClassificationForParam( ); } } - return classification; } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index 7deaff6ebe6bb..dcb83dadfcf96 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -146,7 +146,7 @@ public void testNamedParams() throws IOException { public void testNamedParamsForIdentifiersPatterns() throws IOException { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); String query = randomAlphaOfLengthBetween(1, 100); boolean columnar = randomBoolean(); @@ -154,12 +154,12 @@ public void testNamedParamsForIdentifiersPatterns() throws IOException { QueryBuilder filter = randomQueryBuilder(); String paramsString = """ - ,"params":[ {"n1" : {"value" : "f1", "kind" : "Identifier"}}, - {"n2" : {"value" : "f1*", "Kind" : "identifier"}}, - {"n3" : {"value" : "f.1*", "KIND" : "Pattern"}}, - {"n4" : {"value" : "*", "kind" : "pattern"}}, - {"n5" : {"value" : "esql", "kind" : "Value"}}, - {"n_6" : {"value" : "null", "kind" : "identifier"}}, + ,"params":[ {"n1" : {"identifier" : "f1"}}, + {"n2" : {"Identifier" : "f1*"}}, + {"n3" : {"pattern" : "f.1*"}}, + {"n4" : {"Pattern" : "*"}}, + {"n5" : {"Value" : "esql"}}, + {"n_6" : {"identifier" : "null"}}, {"n7_" : {"value" : "f.1.1"}}] }"""; List params = List.of( @@ -262,7 +262,7 @@ public void testInvalidParams() throws IOException { public void testInvalidParamsForIdentifiersPatterns() throws IOException { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); String query = randomAlphaOfLengthBetween(1, 100); boolean columnar = randomBoolean(); @@ -271,13 +271,12 @@ public void testInvalidParamsForIdentifiersPatterns() throws IOException { // invalid named parameter for identifier and identifier pattern String paramsString1 = """ - "params":[ {"n1" : {"v" : "v1"}}, {"n2" : {"value" : "v2", "type" : "identifier"}}, - {"n3" : {"value" : "v3", "kind" : "id" }}, {"n4" : {"value" : "v4", "kind" : true}}, - {"n5" : {"value" : "v5", "kind" : ["identifier", "pattern"]}}, {"n6" : {"value" : "v6", "kind" : 0}}, - {"n7" : {"value" : 1, "kind" : "Identifier"}}, {"n8" : {"value" : true, "kind" : "Pattern"}}, - {"n9" : {"kind" : "identifier"}}, {"n10" : {"v" : "v10", "kind" : "identifier"}}, - {"n11" : {"value" : "v11", "kind" : "pattern"}}, {"n12" : {"value" : ["x", "y"], "kind" : "identifier"}}, - {"n13" : {"value" : "v13", "kind" : "identifier", "type" : "pattern"}}, {"n14" : {"v" : "v14", "kind" : "value"}}]"""; + "params":[{"n1" : {"v" : "v1"}}, {"n2" : {"identifier" : "v2", "pattern" : "v2"}}, + {"n3" : {"identifier" : "v3", "pattern" : "v3"}}, {"n4" : {"pattern" : "v4.1", "value" : "v4.2"}}, + {"n5" : {"value" : {"a5" : "v5"}}},{"n6" : {"identifier" : {"a6.1" : "v6.1", "a6.2" : "v6.2"}}}, {"n7" : {}}, + {"n8" : {"value" : ["x", "y"]}}, {"n9" : {"identifier" : ["x", "y"]}}, {"n10" : {"pattern" : ["x*", "y*"]}}, + {"n11" : {"identifier" : 1}}, {"n12" : {"pattern" : true}}, {"n13" : {"identifier" : null}}, {"n14" : {"pattern" : "v14"}}, + {"n15" : {"pattern" : "v15*"}, "n16" : {"identifier" : "v16"}}]"""; String json1 = String.format(Locale.ROOT, """ { %s @@ -291,28 +290,37 @@ public void testInvalidParamsForIdentifiersPatterns() throws IOException { assertThat( e1.getCause().getMessage(), containsString( - "Failed to parse params: [2:16] [v] is not a valid param attribute, a valid attribute is any of VALUE, KIND; " - + "[2:39] [type] is not a valid param attribute, a valid attribute is any of VALUE, KIND; " - + "[3:1] [id] is not a valid param kind, a valid kind is any of VALUE, IDENTIFIER, PATTERN; " - + "[3:44] [true] is not a valid param kind, a valid kind is any of VALUE, IDENTIFIER, PATTERN; " - + "[4:1] [[identifier, pattern]] is not a valid param kind, a valid kind is any of VALUE, IDENTIFIER, PATTERN; " - + "[4:64] [0] is not a valid param kind, a valid kind is any of VALUE, IDENTIFIER, PATTERN; " - + "[5:1] [1] is not a valid value for IDENTIFIER parameter, a valid value for IDENTIFIER parameter is a string; " - + "[5:48] [true] is not a valid value for PATTERN parameter, " + "[2:15] [v] is not a valid param attribute, a valid attribute is any of VALUE, IDENTIFIER, PATTERN; " + + "[2:38] [n2] has multiple param attributes [identifier, pattern], " + + "only one of VALUE, IDENTIFIER, PATTERN can be defined in a param; " + + "[2:38] [v2] is not a valid value for PATTERN parameter, " + "a valid value for PATTERN parameter is a string and contains *; " - + "[6:1] [null] is not a valid value for IDENTIFIER parameter, a valid value for IDENTIFIER parameter is a string; " - + "[6:35] [v] is not a valid param attribute, a valid attribute is any of VALUE, KIND; " - + "[6:35] [n10={v=v10, kind=identifier}] does not have a value specified; " - + "[6:35] [null] is not a valid value for IDENTIFIER parameter, " + + "[3:1] [n3] has multiple param attributes [identifier, pattern], " + + "only one of VALUE, IDENTIFIER, PATTERN can be defined in a param; " + + "[3:1] [v3] is not a valid value for PATTERN parameter, " + + "a valid value for PATTERN parameter is a string and contains *; " + + "[3:51] [n4] has multiple param attributes [pattern, value], " + + "only one of VALUE, IDENTIFIER, PATTERN can be defined in a param; " + + "[3:51] [v4.1] is not a valid value for PATTERN parameter, " + + "a valid value for PATTERN parameter is a string and contains *; " + + "[4:1] n5={value={a5=v5}} is not supported as a parameter; " + + "[4:36] [{a6.1=v6.1, a6.2=v6.2}] is not a valid value for IDENTIFIER parameter, " + "a valid value for IDENTIFIER parameter is a string; " - + "[7:1] [v11] is not a valid value for PATTERN parameter, " + + "[4:36] n6={identifier={a6.1=v6.1, a6.2=v6.2}} is not supported as a parameter; " + + "[4:98] [n7] has no valid param attribute, only one of VALUE, IDENTIFIER, PATTERN can be defined in a param; " + + "[5:1] n8={value=[x, y]} is not supported as a parameter; " + + "[5:34] [[x, y]] is not a valid value for IDENTIFIER parameter, a valid value for IDENTIFIER parameter is a string; " + + "[5:34] n9={identifier=[x, y]} is not supported as a parameter; " + + "[5:72] [[x*, y*]] is not a valid value for PATTERN parameter, " + + "a valid value for PATTERN parameter is a string and contains *; " + + "[5:72] n10={pattern=[x*, y*]} is not supported as a parameter; " + + "[6:1] [1] is not a valid value for IDENTIFIER parameter, a valid value for IDENTIFIER parameter is a string; " + + "[6:31] [true] is not a valid value for PATTERN parameter, " + + "a valid value for PATTERN parameter is a string and contains *; " + + "[6:61] [null] is not a valid value for IDENTIFIER parameter, a valid value for IDENTIFIER parameter is a string; " + + "[6:94] [v14] is not a valid value for PATTERN parameter, " + "a valid value for PATTERN parameter is a string and contains *; " - + "[7:50] [[x, y]] is not a valid value for IDENTIFIER parameter," - + " a valid value for IDENTIFIER parameter is a string; " - + "[7:50] n12={kind=identifier, value=[x, y]} is not supported as a parameter; " - + "[8:1] [type] is not a valid param attribute, a valid attribute is any of VALUE, KIND; " - + "[8:73] [v] is not a valid param attribute, a valid attribute is any of VALUE, KIND; " - + "[8:73] [n14={v=v14, kind=value}] does not have a value specified" + + "[7:1] Cannot parse more than one key:value pair as parameter, found [{n16:{identifier=v16}}, {n15:{pattern=v15*}}]" ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 3048686efbe44..c18f55a651408 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -2087,7 +2087,7 @@ public void testCoalesceWithMixedNumericTypes() { public void testNamedParamsForIdentifiers() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); assertProjectionWithMapping( """ @@ -2181,7 +2181,7 @@ public void testNamedParamsForIdentifiers() { public void testInvalidNamedParamsForIdentifiers() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // missing field assertError( @@ -2254,7 +2254,7 @@ public void testInvalidNamedParamsForIdentifiers() { public void testNamedParamsForIdentifierPatterns() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); assertProjectionWithMapping( """ @@ -2288,7 +2288,7 @@ public void testNamedParamsForIdentifierPatterns() { public void testInvalidNamedParamsForIdentifierPatterns() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // missing pattern assertError( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 094d301875d8e..8019dbf77ffbf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -1564,7 +1564,7 @@ public void testIntervalParam() { public void testParamForIdentifier() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // field names can appear in eval/where/stats/sort/keep/drop/rename/dissect/grok/enrich/mvexpand // eval, where @@ -1825,7 +1825,7 @@ public void testParamForIdentifier() { public void testParamForIdentifierPattern() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // name patterns can appear in keep and drop // all patterns @@ -1918,7 +1918,7 @@ public void testParamForIdentifierPattern() { public void testParamInInvalidPosition() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // param for pattern is not supported in eval/where/stats/sort/rename/dissect/grok/enrich/mvexpand // where/stats/sort/dissect/grok are covered in RestEsqlTestCase @@ -1973,7 +1973,7 @@ public void testParamInInvalidPosition() { public void testMissingParam() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // cover all processing commands eval/where/stats/sort/rename/dissect/grok/enrich/mvexpand/keep/drop String error = "Unknown query parameter [f1], did you mean [f4]?"; diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml index 4e8f82d507a5f..96145e84ad2cd 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml @@ -430,14 +430,14 @@ setup: - method: POST path: /_query parameters: [ ] - capabilities: [ named_parameter_for_field_and_function_names ] + capabilities: [ named_parameter_for_field_and_function_names_simplified_syntax ] reason: "named or positional parameters for field names" - do: esql.query: body: query: 'from test | stats x = count(?f1), y = sum(?f2) by ?f3 | sort ?f3 | keep ?f3, x, y | limit 3' - params: [{"f1" : {"value" : "time", "kind" : "identifier" }}, {"f2" : { "value" : "count", "kind" : "identifier" }}, {"f3" : { "value" : "color", "kind" : "identifier" }}] + params: [{"f1" : {"identifier" : "time"}}, {"f2" : { "identifier" : "count" }}, {"f3" : { "identifier" : "color"}}] - length: {columns: 3} - match: {columns.0.name: "color"} From 541bcf30e5d03944cace8deec24559fc63c8bcb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 24 Oct 2024 08:53:12 +0200 Subject: [PATCH 038/324] [DOCS] Documents that ELSER is the default service for `semantic_text` (#114615) Co-authored-by: Mike Pellegrini --- .../mapping/types/semantic-text.asciidoc | 26 ++++++++- .../semantic-search-semantic-text.asciidoc | 57 +++---------------- 2 files changed, 33 insertions(+), 50 deletions(-) diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index ac23c153e01a3..893e2c6cff8ed 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -13,25 +13,47 @@ Long passages are <> to smaller secti The `semantic_text` field type specifies an inference endpoint identifier that will be used to generate embeddings. You can create the inference endpoint by using the <>. This field type and the <> type make it simpler to perform semantic search on your data. +If you don't specify an inference endpoint, the <> is used by default. Using `semantic_text`, you won't need to specify how to generate embeddings for your data, or how to index it. The {infer} endpoint automatically determines the embedding generation, indexing, and query to use. +If you use the ELSER service, you can set up `semantic_text` with the following API request: + [source,console] ------------------------------------------------------------ PUT my-index-000001 +{ + "mappings": { + "properties": { + "inference_field": { + "type": "semantic_text" + } + } + } +} +------------------------------------------------------------ + +NOTE: In Serverless, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` even if you use the ELSER service. + +If you use a service other than ELSER, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` as the following example demonstrates: + +[source,console] +------------------------------------------------------------ +PUT my-index-000002 { "mappings": { "properties": { "inference_field": { "type": "semantic_text", - "inference_id": "my-elser-endpoint" + "inference_id": "my-openai-endpoint" <1> } } } } ------------------------------------------------------------ // TEST[skip:Requires inference endpoint] +<1> The `inference_id` of the {infer} endpoint to use to generate embeddings. The recommended way to use semantic_text is by having dedicated {infer} endpoints for ingestion and search. @@ -40,7 +62,7 @@ After creating dedicated {infer} endpoints for both, you can reference them usin [source,console] ------------------------------------------------------------ -PUT my-index-000002 +PUT my-index-000003 { "mappings": { "properties": { diff --git a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc index 60692c19c184a..f881ca87a92e6 100644 --- a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc @@ -21,45 +21,11 @@ This tutorial uses the <> for demonstra [[semantic-text-requirements]] ==== Requirements -To use the `semantic_text` field type, you must have an {infer} endpoint deployed in -your cluster using the <>. +This tutorial uses the <> for demonstration, which is created automatically as needed. +To use the `semantic_text` field type with an {infer} service other than ELSER, you must create an inference endpoint using the <>. -[discrete] -[[semantic-text-infer-endpoint]] -==== Create the {infer} endpoint - -Create an inference endpoint by using the <>: +NOTE: In Serverless, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` even if you use the ELSER service. -[source,console] ------------------------------------------------------------- -PUT _inference/sparse_embedding/my-elser-endpoint <1> -{ - "service": "elser", <2> - "service_settings": { - "adaptive_allocations": { <3> - "enabled": true, - "min_number_of_allocations": 3, - "max_number_of_allocations": 10 - }, - "num_threads": 1 - } -} ------------------------------------------------------------- -// TEST[skip:TBD] -<1> The task type is `sparse_embedding` in the path as the `elser` service will -be used and ELSER creates sparse vectors. The `inference_id` is -`my-elser-endpoint`. -<2> The `elser` service is used in this example. -<3> This setting enables and configures {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations]. -Adaptive allocations make it possible for ELSER to automatically scale up or down resources based on the current load on the process. - -[NOTE] -==== -You might see a 502 bad gateway error in the response when using the {kib} Console. -This error usually just reflects a timeout, while the model downloads in the background. -You can check the download progress in the {ml-app} UI. -If using the Python client, you can set the `timeout` parameter to a higher value. -==== [discrete] [[semantic-text-index-mapping]] @@ -75,8 +41,7 @@ PUT semantic-embeddings "mappings": { "properties": { "content": { <1> - "type": "semantic_text", <2> - "inference_id": "my-elser-endpoint" <3> + "type": "semantic_text" <2> } } } @@ -85,18 +50,14 @@ PUT semantic-embeddings // TEST[skip:TBD] <1> The name of the field to contain the generated embeddings. <2> The field to contain the embeddings is a `semantic_text` field. -<3> The `inference_id` is the inference endpoint you created in the previous step. -It will be used to generate the embeddings based on the input text. -Every time you ingest data into the related `semantic_text` field, this endpoint will be used for creating the vector representation of the text. +Since no `inference_id` is provided, the <> is used by default. +To use a different {infer} service, you must create an {infer} endpoint first using the <> and then specify it in the `semantic_text` field mapping using the `inference_id` parameter. [NOTE] ==== -If you're using web crawlers or connectors to generate indices, you have to -<> for these indices to -include the `semantic_text` field. Once the mapping is updated, you'll need to run -a full web crawl or a full connector sync. This ensures that all existing -documents are reprocessed and updated with the new semantic embeddings, -enabling semantic search on the updated data. +If you're using web crawlers or connectors to generate indices, you have to <> for these indices to include the `semantic_text` field. +Once the mapping is updated, you'll need to run a full web crawl or a full connector sync. +This ensures that all existing documents are reprocessed and updated with the new semantic embeddings, enabling semantic search on the updated data. ==== From bffaabb6f5c185d6e1003dd08029567ba469fe79 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Thu, 24 Oct 2024 09:19:46 +0200 Subject: [PATCH 039/324] ES|QL: improve docs about escaping for GROK, DISSECT, LIKE, RLIKE (#115320) --- ...ql-process-data-with-dissect-grok.asciidoc | 31 ++++++---- .../functions/kibana/definition/like.json | 2 +- .../functions/kibana/definition/rlike.json | 2 +- .../esql/functions/kibana/docs/like.md | 2 +- .../esql/functions/kibana/docs/rlike.md | 2 +- docs/reference/esql/functions/like.asciidoc | 16 ++++++ docs/reference/esql/functions/rlike.asciidoc | 16 ++++++ .../src/main/resources/docs.csv-spec | 42 +++++++++----- .../src/main/resources/string.csv-spec | 56 +++++++++++++++++++ .../function/scalar/string/RLike.java | 18 +++++- .../function/scalar/string/WildcardLike.java | 18 +++++- 11 files changed, 175 insertions(+), 30 deletions(-) diff --git a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc index 87748fee4f202..e626e058a4e56 100644 --- a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc +++ b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc @@ -40,7 +40,7 @@ delimiter-based pattern, and extracts the specified keys as columns. For example, the following pattern: [source,txt] ---- -%{clientip} [%{@timestamp}] %{status} +%{clientip} [%{@timestamp}] %{status} ---- matches a log line of this format: @@ -76,8 +76,8 @@ ignore certain fields, append fields, skip over padding, etc. ===== Terminology dissect pattern:: -the set of fields and delimiters describing the textual -format. Also known as a dissection. +the set of fields and delimiters describing the textual +format. Also known as a dissection. The dissection is described using a set of `%{}` sections: `%{a} - %{b} - %{c}` @@ -91,14 +91,14 @@ Any set of characters other than `%{`, `'not }'`, or `}` is a delimiter. key:: + -- -the text between the `%{` and `}`, exclusive of the `?`, `+`, `&` prefixes -and the ordinal suffix. +the text between the `%{` and `}`, exclusive of the `?`, `+`, `&` prefixes +and the ordinal suffix. Examples: -* `%{?aaa}` - the key is `aaa` -* `%{+bbb/3}` - the key is `bbb` -* `%{&ccc}` - the key is `ccc` +* `%{?aaa}` - the key is `aaa` +* `%{+bbb/3}` - the key is `bbb` +* `%{&ccc}` - the key is `ccc` -- [[esql-dissect-examples]] @@ -218,7 +218,7 @@ Putting it together as an {esql} query: [source.merge.styled,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=grokWithEscape] +include::{esql-specs}/docs.csv-spec[tag=grokWithEscapeTripleQuotes] ---- `GROK` adds the following columns to the input table: @@ -239,15 +239,24 @@ with a `\`. For example, in the earlier pattern: %{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status} ---- -In {esql} queries, the backslash character itself is a special character that +In {esql} queries, when using single quotes for strings, the backslash character itself is a special character that needs to be escaped with another `\`. For this example, the corresponding {esql} query becomes: [source.merge.styled,esql] ---- include::{esql-specs}/docs.csv-spec[tag=grokWithEscape] ---- + +For this reason, in general it is more convenient to use triple quotes `"""` for GROK patterns, +that do not require escaping for backslash. + +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=grokWithEscapeTripleQuotes] +---- ==== + [[esql-grok-patterns]] ===== Grok patterns @@ -318,4 +327,4 @@ as the `GROK` command. The `GROK` command does not support configuring <>, or <>. The `GROK` command is not subject to <>. -// end::grok-limitations[] \ No newline at end of file +// end::grok-limitations[] diff --git a/docs/reference/esql/functions/kibana/definition/like.json b/docs/reference/esql/functions/kibana/definition/like.json index 97e84e0361fd2..f375c697bd60d 100644 --- a/docs/reference/esql/functions/kibana/definition/like.json +++ b/docs/reference/esql/functions/kibana/definition/like.json @@ -42,7 +42,7 @@ } ], "examples" : [ - "FROM employees\n| WHERE first_name LIKE \"?b*\"\n| KEEP first_name, last_name" + "FROM employees\n| WHERE first_name LIKE \"\"\"?b*\"\"\"\n| KEEP first_name, last_name" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/definition/rlike.json b/docs/reference/esql/functions/kibana/definition/rlike.json index e442bb2c55050..7a328293383bb 100644 --- a/docs/reference/esql/functions/kibana/definition/rlike.json +++ b/docs/reference/esql/functions/kibana/definition/rlike.json @@ -42,7 +42,7 @@ } ], "examples" : [ - "FROM employees\n| WHERE first_name RLIKE \".leja.*\"\n| KEEP first_name, last_name" + "FROM employees\n| WHERE first_name RLIKE \"\"\".leja.*\"\"\"\n| KEEP first_name, last_name" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/docs/like.md b/docs/reference/esql/functions/kibana/docs/like.md index 4c400bdc65479..ea2ac11b6f4b9 100644 --- a/docs/reference/esql/functions/kibana/docs/like.md +++ b/docs/reference/esql/functions/kibana/docs/like.md @@ -15,6 +15,6 @@ The following wildcard characters are supported: ``` FROM employees -| WHERE first_name LIKE "?b*" +| WHERE first_name LIKE """?b*""" | KEEP first_name, last_name ``` diff --git a/docs/reference/esql/functions/kibana/docs/rlike.md b/docs/reference/esql/functions/kibana/docs/rlike.md index ed94553e7e44f..95b57799ffe29 100644 --- a/docs/reference/esql/functions/kibana/docs/rlike.md +++ b/docs/reference/esql/functions/kibana/docs/rlike.md @@ -10,6 +10,6 @@ expression. The right-hand side of the operator represents the pattern. ``` FROM employees -| WHERE first_name RLIKE ".leja.*" +| WHERE first_name RLIKE """.leja.*""" | KEEP first_name, last_name ``` diff --git a/docs/reference/esql/functions/like.asciidoc b/docs/reference/esql/functions/like.asciidoc index 2298617be5699..a569896bc3c1e 100644 --- a/docs/reference/esql/functions/like.asciidoc +++ b/docs/reference/esql/functions/like.asciidoc @@ -23,4 +23,20 @@ include::{esql-specs}/docs.csv-spec[tag=like] |=== include::{esql-specs}/docs.csv-spec[tag=like-result] |=== + +Matching the exact characters `*` and `.` will require escaping. +The escape character is backslash `\`. Since also backslash is a special character in string literals, +it will require further escaping. + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=likeEscapingSingleQuotes] +---- + +To reduce the overhead of escaping, we suggest using triple quotes strings `"""` + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=likeEscapingTripleQuotes] +---- // end::body[] diff --git a/docs/reference/esql/functions/rlike.asciidoc b/docs/reference/esql/functions/rlike.asciidoc index 031594ae403da..f6009b2c49528 100644 --- a/docs/reference/esql/functions/rlike.asciidoc +++ b/docs/reference/esql/functions/rlike.asciidoc @@ -18,4 +18,20 @@ include::{esql-specs}/docs.csv-spec[tag=rlike] |=== include::{esql-specs}/docs.csv-spec[tag=rlike-result] |=== + +Matching special characters (eg. `.`, `*`, `(`...) will require escaping. +The escape character is backslash `\`. Since also backslash is a special character in string literals, +it will require further escaping. + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=rlikeEscapingSingleQuotes] +---- + +To reduce the overhead of escaping, we suggest using triple quotes strings `"""` + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=rlikeEscapingTripleQuotes] +---- // end::body[] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index 15fe6853ae491..a9c5a5214f159 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -382,7 +382,7 @@ count:long | languages:integer basicGrok // tag::basicGrok[] ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}""" | KEEP date, ip, email, num // end::basicGrok[] ; @@ -396,7 +396,7 @@ date:keyword | ip:keyword | email:keyword | num:keyword grokWithConversionSuffix // tag::grokWithConversionSuffix[] ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" | KEEP date, ip, email, num // end::grokWithConversionSuffix[] ; @@ -410,7 +410,7 @@ date:keyword | ip:keyword | email:keyword | num:integer grokWithToDatetime // tag::grokWithToDatetime[] ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" | KEEP date, ip, email, num | EVAL date = TO_DATETIME(date) // end::grokWithToDatetime[] @@ -436,11 +436,27 @@ ROW a = "1.2.3.4 [2023-01-23T12:15:00.000Z] Connected" // end::grokWithEscape-result[] ; + +grokWithEscapeTripleQuotes +// tag::grokWithEscapeTripleQuotes[] +ROW a = "1.2.3.4 [2023-01-23T12:15:00.000Z] Connected" +| GROK a """%{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status}""" +// end::grokWithEscapeTripleQuotes[] +| KEEP @timestamp +; + +// tag::grokWithEscapeTripleQuotes-result[] +@timestamp:keyword +2023-01-23T12:15:00.000Z +// end::grokWithEscapeTripleQuotes-result[] +; + + grokWithDuplicateFieldNames // tag::grokWithDuplicateFieldNames[] FROM addresses | KEEP city.name, zip_code -| GROK zip_code "%{WORD:zip_parts} %{WORD:zip_parts}" +| GROK zip_code """%{WORD:zip_parts} %{WORD:zip_parts}""" // end::grokWithDuplicateFieldNames[] | SORT city.name ; @@ -456,7 +472,7 @@ Tokyo | 100-7014 | null basicDissect // tag::basicDissect[] ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" -| DISSECT a "%{date} - %{msg} - %{ip}" +| DISSECT a """%{date} - %{msg} - %{ip}""" | KEEP date, msg, ip // end::basicDissect[] ; @@ -470,7 +486,7 @@ date:keyword | msg:keyword | ip:keyword dissectWithToDatetime // tag::dissectWithToDatetime[] ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" -| DISSECT a "%{date} - %{msg} - %{ip}" +| DISSECT a """%{date} - %{msg} - %{ip}""" | KEEP date, msg, ip | EVAL date = TO_DATETIME(date) // end::dissectWithToDatetime[] @@ -485,7 +501,7 @@ some text | 127.0.0.1 | 2023-01-23T12:15:00.000Z dissectRightPaddingModifier // tag::dissectRightPaddingModifier[] ROW message="1998-08-10T17:15:42 WARN" -| DISSECT message "%{ts->} %{level}" +| DISSECT message """%{ts->} %{level}""" // end::dissectRightPaddingModifier[] ; @@ -498,7 +514,7 @@ message:keyword | ts:keyword | level:keyword dissectEmptyRightPaddingModifier#[skip:-8.11.2, reason:Support for empty right padding modifiers introduced in 8.11.2] // tag::dissectEmptyRightPaddingModifier[] ROW message="[1998-08-10T17:15:42] [WARN]" -| DISSECT message "[%{ts}]%{->}[%{level}]" +| DISSECT message """[%{ts}]%{->}[%{level}]""" // end::dissectEmptyRightPaddingModifier[] ; @@ -511,7 +527,7 @@ ROW message="[1998-08-10T17:15:42] [WARN]" dissectAppendModifier // tag::dissectAppendModifier[] ROW message="john jacob jingleheimer schmidt" -| DISSECT message "%{+name} %{+name} %{+name} %{+name}" APPEND_SEPARATOR=" " +| DISSECT message """%{+name} %{+name} %{+name} %{+name}""" APPEND_SEPARATOR=" " // end::dissectAppendModifier[] ; @@ -524,7 +540,7 @@ john jacob jingleheimer schmidt|john jacob jingleheimer schmidt dissectAppendWithOrderModifier // tag::dissectAppendWithOrderModifier[] ROW message="john jacob jingleheimer schmidt" -| DISSECT message "%{+name/2} %{+name/4} %{+name/3} %{+name/1}" APPEND_SEPARATOR="," +| DISSECT message """%{+name/2} %{+name/4} %{+name/3} %{+name/1}""" APPEND_SEPARATOR="," // end::dissectAppendWithOrderModifier[] ; @@ -537,7 +553,7 @@ john jacob jingleheimer schmidt|schmidt,john,jingleheimer,jacob dissectNamedSkipKey // tag::dissectNamedSkipKey[] ROW message="1.2.3.4 - - 30/Apr/1998:22:00:52 +0000" -| DISSECT message "%{clientip} %{?ident} %{?auth} %{@timestamp}" +| DISSECT message """%{clientip} %{?ident} %{?auth} %{@timestamp}""" // end::dissectNamedSkipKey[] ; @@ -550,7 +566,7 @@ message:keyword | clientip:keyword | @timestamp:keyword docsLike // tag::like[] FROM employees -| WHERE first_name LIKE "?b*" +| WHERE first_name LIKE """?b*""" | KEEP first_name, last_name // end::like[] | SORT first_name @@ -566,7 +582,7 @@ Eberhardt |Terkki docsRlike // tag::rlike[] FROM employees -| WHERE first_name RLIKE ".leja.*" +| WHERE first_name RLIKE """.leja.*""" | KEEP first_name, last_name // end::rlike[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index dd9d519649c01..00fa2fddb2106 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -1800,3 +1800,59 @@ warning:Line 1:29: java.lang.IllegalArgumentException: single-value function enc x:keyword null ; + + +likeEscapingSingleQuotes +// tag::likeEscapingSingleQuotes[] +ROW message = "foo * bar" +| WHERE message LIKE "foo \\* bar" +// end::likeEscapingSingleQuotes[] +; + +// tag::likeEscapingSingleQuotes-result[] +message:keyword +foo * bar +// end::likeEscapingSingleQuotes-result[] +; + + +likeEscapingTripleQuotes +// tag::likeEscapingTripleQuotes[] +ROW message = "foo * bar" +| WHERE message LIKE """foo \* bar""" +// end::likeEscapingTripleQuotes[] +; + +// tag::likeEscapingTripleQuotes-result[] +message:keyword +foo * bar +// end::likeEscapingTripleQuotes-result[] +; + + +rlikeEscapingSingleQuotes +// tag::rlikeEscapingSingleQuotes[] +ROW message = "foo ( bar" +| WHERE message RLIKE "foo \\( bar" +// end::rlikeEscapingSingleQuotes[] +; + +// tag::rlikeEscapingSingleQuotes-result[] +message:keyword +foo ( bar +// end::rlikeEscapingSingleQuotes-result[] +; + + +rlikeEscapingTripleQuotes +// tag::rlikeEscapingTripleQuotes[] +ROW message = "foo ( bar" +| WHERE message RLIKE """foo \( bar""" +// end::rlikeEscapingTripleQuotes[] +; + +// tag::rlikeEscapingTripleQuotes-result[] +message:keyword +foo ( bar +// end::rlikeEscapingTripleQuotes-result[] +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLike.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLike.java index b46c46c89deba..cd42711177510 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLike.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLike.java @@ -33,7 +33,23 @@ public class RLike extends org.elasticsearch.xpack.esql.core.expression.predicat Use `RLIKE` to filter data based on string patterns using using <>. `RLIKE` usually acts on a field placed on the left-hand side of the operator, but it can also act on a constant (literal) - expression. The right-hand side of the operator represents the pattern.""", examples = @Example(file = "docs", tag = "rlike")) + expression. The right-hand side of the operator represents the pattern.""", detailedDescription = """ + Matching special characters (eg. `.`, `*`, `(`...) will require escaping. + The escape character is backslash `\\`. Since also backslash is a special character in string literals, + it will require further escaping. + + [source.merge.styled,esql] + ---- + include::{esql-specs}/string.csv-spec[tag=rlikeEscapingSingleQuotes] + ---- + + To reduce the overhead of escaping, we suggest using triple quotes strings `\"\"\"` + + [source.merge.styled,esql] + ---- + include::{esql-specs}/string.csv-spec[tag=rlikeEscapingTripleQuotes] + ---- + """, examples = @Example(file = "docs", tag = "rlike")) public RLike( Source source, @Param(name = "str", type = { "keyword", "text" }, description = "A literal value.") Expression value, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLike.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLike.java index 714c4ca04a862..c1b4f20f41795 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLike.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLike.java @@ -43,7 +43,23 @@ also act on a constant (literal) expression. The right-hand side of the operator The following wildcard characters are supported: * `*` matches zero or more characters. - * `?` matches one character.""", examples = @Example(file = "docs", tag = "like")) + * `?` matches one character.""", detailedDescription = """ + Matching the exact characters `*` and `.` will require escaping. + The escape character is backslash `\\`. Since also backslash is a special character in string literals, + it will require further escaping. + + [source.merge.styled,esql] + ---- + include::{esql-specs}/string.csv-spec[tag=likeEscapingSingleQuotes] + ---- + + To reduce the overhead of escaping, we suggest using triple quotes strings `\"\"\"` + + [source.merge.styled,esql] + ---- + include::{esql-specs}/string.csv-spec[tag=likeEscapingTripleQuotes] + ---- + """, examples = @Example(file = "docs", tag = "like")) public WildcardLike( Source source, @Param(name = "str", type = { "keyword", "text" }, description = "A literal expression.") Expression left, From 6f7bd550b17cbaf7d11acf68d0aacfa1d569f7c8 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 24 Oct 2024 10:34:12 +0300 Subject: [PATCH 040/324] Use settings from LogsdbIndexModeSettingsProvider in SyntheticSourceIndexSettingsProvider (#115437) * Use settings from LogsdbIndexModeSettingsProvider in SyntheticSourceIndexSettingsProvider * update --- .../xpack/logsdb/LogsdbWithBasicRestIT.java | 32 ++++++++ .../xpack/logsdb/LogsdbRestIT.java | 39 ++++++++-- .../xpack/logsdb/LogsDBPlugin.java | 2 +- .../LogsdbIndexModeSettingsProvider.java | 18 ++--- .../SyntheticSourceIndexSettingsProvider.java | 13 +++- ...heticSourceIndexSettingsProviderTests.java | 75 ++++++++++++++++++- 6 files changed, 159 insertions(+), 20 deletions(-) diff --git a/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java b/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java index f5ac107628d1a..381c83ceee289 100644 --- a/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java +++ b/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.logsdb; import org.elasticsearch.client.Request; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.test.cluster.ElasticsearchCluster; @@ -171,4 +172,35 @@ public void testLogsdbOverrideNullInTemplate() throws IOException { assertEquals("logsdb", settings.get("index.mode")); assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); } + + public void testLogsdbOverrideDefaultModeForLogsIndex() throws IOException { + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{ \"transient\": { \"cluster.logsdb.enabled\": true } }"); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["logs-test-*"], + "data_stream": { + } + } + """); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/logs-test-foo/_doc"); + request.setJsonEntity(""" + { + "@timestamp": "2020-01-01T00:00:00.000Z", + "host.name": "foo", + "message": "bar" + } + """); + assertOK(client().performRequest(request)); + + String index = DataStream.getDefaultBackingIndexName("logs-test-foo", 1); + var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); + } } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java index 16759c3292f7a..2bf8b00cf551c 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.logsdb; +import org.elasticsearch.client.Request; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; @@ -35,12 +37,6 @@ protected String getTestRestCluster() { } public void testFeatureUsageWithLogsdbIndex() throws IOException { - { - var response = getAsMap("/_license/feature_usage"); - @SuppressWarnings("unchecked") - List> features = (List>) response.get("features"); - assertThat(features, Matchers.empty()); - } { if (randomBoolean()) { createIndex("test-index", Settings.builder().put("index.mode", "logsdb").build()); @@ -81,4 +77,35 @@ public void testFeatureUsageWithLogsdbIndex() throws IOException { } } + public void testLogsdbSourceModeForLogsIndex() throws IOException { + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{ \"transient\": { \"cluster.logsdb.enabled\": true } }"); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["logs-test-*"], + "data_stream": { + } + } + """); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/logs-test-foo/_doc"); + request.setJsonEntity(""" + { + "@timestamp": "2020-01-01T00:00:00.000Z", + "host.name": "foo", + "message": "bar" + } + """); + assertOK(client().performRequest(request)); + + String index = DataStream.getDefaultBackingIndexName("logs-test-foo", 1); + var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertNull(settings.get("index.mapping.source.mode")); + } + } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java index 49a83335671cd..089be0604146f 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java @@ -52,7 +52,7 @@ public Collection getAdditionalIndexSettingProviders(Index return List.of(logsdbIndexModeSettingsProvider); } return List.of( - new SyntheticSourceIndexSettingsProvider(licenseService, parameters.mapperServiceFactory()), + new SyntheticSourceIndexSettingsProvider(licenseService, parameters.mapperServiceFactory(), logsdbIndexModeSettingsProvider), logsdbIndexModeSettingsProvider ); } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java index ee9d6129dcd54..329cd3bc8a04b 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java @@ -48,19 +48,16 @@ public Settings getAdditionalIndexSettings( final Settings settings, final List combinedTemplateMappings ) { - if (isLogsdbEnabled == false || dataStreamName == null) { - return Settings.EMPTY; - } - - final IndexMode indexMode = resolveIndexMode(settings.get(IndexSettings.MODE.getKey())); - if (indexMode != null) { - return Settings.EMPTY; - } + return getLogsdbModeSetting(dataStreamName, settings); + } - if (matchesLogsPattern(dataStreamName)) { + Settings getLogsdbModeSetting(final String dataStreamName, final Settings settings) { + if (isLogsdbEnabled + && dataStreamName != null + && resolveIndexMode(settings.get(IndexSettings.MODE.getKey())) == null + && matchesLogsPattern(dataStreamName)) { return Settings.builder().put("index.mode", IndexMode.LOGSDB.getName()).build(); } - return Settings.EMPTY; } @@ -71,5 +68,4 @@ private static boolean matchesLogsPattern(final String name) { private IndexMode resolveIndexMode(final String mode) { return mode != null ? Enum.valueOf(IndexMode.class, mode.toUpperCase(Locale.ROOT)) : null; } - } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java index 4625fe91294d7..e7572d6a646e1 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java @@ -38,13 +38,16 @@ final class SyntheticSourceIndexSettingsProvider implements IndexSettingProvider private final SyntheticSourceLicenseService syntheticSourceLicenseService; private final CheckedFunction mapperServiceFactory; + private final LogsdbIndexModeSettingsProvider logsdbIndexModeSettingsProvider; SyntheticSourceIndexSettingsProvider( SyntheticSourceLicenseService syntheticSourceLicenseService, - CheckedFunction mapperServiceFactory + CheckedFunction mapperServiceFactory, + LogsdbIndexModeSettingsProvider logsdbIndexModeSettingsProvider ) { this.syntheticSourceLicenseService = syntheticSourceLicenseService; this.mapperServiceFactory = mapperServiceFactory; + this.logsdbIndexModeSettingsProvider = logsdbIndexModeSettingsProvider; } @Override @@ -63,6 +66,14 @@ public Settings getAdditionalIndexSettings( Settings indexTemplateAndCreateRequestSettings, List combinedTemplateMappings ) { + var logsdbSettings = logsdbIndexModeSettingsProvider.getLogsdbModeSetting(dataStreamName, indexTemplateAndCreateRequestSettings); + if (logsdbSettings != Settings.EMPTY) { + indexTemplateAndCreateRequestSettings = Settings.builder() + .put(logsdbSettings) + .put(indexTemplateAndCreateRequestSettings) + .build(); + } + // This index name is used when validating component and index templates, we should skip this check in that case. // (See MetadataIndexTemplateService#validateIndexTemplateV2(...) method) boolean isTemplateValidation = "validate-index-name".equals(indexName); diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java index 362b387726105..2ab77b38b3373 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.license.MockLicenseState; @@ -35,6 +36,10 @@ public class SyntheticSourceIndexSettingsProviderTests extends ESTestCase { private SyntheticSourceLicenseService syntheticSourceLicenseService; private SyntheticSourceIndexSettingsProvider provider; + private static LogsdbIndexModeSettingsProvider getLogsdbIndexModeSettingsProvider(boolean enabled) { + return new LogsdbIndexModeSettingsProvider(Settings.builder().put("cluster.logsdb.enabled", enabled).build()); + } + @Before public void setup() { MockLicenseState licenseState = mock(MockLicenseState.class); @@ -46,7 +51,8 @@ public void setup() { provider = new SyntheticSourceIndexSettingsProvider( syntheticSourceLicenseService, - im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()) + im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), + getLogsdbIndexModeSettingsProvider(false) ); } @@ -310,4 +316,71 @@ public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSource() throws assertThat(result.size(), equalTo(1)); assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); } + + public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSourceFileMatch() throws IOException { + syntheticSourceLicenseService.setSyntheticSourceFallback(true); + provider = new SyntheticSourceIndexSettingsProvider( + syntheticSourceLicenseService, + im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), + getLogsdbIndexModeSettingsProvider(true) + ); + final Settings settings = Settings.EMPTY; + + String dataStreamName = "logs-app1"; + Metadata.Builder mb = Metadata.builder( + DataStreamTestHelper.getClusterStateWithDataStreams( + List.of(Tuple.tuple(dataStreamName, 1)), + List.of(), + Instant.now().toEpochMilli(), + builder().build(), + 1 + ).getMetadata() + ); + Metadata metadata = mb.build(); + Settings result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + assertThat(result.size(), equalTo(0)); + + dataStreamName = "logs-app1-0"; + mb = Metadata.builder( + DataStreamTestHelper.getClusterStateWithDataStreams( + List.of(Tuple.tuple(dataStreamName, 1)), + List.of(), + Instant.now().toEpochMilli(), + builder().build(), + 1 + ).getMetadata() + ); + metadata = mb.build(); + + result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + assertThat(result.size(), equalTo(1)); + assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); + + result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + builder().put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.toString()).build(), + List.of() + ); + assertThat(result.size(), equalTo(0)); + } } From cc9a08a7085e87975d780bb1c2f5c698dbe19b4e Mon Sep 17 00:00:00 2001 From: Nick Tindall Date: Thu, 24 Oct 2024 18:43:51 +1100 Subject: [PATCH 041/324] Only publish desired balance gauges on master (#115383) Closes ES-9834 --- docs/changelog/115383.yaml | 5 + .../DesiredBalanceReconcilerMetricsIT.java | 69 ++++++++++ .../allocator/DesiredBalanceMetrics.java | 118 ++++++++++++++++++ .../allocator/DesiredBalanceReconciler.java | 56 +-------- .../DesiredBalanceShardsAllocator.java | 19 +-- .../allocator/DesiredBalanceMetricsTests.java | 116 +++++++++++++++++ .../DesiredBalanceReconcilerTests.java | 7 +- 7 files changed, 326 insertions(+), 64 deletions(-) create mode 100644 docs/changelog/115383.yaml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java create mode 100644 server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java diff --git a/docs/changelog/115383.yaml b/docs/changelog/115383.yaml new file mode 100644 index 0000000000000..19eadd41c0726 --- /dev/null +++ b/docs/changelog/115383.yaml @@ -0,0 +1,5 @@ +pr: 115383 +summary: Only publish desired balance gauges on master +area: Allocation +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java new file mode 100644 index 0000000000000..cb279c93b402e --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.routing.allocation.allocator; + +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matcher; + +import java.util.Collection; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.not; + +public class DesiredBalanceReconcilerMetricsIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), TestTelemetryPlugin.class); + } + + public void testDesiredBalanceGaugeMetricsAreOnlyPublishedByCurrentMaster() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("test").setSettings(indexSettings(2, 1)).get(); + ensureGreen(); + + assertOnlyMasterIsPublishingMetrics(); + + // fail over and check again + int numFailOvers = randomIntBetween(1, 3); + for (int i = 0; i < numFailOvers; i++) { + internalCluster().restartNode(internalCluster().getMasterName()); + ensureGreen(); + + assertOnlyMasterIsPublishingMetrics(); + } + } + + private static void assertOnlyMasterIsPublishingMetrics() { + String masterNodeName = internalCluster().getMasterName(); + String[] nodeNames = internalCluster().getNodeNames(); + for (String nodeName : nodeNames) { + assertMetricsAreBeingPublished(nodeName, nodeName.equals(masterNodeName)); + } + } + + private static void assertMetricsAreBeingPublished(String nodeName, boolean shouldBePublishing) { + final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + testTelemetryPlugin.resetMeter(); + testTelemetryPlugin.collect(); + Matcher> matcher = shouldBePublishing ? not(empty()) : empty(); + assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.UNASSIGNED_SHARDS_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.TOTAL_SHARDS_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.UNDESIRED_ALLOCATION_COUNT_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.UNDESIRED_ALLOCATION_RATIO_METRIC_NAME), matcher); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java new file mode 100644 index 0000000000000..436f1ac38c0c2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.routing.allocation.allocator; + +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.LongWithAttributes; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.util.List; + +public class DesiredBalanceMetrics { + + public static final DesiredBalanceMetrics NOOP = new DesiredBalanceMetrics(MeterRegistry.NOOP); + public static final String UNASSIGNED_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.unassigned.current"; + public static final String TOTAL_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.current"; + public static final String UNDESIRED_ALLOCATION_COUNT_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.current"; + public static final String UNDESIRED_ALLOCATION_RATIO_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.ratio"; + + private volatile boolean nodeIsMaster = false; + + /** + * Number of unassigned shards during last reconciliation + */ + private volatile long unassignedShards; + /** + * Total number of assigned shards during last reconciliation + */ + private volatile long totalAllocations; + /** + * Number of assigned shards during last reconciliation that are not allocated on desired node and need to be moved + */ + private volatile long undesiredAllocations; + + public void updateMetrics(long unassignedShards, long totalAllocations, long undesiredAllocations) { + this.unassignedShards = unassignedShards; + this.totalAllocations = totalAllocations; + this.undesiredAllocations = undesiredAllocations; + } + + public DesiredBalanceMetrics(MeterRegistry meterRegistry) { + meterRegistry.registerLongsGauge( + UNASSIGNED_SHARDS_METRIC_NAME, + "Current number of unassigned shards", + "{shard}", + this::getUnassignedShardsMetrics + ); + meterRegistry.registerLongsGauge(TOTAL_SHARDS_METRIC_NAME, "Total number of shards", "{shard}", this::getTotalAllocationsMetrics); + meterRegistry.registerLongsGauge( + UNDESIRED_ALLOCATION_COUNT_METRIC_NAME, + "Total number of shards allocated on undesired nodes excluding shutting down nodes", + "{shard}", + this::getUndesiredAllocationsMetrics + ); + meterRegistry.registerDoublesGauge( + UNDESIRED_ALLOCATION_RATIO_METRIC_NAME, + "Ratio of undesired allocations to shard count excluding shutting down nodes", + "1", + this::getUndesiredAllocationsRatioMetrics + ); + } + + public void setNodeIsMaster(boolean nodeIsMaster) { + this.nodeIsMaster = nodeIsMaster; + } + + public long unassignedShards() { + return unassignedShards; + } + + public long totalAllocations() { + return totalAllocations; + } + + public long undesiredAllocations() { + return undesiredAllocations; + } + + private List getUnassignedShardsMetrics() { + return getIfPublishing(unassignedShards); + } + + private List getTotalAllocationsMetrics() { + return getIfPublishing(totalAllocations); + } + + private List getUndesiredAllocationsMetrics() { + return getIfPublishing(undesiredAllocations); + } + + private List getIfPublishing(long value) { + if (nodeIsMaster) { + return List.of(new LongWithAttributes(value)); + } + return List.of(); + } + + private List getUndesiredAllocationsRatioMetrics() { + if (nodeIsMaster) { + var total = totalAllocations; + var undesired = undesiredAllocations; + return List.of(new DoubleWithAttributes(total != 0 ? (double) undesired / total : 0.0)); + } + return List.of(); + } + + public void zeroAllMetrics() { + unassignedShards = 0; + totalAllocations = 0; + undesiredAllocations = 0; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index da52148919cdb..dced9214a3245 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -30,10 +30,6 @@ import org.elasticsearch.gateway.PriorityComparator; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.telemetry.metric.DoubleGauge; -import org.elasticsearch.telemetry.metric.DoubleWithAttributes; -import org.elasticsearch.telemetry.metric.LongGaugeMetric; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.threadpool.ThreadPool; import java.util.Comparator; @@ -73,23 +69,10 @@ public class DesiredBalanceReconciler { private double undesiredAllocationsLogThreshold; private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); + private final DesiredBalanceMetrics desiredBalanceMetrics; - // stats - /** - * Number of unassigned shards during last reconciliation - */ - protected final LongGaugeMetric unassignedShards; - /** - * Total number of assigned shards during last reconciliation - */ - protected final LongGaugeMetric totalAllocations; - /** - * Number of assigned shards during last reconciliation that are not allocated on desired node and need to be moved - */ - protected final LongGaugeMetric undesiredAllocations; - private final DoubleGauge undesiredAllocationsRatio; - - public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, MeterRegistry meterRegistry) { + public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, DesiredBalanceMetrics desiredBalanceMetrics) { + this.desiredBalanceMetrics = desiredBalanceMetrics; this.undesiredAllocationLogInterval = new FrequencyCappedAction( threadPool.relativeTimeInMillisSupplier(), TimeValue.timeValueMinutes(5) @@ -99,35 +82,6 @@ public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool thre UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, value -> this.undesiredAllocationsLogThreshold = value ); - - unassignedShards = LongGaugeMetric.create( - meterRegistry, - "es.allocator.desired_balance.shards.unassigned.current", - "Current number of unassigned shards", - "{shard}" - ); - totalAllocations = LongGaugeMetric.create( - meterRegistry, - "es.allocator.desired_balance.shards.current", - "Total number of shards", - "{shard}" - ); - undesiredAllocations = LongGaugeMetric.create( - meterRegistry, - "es.allocator.desired_balance.allocations.undesired.current", - "Total number of shards allocated on undesired nodes excluding shutting down nodes", - "{shard}" - ); - undesiredAllocationsRatio = meterRegistry.registerDoubleGauge( - "es.allocator.desired_balance.allocations.undesired.ratio", - "Ratio of undesired allocations to shard count excluding shutting down nodes", - "1", - () -> { - var total = totalAllocations.get(); - var undesired = undesiredAllocations.get(); - return new DoubleWithAttributes(total != 0 ? (double) undesired / total : 0.0); - } - ); } public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { @@ -578,9 +532,7 @@ private void balance() { } } - DesiredBalanceReconciler.this.unassignedShards.set(unassignedShards); - DesiredBalanceReconciler.this.undesiredAllocations.set(undesiredAllocationsExcludingShuttingDownNodes); - DesiredBalanceReconciler.this.totalAllocations.set(totalAllocations); + desiredBalanceMetrics.updateMetrics(unassignedShards, totalAllocations, undesiredAllocationsExcludingShuttingDownNodes); maybeLogUndesiredAllocationsWarning(totalAllocations, undesiredAllocationsExcludingShuttingDownNodes, routingNodes.size()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index ba16915f2ad2b..4171100191211 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -64,6 +64,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator { private volatile DesiredBalance currentDesiredBalance = DesiredBalance.INITIAL; private volatile boolean resetCurrentDesiredBalance = false; private final Set processedNodeShutdowns = new HashSet<>(); + private final DesiredBalanceMetrics desiredBalanceMetrics; // stats protected final CounterMetric computationsSubmitted = new CounterMetric(); @@ -104,6 +105,7 @@ public DesiredBalanceShardsAllocator( DesiredBalanceReconcilerAction reconciler, TelemetryProvider telemetryProvider ) { + this.desiredBalanceMetrics = new DesiredBalanceMetrics(telemetryProvider.getMeterRegistry()); this.delegateAllocator = delegateAllocator; this.threadPool = threadPool; this.reconciler = reconciler; @@ -111,7 +113,7 @@ public DesiredBalanceShardsAllocator( this.desiredBalanceReconciler = new DesiredBalanceReconciler( clusterService.getClusterSettings(), threadPool, - telemetryProvider.getMeterRegistry() + desiredBalanceMetrics ); this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) { @@ -168,6 +170,10 @@ public String toString() { if (event.localNodeMaster() == false) { onNoLongerMaster(); } + // Only update on change, to minimise volatile writes + if (event.localNodeMaster() != event.previousState().nodes().isLocalNodeElectedMaster()) { + desiredBalanceMetrics.setNodeIsMaster(event.localNodeMaster()); + } }); } @@ -306,9 +312,9 @@ public DesiredBalanceStats getStats() { computedShardMovements.sum(), cumulativeComputationTime.count(), cumulativeReconciliationTime.count(), - desiredBalanceReconciler.unassignedShards.get(), - desiredBalanceReconciler.totalAllocations.get(), - desiredBalanceReconciler.undesiredAllocations.get() + desiredBalanceMetrics.unassignedShards(), + desiredBalanceMetrics.totalAllocations(), + desiredBalanceMetrics.undesiredAllocations() ); } @@ -318,10 +324,7 @@ private void onNoLongerMaster() { queue.completeAllAsNotMaster(); pendingDesiredBalanceMoves.clear(); desiredBalanceReconciler.clear(); - - desiredBalanceReconciler.unassignedShards.set(0); - desiredBalanceReconciler.totalAllocations.set(0); - desiredBalanceReconciler.undesiredAllocations.set(0); + desiredBalanceMetrics.zeroAllMetrics(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java new file mode 100644 index 0000000000000..2c642da665051 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.routing.allocation.allocator; + +import org.elasticsearch.telemetry.InstrumentType; +import org.elasticsearch.telemetry.RecordingMeterRegistry; +import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; + +public class DesiredBalanceMetricsTests extends ESTestCase { + + public void testZeroAllMetrics() { + DesiredBalanceMetrics metrics = new DesiredBalanceMetrics(MeterRegistry.NOOP); + long unassignedShards = randomNonNegativeLong(); + long totalAllocations = randomNonNegativeLong(); + long undesiredAllocations = randomNonNegativeLong(); + metrics.updateMetrics(unassignedShards, totalAllocations, undesiredAllocations); + assertEquals(totalAllocations, metrics.totalAllocations()); + assertEquals(unassignedShards, metrics.unassignedShards()); + assertEquals(undesiredAllocations, metrics.undesiredAllocations()); + metrics.zeroAllMetrics(); + assertEquals(0, metrics.totalAllocations()); + assertEquals(0, metrics.unassignedShards()); + assertEquals(0, metrics.undesiredAllocations()); + } + + public void testMetricsAreOnlyPublishedWhenNodeIsMaster() { + RecordingMeterRegistry meterRegistry = new RecordingMeterRegistry(); + DesiredBalanceMetrics metrics = new DesiredBalanceMetrics(meterRegistry); + + long unassignedShards = randomNonNegativeLong(); + long totalAllocations = randomLongBetween(100, 10000000); + long undesiredAllocations = randomLongBetween(0, totalAllocations); + metrics.updateMetrics(unassignedShards, totalAllocations, undesiredAllocations); + + // Collect when not master + meterRegistry.getRecorder().collect(); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.UNDESIRED_ALLOCATION_COUNT_METRIC_NAME), + empty() + ); + assertThat( + meterRegistry.getRecorder().getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.TOTAL_SHARDS_METRIC_NAME), + empty() + ); + assertThat( + meterRegistry.getRecorder().getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.UNASSIGNED_SHARDS_METRIC_NAME), + empty() + ); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.DOUBLE_GAUGE, DesiredBalanceMetrics.UNDESIRED_ALLOCATION_RATIO_METRIC_NAME), + empty() + ); + + // Collect when master + metrics.setNodeIsMaster(true); + meterRegistry.getRecorder().collect(); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.UNDESIRED_ALLOCATION_COUNT_METRIC_NAME) + .getFirst() + .getLong(), + equalTo(undesiredAllocations) + ); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.TOTAL_SHARDS_METRIC_NAME) + .getFirst() + .getLong(), + equalTo(totalAllocations) + ); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.UNASSIGNED_SHARDS_METRIC_NAME) + .getFirst() + .getLong(), + equalTo(unassignedShards) + ); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.DOUBLE_GAUGE, DesiredBalanceMetrics.UNDESIRED_ALLOCATION_RATIO_METRIC_NAME) + .getFirst() + .getDouble(), + equalTo((double) undesiredAllocations / totalAllocations) + ); + } + + public void testUndesiredAllocationRatioIsZeroWhenTotalShardsIsZero() { + RecordingMeterRegistry meterRegistry = new RecordingMeterRegistry(); + DesiredBalanceMetrics metrics = new DesiredBalanceMetrics(meterRegistry); + long unassignedShards = randomNonNegativeLong(); + metrics.updateMetrics(unassignedShards, 0, 0); + + metrics.setNodeIsMaster(true); + meterRegistry.getRecorder().collect(); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.DOUBLE_GAUGE, DesiredBalanceMetrics.UNDESIRED_ALLOCATION_RATIO_METRIC_NAME) + .getFirst() + .getDouble(), + equalTo(0d) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index 1ae73c9c08137..b5f44ee9e505f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -66,7 +66,6 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.snapshots.SnapshotsInfoService; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.ThreadPool; @@ -1215,7 +1214,7 @@ public void testRebalanceDoesNotCauseHotSpots() { var reconciler = new DesiredBalanceReconciler( clusterSettings, new DeterministicTaskQueue().getThreadPool(), - mock(MeterRegistry.class) + DesiredBalanceMetrics.NOOP ); var totalOutgoingMoves = new HashMap(); @@ -1297,7 +1296,7 @@ public void testShouldLogOnTooManyUndesiredAllocations() { final var timeInMillisSupplier = new AtomicLong(); when(threadPool.relativeTimeInMillisSupplier()).thenReturn(timeInMillisSupplier::incrementAndGet); - var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, mock(MeterRegistry.class)); + var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP); final long initialDelayInMillis = TimeValue.timeValueMinutes(5).getMillis(); timeInMillisSupplier.addAndGet(randomLongBetween(initialDelayInMillis, 2 * initialDelayInMillis)); @@ -1349,7 +1348,7 @@ public void testShouldLogOnTooManyUndesiredAllocations() { private static void reconcile(RoutingAllocation routingAllocation, DesiredBalance desiredBalance) { final var threadPool = mock(ThreadPool.class); when(threadPool.relativeTimeInMillisSupplier()).thenReturn(new AtomicLong()::incrementAndGet); - new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, mock(MeterRegistry.class)).reconcile( + new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP).reconcile( desiredBalance, routingAllocation ); From 6e67da52d1d3a26ede8065d9b33ed2173b081eed Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 10:02:47 +0200 Subject: [PATCH 042/324] =?UTF-8?q?[DOCS=E2=80=93=20Fix=20typoUpdate=20tra?= =?UTF-8?q?inedmodel.asciidoc=20(#115420)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/elastic/elasticsearch/issues/114968 --- docs/reference/cat/trainedmodel.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cat/trainedmodel.asciidoc b/docs/reference/cat/trainedmodel.asciidoc index 45c87038f5d64..5b20a0b6e842f 100644 --- a/docs/reference/cat/trainedmodel.asciidoc +++ b/docs/reference/cat/trainedmodel.asciidoc @@ -116,7 +116,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-v] [source,console] -------------------------------------------------- -GET _cat/ml/trained_models?h=c,o,l,ct,v&v=ture +GET _cat/ml/trained_models?h=c,o,l,ct,v&v=true -------------------------------------------------- // TEST[skip:kibana sample data] From a281d62988f190bb9e25361325e060e0c38d15cc Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 10:26:16 +0200 Subject: [PATCH 043/324] Remove auto_release_flood_stage_block property check (#114696) There's no option to disable the auto release of the write block when a node exceeds the flood-stage watermark. This property was deprecated in #45274 (8.0) --- .../routing/allocation/DiskThresholdSettings.java | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java index d1d6a9761a758..57abbb8b8ed94 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.unit.RelativeByteSizeValue; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import java.io.IOException; import java.util.Iterator; @@ -156,19 +155,6 @@ public class DiskThresholdSettings implements Writeable { private volatile boolean enabled; private volatile TimeValue rerouteInterval; - static { - checkAutoReleaseIndexEnabled(); - } - - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // this check is unnecessary in v9 - private static void checkAutoReleaseIndexEnabled() { - final String AUTO_RELEASE_INDEX_ENABLED_KEY = "es.disk.auto_release_flood_stage_block"; - final String property = System.getProperty(AUTO_RELEASE_INDEX_ENABLED_KEY); - if (property != null) { - throw new IllegalArgumentException("system property [" + AUTO_RELEASE_INDEX_ENABLED_KEY + "] may not be set"); - } - } - public DiskThresholdSettings(Settings settings, ClusterSettings clusterSettings) { setLowWatermark(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.get(settings)); setLowStageMaxHeadroom(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.get(settings)); From cc6e7415c1d8d145f1ae65d58dbb72c3a439d32e Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 24 Oct 2024 09:44:45 +0100 Subject: [PATCH 044/324] Remove security bootstrap check that uses Version (#114923) --- .../elasticsearch/common/ReferenceDocs.java | 1 - .../org/elasticsearch/env/BuildVersion.java | 7 -- .../env/DefaultBuildVersion.java | 5 -- .../common/reference-docs-links.txt | 1 - .../xpack/security/Security.java | 12 ++- ...ecurityImplicitBehaviorBootstrapCheck.java | 67 ----------------- ...tyImplicitBehaviorBootstrapCheckTests.java | 73 ------------------- 7 files changed, 5 insertions(+), 161 deletions(-) delete mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index 43acda1e1ec2d..926056fec3ec8 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -66,7 +66,6 @@ public enum ReferenceDocs { BOOTSTRAP_CHECK_ROLE_MAPPINGS, BOOTSTRAP_CHECK_TLS, BOOTSTRAP_CHECK_TOKEN_SSL, - BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP, CONTACT_SUPPORT, UNASSIGNED_SHARDS, EXECUTABLE_JNA_TMPDIR, diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index 42c45a14977eb..3fdf01d7e1bae 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -58,13 +58,6 @@ public abstract class BuildVersion { */ public abstract boolean isFutureVersion(); - // temporary - // TODO[wrb]: remove from security bootstrap checks - @Deprecated - public Version toVersion() { - return null; - } - /** * Create a {@link BuildVersion} from a version ID number. * diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index dcc5ed3aee3f8..f31b34e89c01d 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -52,11 +52,6 @@ public int id() { return versionId; } - @Override - public Version toVersion() { - return version; - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt index 3b0816aabf4aa..f9a8237d63717 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt @@ -28,7 +28,6 @@ BOOTSTRAP_CHECK_PKI_REALM bootstrap-checks BOOTSTRAP_CHECK_ROLE_MAPPINGS bootstrap-checks-xpack.html#bootstrap-checks-xpack-role-mappings BOOTSTRAP_CHECK_TLS bootstrap-checks-xpack.html#bootstrap-checks-tls BOOTSTRAP_CHECK_TOKEN_SSL bootstrap-checks-xpack.html#bootstrap-checks-xpack-token-ssl -BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP security-minimal-setup.html CONTACT_SUPPORT troubleshooting.html#troubleshooting-contact-support UNASSIGNED_SHARDS red-yellow-cluster-status.html EXECUTABLE_JNA_TMPDIR executable-jna-tmpdir.html diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 8f32bcf7ace8a..0b387a738a2c5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -810,13 +810,11 @@ Collection createComponents( // We need to construct the checks here while the secure settings are still available. // If we wait until #getBoostrapChecks the secure settings will have been cleared/closed. final List checks = new ArrayList<>(); - checks.addAll( - Arrays.asList( - new TokenSSLBootstrapCheck(), - new PkiRealmBootstrapCheck(getSslService()), - new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, getLicenseService()), - new TransportTLSBootstrapCheck() - ) + Collections.addAll( + checks, + new TokenSSLBootstrapCheck(), + new PkiRealmBootstrapCheck(getSslService()), + new TransportTLSBootstrapCheck() ); checks.addAll(InternalRealms.getBootstrapChecks(settings, environment)); this.bootstrapChecks.set(Collections.unmodifiableList(checks)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java deleted file mode 100644 index 2d535100d468d..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.Version; -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.common.ReferenceDocs; -import org.elasticsearch.env.NodeMetadata; -import org.elasticsearch.license.ClusterStateLicenseService; -import org.elasticsearch.license.License; -import org.elasticsearch.license.LicenseService; -import org.elasticsearch.xpack.core.XPackSettings; - -public class SecurityImplicitBehaviorBootstrapCheck implements BootstrapCheck { - - private final NodeMetadata nodeMetadata; - private final LicenseService licenseService; - - public SecurityImplicitBehaviorBootstrapCheck(NodeMetadata nodeMetadata, LicenseService licenseService) { - this.nodeMetadata = nodeMetadata; - this.licenseService = licenseService; - } - - @Override - public BootstrapCheckResult check(BootstrapContext context) { - if (nodeMetadata == null) { - return BootstrapCheckResult.success(); - } - if (licenseService instanceof ClusterStateLicenseService clusterStateLicenseService) { - final License license = clusterStateLicenseService.getLicense(context.metadata()); - // TODO[wrb]: Add an "isCurrentMajor" method to BuildVersion? - final Version lastKnownVersion = nodeMetadata.previousNodeVersion().toVersion(); - // pre v7.2.0 nodes have Version.EMPTY and its id is 0, so Version#before handles this successfully - if (lastKnownVersion.before(Version.V_8_0_0) - && XPackSettings.SECURITY_ENABLED.exists(context.settings()) == false - && (license.operationMode() == License.OperationMode.BASIC || license.operationMode() == License.OperationMode.TRIAL)) { - return BootstrapCheckResult.failure( - "The default value for [" - + XPackSettings.SECURITY_ENABLED.getKey() - + "] has changed in the current version. " - + " Security features were implicitly disabled for this node but they would now be enabled, possibly" - + " preventing access to the node. " - + "See " - + this.referenceDocs() - + " to configure security, or explicitly disable security by " - + "setting [xpack.security.enabled] to \"false\" in elasticsearch.yml before restarting the node." - ); - } - } - return BootstrapCheckResult.success(); - } - - public boolean alwaysEnforce() { - return true; - } - - @Override - public ReferenceDocs referenceDocs() { - return ReferenceDocs.BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP; - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java deleted file mode 100644 index 85e8d6dd38125..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.Version; -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.env.BuildVersion; -import org.elasticsearch.env.NodeMetadata; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.license.ClusterStateLicenseService; -import org.elasticsearch.license.License; -import org.elasticsearch.license.LicensesMetadata; -import org.elasticsearch.license.TestUtils; -import org.elasticsearch.license.internal.TrialLicenseVersion; -import org.elasticsearch.test.AbstractBootstrapCheckTestCase; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.xpack.core.XPackSettings; - -import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; - -public class SecurityImplicitBehaviorBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - - @UpdateForV9(owner = UpdateForV9.Owner.SECURITY) - @AwaitsFix(bugUrl = "requires updates for version 9.0 bump") - public void testUpgradeFrom8xWithImplicitSecuritySettings() throws Exception { - final BuildVersion previousVersion = toBuildVersion(VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null)); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); - nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); - ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); - BootstrapCheck.BootstrapCheckResult result = new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, licenseService).check( - createTestContext( - Settings.EMPTY, - createLicensesMetadata(TrialLicenseVersion.fromXContent(previousVersion.toString()), randomFrom("basic", "trial")) - ) - ); - assertThat(result.isSuccess(), is(true)); - } - - @UpdateForV9(owner = UpdateForV9.Owner.SECURITY) - @AwaitsFix(bugUrl = "requires updates for version 9.0 bump") - public void testUpgradeFrom8xWithExplicitSecuritySettings() throws Exception { - final BuildVersion previousVersion = toBuildVersion(VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null)); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); - nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); - ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); - BootstrapCheck.BootstrapCheckResult result = new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, licenseService).check( - createTestContext( - Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build(), - createLicensesMetadata(TrialLicenseVersion.fromXContent(previousVersion.toString()), randomFrom("basic", "trial")) - ) - ); - assertThat(result.isSuccess(), is(true)); - } - - private Metadata createLicensesMetadata(TrialLicenseVersion era, String licenseMode) throws Exception { - License license = TestUtils.generateSignedLicense(licenseMode, TimeValue.timeValueHours(2)); - return Metadata.builder().putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, era)).build(); - } - - private static BuildVersion toBuildVersion(Version version) { - return BuildVersion.fromVersionId(version.id()); - } -} From 7599d4cf43a45407bef3d88b2a0f5de8706fb540 Mon Sep 17 00:00:00 2001 From: Nick Tindall Date: Thu, 24 Oct 2024 19:51:52 +1100 Subject: [PATCH 045/324] Use Azure blob batch API to delete blobs in batches (#114566) Closes ES-9777 --- docs/changelog/114566.yaml | 5 + .../repository-azure.asciidoc | 9 + gradle/verification-metadata.xml | 5 + modules/repository-azure/build.gradle | 1 + .../AzureBlobStoreRepositoryMetricsTests.java | 91 +++++++++ .../azure/AzureBlobStoreRepositoryTests.java | 24 ++- .../AzureStorageCleanupThirdPartyTests.java | 5 + .../src/main/java/module-info.java | 5 +- .../azure/AzureBlobContainer.java | 2 +- .../repositories/azure/AzureBlobStore.java | 175 ++++++++++-------- .../azure/AzureClientProvider.java | 14 ++ .../repositories/azure/AzureRepository.java | 15 ++ .../azure/AzureBlobContainerStatsTests.java | 10 + .../java/fixture/azure/AzureHttpHandler.java | 101 ++++++++++ 14 files changed, 374 insertions(+), 88 deletions(-) create mode 100644 docs/changelog/114566.yaml diff --git a/docs/changelog/114566.yaml b/docs/changelog/114566.yaml new file mode 100644 index 0000000000000..6007152bb26ca --- /dev/null +++ b/docs/changelog/114566.yaml @@ -0,0 +1,5 @@ +pr: 114566 +summary: Use Azure blob batch API to delete blobs in batches +area: Distributed +type: enhancement +issues: [] diff --git a/docs/reference/snapshot-restore/repository-azure.asciidoc b/docs/reference/snapshot-restore/repository-azure.asciidoc index c361414052e14..0e6e1478cfc55 100644 --- a/docs/reference/snapshot-restore/repository-azure.asciidoc +++ b/docs/reference/snapshot-restore/repository-azure.asciidoc @@ -259,6 +259,15 @@ include::repository-shared-settings.asciidoc[] `primary_only` or `secondary_only`. Defaults to `primary_only`. Note that if you set it to `secondary_only`, it will force `readonly` to true. +`delete_objects_max_size`:: + + (integer) Sets the maxmimum batch size, betewen 1 and 256, used for `BlobBatch` requests. Defaults to 256 which is the maximum + number supported by the https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch#remarks[Azure blob batch API]. + +`max_concurrent_batch_deletes`:: + + (integer) Sets the maximum number of concurrent batch delete requests that will be submitted for any individual bulk delete with `BlobBatch`. Note that the effective number of concurrent deletes is further limited by the Azure client connection and event loop thread limits. Defaults to 10, minimum is 1, maximum is 100. + [[repository-azure-validation]] ==== Repository validation rules diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index e2dfa89c8f3b8..5cfe7adb5ea49 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -144,6 +144,11 @@ + + + + + diff --git a/modules/repository-azure/build.gradle b/modules/repository-azure/build.gradle index eb938f663c810..d011de81f4fb3 100644 --- a/modules/repository-azure/build.gradle +++ b/modules/repository-azure/build.gradle @@ -30,6 +30,7 @@ dependencies { api "com.azure:azure-identity:1.13.2" api "com.azure:azure-json:1.2.0" api "com.azure:azure-storage-blob:12.27.1" + api "com.azure:azure-storage-blob-batch:12.23.1" api "com.azure:azure-storage-common:12.26.1" api "com.azure:azure-storage-internal-avro:12.12.1" api "com.azure:azure-xml:1.1.0" diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java index a9bf0afa37e18..61940be247861 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java @@ -9,14 +9,18 @@ package org.elasticsearch.repositories.azure; +import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.OperationPurpose; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesMetrics; @@ -31,6 +35,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Queue; @@ -43,6 +48,7 @@ import java.util.stream.IntStream; import static org.elasticsearch.repositories.azure.AbstractAzureServerTestCase.randomBlobContent; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -225,6 +231,91 @@ public void testRequestTimeIsAccurate() throws IOException { assertThat(recordedRequestTime, lessThanOrEqualTo(elapsedTimeMillis)); } + public void testBatchDeleteFailure() throws IOException { + final int deleteBatchSize = randomIntBetween(1, 30); + final String repositoryName = randomRepositoryName(); + final String repository = createRepository( + repositoryName, + Settings.builder() + .put(repositorySettings(repositoryName)) + .put(AzureRepository.Repository.DELETION_BATCH_SIZE_SETTING.getKey(), deleteBatchSize) + .build(), + true + ); + final String dataNodeName = internalCluster().getNodeNameThat(DiscoveryNode::canContainData); + final BlobContainer container = getBlobContainer(dataNodeName, repository); + + final List blobsToDelete = new ArrayList<>(); + final int numberOfBatches = randomIntBetween(3, 20); + final int numberOfBlobs = numberOfBatches * deleteBatchSize; + final int failedBatches = randomIntBetween(1, numberOfBatches); + for (int i = 0; i < numberOfBlobs; i++) { + byte[] bytes = randomBytes(randomInt(100)); + String blobName = "index-" + randomAlphaOfLength(10); + container.writeBlob(randomPurpose(), blobName, new BytesArray(bytes), false); + blobsToDelete.add(blobName); + } + Randomness.shuffle(blobsToDelete); + clearMetrics(dataNodeName); + + // Handler will fail one or more of the batch requests + final RequestHandler failNRequestRequestHandler = createFailNRequestsHandler(failedBatches); + + // Exhaust the retries + IntStream.range(0, (numberOfBatches - failedBatches) + (failedBatches * (MAX_RETRIES + 1))) + .forEach(i -> requestHandlers.offer(failNRequestRequestHandler)); + + logger.info("--> Failing {} of {} batches", failedBatches, numberOfBatches); + + final IOException exception = assertThrows( + IOException.class, + () -> container.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobsToDelete.iterator()) + ); + assertEquals(Math.min(failedBatches, 10), exception.getSuppressed().length); + assertEquals( + (numberOfBatches - failedBatches) + (failedBatches * (MAX_RETRIES + 1L)), + getLongCounterTotal(dataNodeName, RepositoriesMetrics.METRIC_REQUESTS_TOTAL) + ); + assertEquals((failedBatches * (MAX_RETRIES + 1L)), getLongCounterTotal(dataNodeName, RepositoriesMetrics.METRIC_EXCEPTIONS_TOTAL)); + assertEquals(failedBatches * deleteBatchSize, container.listBlobs(randomPurpose()).size()); + } + + private long getLongCounterTotal(String dataNodeName, String metricKey) { + return getTelemetryPlugin(dataNodeName).getLongCounterMeasurement(metricKey) + .stream() + .mapToLong(Measurement::getLong) + .reduce(0L, Long::sum); + } + + /** + * Creates a {@link RequestHandler} that will persistently fail the first numberToFail distinct requests + * it sees. Any other requests are passed through to the delegate. + * + * @param numberToFail The number of requests to fail + * @return the handler + */ + private static RequestHandler createFailNRequestsHandler(int numberToFail) { + final List requestsToFail = new ArrayList<>(numberToFail); + return (exchange, delegate) -> { + final Headers requestHeaders = exchange.getRequestHeaders(); + final String requestId = requestHeaders.get("X-ms-client-request-id").get(0); + boolean failRequest = false; + synchronized (requestsToFail) { + if (requestsToFail.contains(requestId)) { + failRequest = true; + } else if (requestsToFail.size() < numberToFail) { + requestsToFail.add(requestId); + failRequest = true; + } + } + if (failRequest) { + exchange.sendResponseHeaders(500, -1); + } else { + delegate.handle(exchange); + } + }; + } + private void clearMetrics(String discoveryNode) { internalCluster().getInstance(PluginsService.class, discoveryNode) .filterPlugins(TestTelemetryPlugin.class) diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 473d91da6e34c..bd21f208faac4 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -89,7 +89,9 @@ protected Settings repositorySettings(String repoName) { .put(super.repositorySettings(repoName)) .put(AzureRepository.Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.MB)) .put(AzureRepository.Repository.CONTAINER_SETTING.getKey(), "container") - .put(AzureStorageSettings.ACCOUNT_SETTING.getKey(), "test"); + .put(AzureStorageSettings.ACCOUNT_SETTING.getKey(), "test") + .put(AzureRepository.Repository.DELETION_BATCH_SIZE_SETTING.getKey(), randomIntBetween(5, 256)) + .put(AzureRepository.Repository.MAX_CONCURRENT_BATCH_DELETES_SETTING.getKey(), randomIntBetween(1, 10)); if (randomBoolean()) { settingsBuilder.put(AzureRepository.Repository.BASE_PATH_SETTING.getKey(), randomFrom("test", "test/1")); } @@ -249,6 +251,8 @@ protected void maybeTrack(String request, Headers headers) { trackRequest("PutBlockList"); } else if (Regex.simpleMatch("PUT /*/*", request)) { trackRequest("PutBlob"); + } else if (Regex.simpleMatch("POST /*/*?*comp=batch*", request)) { + trackRequest("BlobBatch"); } } @@ -279,10 +283,22 @@ public void testLargeBlobCountDeletion() throws Exception { } public void testDeleteBlobsIgnoringIfNotExists() throws Exception { - try (BlobStore store = newBlobStore()) { + // Test with a smaller batch size here + final int deleteBatchSize = randomIntBetween(1, 30); + final String repositoryName = randomRepositoryName(); + createRepository( + repositoryName, + Settings.builder() + .put(repositorySettings(repositoryName)) + .put(AzureRepository.Repository.DELETION_BATCH_SIZE_SETTING.getKey(), deleteBatchSize) + .build(), + true + ); + try (BlobStore store = newBlobStore(repositoryName)) { final BlobContainer container = store.blobContainer(BlobPath.EMPTY); - List blobsToDelete = new ArrayList<>(); - for (int i = 0; i < 10; i++) { + final int toDeleteCount = randomIntBetween(deleteBatchSize, 3 * deleteBatchSize); + final List blobsToDelete = new ArrayList<>(); + for (int i = 0; i < toDeleteCount; i++) { byte[] bytes = randomBytes(randomInt(100)); String blobName = randomAlphaOfLength(10); container.writeBlob(randomPurpose(), blobName, new BytesArray(bytes), false); diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index abd4f506a0bb3..6d5c17c392141 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -30,6 +30,8 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Booleans; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; @@ -46,6 +48,7 @@ import static org.hamcrest.Matchers.not; public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + private static final Logger logger = LogManager.getLogger(AzureStorageCleanupThirdPartyTests.class); private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true")); private static final String AZURE_ACCOUNT = System.getProperty("test.azure.account"); @@ -89,8 +92,10 @@ protected SecureSettings credentials() { MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("azure.client.default.account", System.getProperty("test.azure.account")); if (hasSasToken) { + logger.info("--> Using SAS token authentication"); secureSettings.setString("azure.client.default.sas_token", System.getProperty("test.azure.sas_token")); } else { + logger.info("--> Using key authentication"); secureSettings.setString("azure.client.default.key", System.getProperty("test.azure.key")); } return secureSettings; diff --git a/modules/repository-azure/src/main/java/module-info.java b/modules/repository-azure/src/main/java/module-info.java index cd6be56b71543..731f1e0a9986a 100644 --- a/modules/repository-azure/src/main/java/module-info.java +++ b/modules/repository-azure/src/main/java/module-info.java @@ -18,10 +18,7 @@ requires org.apache.logging.log4j; requires org.apache.logging.log4j.core; - requires com.azure.core; requires com.azure.http.netty; - requires com.azure.storage.blob; - requires com.azure.storage.common; requires com.azure.identity; requires io.netty.buffer; @@ -29,7 +26,7 @@ requires io.netty.resolver; requires io.netty.common; - requires reactor.core; requires reactor.netty.core; requires reactor.netty.http; + requires com.azure.storage.blob.batch; } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index a3f26424324fa..52bc1ee1399d4 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -138,7 +138,7 @@ public void writeMetadataBlob( } @Override - public DeleteResult delete(OperationPurpose purpose) { + public DeleteResult delete(OperationPurpose purpose) throws IOException { return blobStore.deleteBlobDirectory(purpose, keyPath); } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 829868797e38c..3c64bb9f3b830 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -25,6 +25,10 @@ import com.azure.storage.blob.BlobContainerClient; import com.azure.storage.blob.BlobServiceAsyncClient; import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.batch.BlobBatch; +import com.azure.storage.blob.batch.BlobBatchAsyncClient; +import com.azure.storage.blob.batch.BlobBatchClientBuilder; +import com.azure.storage.blob.batch.BlobBatchStorageException; import com.azure.storage.blob.models.BlobErrorCode; import com.azure.storage.blob.models.BlobItem; import com.azure.storage.blob.models.BlobItemProperties; @@ -99,6 +103,8 @@ public class AzureBlobStore implements BlobStore { private static final Logger logger = LogManager.getLogger(AzureBlobStore.class); + // See https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch#request-body + public static final int MAX_ELEMENTS_PER_BATCH = 256; private static final long DEFAULT_READ_CHUNK_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB).getBytes(); private static final int DEFAULT_UPLOAD_BUFFERS_SIZE = (int) new ByteSizeValue(64, ByteSizeUnit.KB).getBytes(); @@ -110,6 +116,8 @@ public class AzureBlobStore implements BlobStore { private final String container; private final LocationMode locationMode; private final ByteSizeValue maxSinglePartUploadSize; + private final int deletionBatchSize; + private final int maxConcurrentBatchDeletes; private final RequestMetricsRecorder requestMetricsRecorder; private final AzureClientProvider.RequestMetricsHandler requestMetricsHandler; @@ -129,6 +137,8 @@ public AzureBlobStore( // locationMode is set per repository, not per client this.locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); this.maxSinglePartUploadSize = Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.get(metadata.settings()); + this.deletionBatchSize = Repository.DELETION_BATCH_SIZE_SETTING.get(metadata.settings()); + this.maxConcurrentBatchDeletes = Repository.MAX_CONCURRENT_BATCH_DELETES_SETTING.get(metadata.settings()); List requestMatchers = List.of( new RequestMatcher((httpMethod, url) -> httpMethod == HttpMethod.HEAD, Operation.GET_BLOB_PROPERTIES), @@ -147,17 +157,14 @@ public AzureBlobStore( && isPutBlockRequest(httpMethod, url) == false && isPutBlockListRequest(httpMethod, url) == false, Operation.PUT_BLOB - ) + ), + new RequestMatcher(AzureBlobStore::isBlobBatch, Operation.BLOB_BATCH) ); this.requestMetricsHandler = (purpose, method, url, metrics) -> { try { URI uri = url.toURI(); String path = uri.getPath() == null ? "" : uri.getPath(); - // Batch delete requests - if (path.contains(container) == false) { - return; - } assert path.contains(container) : uri.toString(); } catch (URISyntaxException ignored) { return; @@ -172,6 +179,10 @@ && isPutBlockListRequest(httpMethod, url) == false, }; } + private static boolean isBlobBatch(HttpMethod method, URL url) { + return method == HttpMethod.POST && url.getQuery() != null && url.getQuery().contains("comp=batch"); + } + private static boolean isListRequest(HttpMethod httpMethod, URL url) { return httpMethod == HttpMethod.GET && url.getQuery() != null && url.getQuery().contains("comp=list"); } @@ -231,95 +242,101 @@ public boolean blobExists(OperationPurpose purpose, String blob) throws IOExcept } } - // number of concurrent blob delete requests to use while bulk deleting - private static final int CONCURRENT_DELETES = 100; - - public DeleteResult deleteBlobDirectory(OperationPurpose purpose, String path) { + public DeleteResult deleteBlobDirectory(OperationPurpose purpose, String path) throws IOException { final AtomicInteger blobsDeleted = new AtomicInteger(0); final AtomicLong bytesDeleted = new AtomicLong(0); SocketAccess.doPrivilegedVoidException(() -> { - final BlobContainerAsyncClient blobContainerAsyncClient = asyncClient(purpose).getBlobContainerAsyncClient(container); + final AzureBlobServiceClient client = getAzureBlobServiceClientClient(purpose); + final BlobContainerAsyncClient blobContainerAsyncClient = client.getAsyncClient().getBlobContainerAsyncClient(container); final ListBlobsOptions options = new ListBlobsOptions().setPrefix(path) .setDetails(new BlobListDetails().setRetrieveMetadata(true)); - try { - blobContainerAsyncClient.listBlobs(options, null).flatMap(blobItem -> { - if (blobItem.isPrefix() != null && blobItem.isPrefix()) { - return Mono.empty(); - } else { - final String blobName = blobItem.getName(); - BlobAsyncClient blobAsyncClient = blobContainerAsyncClient.getBlobAsyncClient(blobName); - final Mono deleteTask = getDeleteTask(blobName, blobAsyncClient); - bytesDeleted.addAndGet(blobItem.getProperties().getContentLength()); - blobsDeleted.incrementAndGet(); - return deleteTask; - } - }, CONCURRENT_DELETES).then().block(); - } catch (Exception e) { - filterDeleteExceptionsAndRethrow(e, new IOException("Deleting directory [" + path + "] failed")); - } + final Flux blobsFlux = blobContainerAsyncClient.listBlobs(options).filter(bi -> bi.isPrefix() == false).map(bi -> { + bytesDeleted.addAndGet(bi.getProperties().getContentLength()); + blobsDeleted.incrementAndGet(); + return bi.getName(); + }); + deleteListOfBlobs(client, blobsFlux); }); return new DeleteResult(blobsDeleted.get(), bytesDeleted.get()); } - private static void filterDeleteExceptionsAndRethrow(Exception e, IOException exception) throws IOException { - int suppressedCount = 0; - for (Throwable suppressed : e.getSuppressed()) { - // We're only interested about the blob deletion exceptions and not in the reactor internals exceptions - if (suppressed instanceof IOException) { - exception.addSuppressed(suppressed); - suppressedCount++; - if (suppressedCount > 10) { - break; - } - } + @Override + public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator blobNames) throws IOException { + if (blobNames.hasNext() == false) { + return; + } + SocketAccess.doPrivilegedVoidException( + () -> deleteListOfBlobs( + getAzureBlobServiceClientClient(purpose), + Flux.fromStream(StreamSupport.stream(Spliterators.spliteratorUnknownSize(blobNames, Spliterator.ORDERED), false)) + ) + ); + } + + private void deleteListOfBlobs(AzureBlobServiceClient azureBlobServiceClient, Flux blobNames) throws IOException { + // We need to use a container-scoped BlobBatchClient, so the restype=container parameter + // is sent, and we can support all SAS token types + // See https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch?tabs=shared-access-signatures#authorization + final BlobBatchAsyncClient batchAsyncClient = new BlobBatchClientBuilder( + azureBlobServiceClient.getAsyncClient().getBlobContainerAsyncClient(container) + ).buildAsyncClient(); + final List errors; + final AtomicInteger errorsCollected = new AtomicInteger(0); + try { + errors = blobNames.buffer(deletionBatchSize).flatMap(blobs -> { + final BlobBatch blobBatch = batchAsyncClient.getBlobBatch(); + blobs.forEach(blob -> blobBatch.deleteBlob(container, blob)); + return batchAsyncClient.submitBatch(blobBatch).then(Mono.empty()).onErrorResume(t -> { + // Ignore errors that are just 404s, send other errors downstream as values + if (AzureBlobStore.isIgnorableBatchDeleteException(t)) { + return Mono.empty(); + } else { + // Propagate the first 10 errors only + if (errorsCollected.getAndIncrement() < 10) { + return Mono.just(t); + } else { + return Mono.empty(); + } + } + }); + }, maxConcurrentBatchDeletes).collectList().block(); + } catch (Exception e) { + throw new IOException("Error deleting batches", e); + } + if (errors.isEmpty() == false) { + final int totalErrorCount = errorsCollected.get(); + final String errorMessage = totalErrorCount > errors.size() + ? "Some errors occurred deleting batches, the first " + + errors.size() + + " are included as suppressed, but the total count was " + + totalErrorCount + : "Some errors occurred deleting batches, all errors included as suppressed"; + final IOException ex = new IOException(errorMessage); + errors.forEach(ex::addSuppressed); + throw ex; } - throw exception; } /** - * {@inheritDoc} - *

- * Note that in this Azure implementation we issue a series of individual - * delete blob calls rather than aggregating - * deletions into blob batch calls. - * The reason for this is that the blob batch endpoint has limited support for SAS token authentication. + * We can ignore {@link BlobBatchStorageException}s when they are just telling us some of the files were not found * - * @see - * API docs around SAS auth limitations - * @see Java SDK issue - * @see Discussion on implementing PR + * @param exception An exception throw by batch delete + * @return true if it is safe to ignore, false otherwise */ - @Override - public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator blobs) { - if (blobs.hasNext() == false) { - return; - } - - BlobServiceAsyncClient asyncClient = asyncClient(purpose); - SocketAccess.doPrivilegedVoidException(() -> { - final BlobContainerAsyncClient blobContainerClient = asyncClient.getBlobContainerAsyncClient(container); - try { - Flux.fromStream(StreamSupport.stream(Spliterators.spliteratorUnknownSize(blobs, Spliterator.ORDERED), false)) - .flatMap(blob -> getDeleteTask(blob, blobContainerClient.getBlobAsyncClient(blob)), CONCURRENT_DELETES) - .then() - .block(); - } catch (Exception e) { - filterDeleteExceptionsAndRethrow(e, new IOException("Unable to delete blobs")); + private static boolean isIgnorableBatchDeleteException(Throwable exception) { + if (exception instanceof BlobBatchStorageException bbse) { + final Iterable batchExceptions = bbse.getBatchExceptions(); + for (BlobStorageException bse : batchExceptions) { + // If any requests failed with something other than a BLOB_NOT_FOUND, it is not ignorable + if (BlobErrorCode.BLOB_NOT_FOUND.equals(bse.getErrorCode()) == false) { + return false; + } } - }); - } - - private static Mono getDeleteTask(String blobName, BlobAsyncClient blobAsyncClient) { - return blobAsyncClient.delete() - // Ignore not found blobs, as it's possible that due to network errors a request - // for an already deleted blob is retried, causing an error. - .onErrorResume( - e -> e instanceof BlobStorageException blobStorageException && blobStorageException.getStatusCode() == 404, - throwable -> Mono.empty() - ) - .onErrorMap(throwable -> new IOException("Error deleting blob " + blobName, throwable)); + return true; + } + return false; } public InputStream getInputStream(OperationPurpose purpose, String blob, long position, final @Nullable Long length) { @@ -363,8 +380,7 @@ public Map listBlobsByPrefix(OperationPurpose purpose, Str for (final BlobItem blobItem : containerClient.listBlobsByHierarchy("/", listBlobsOptions, null)) { BlobItemProperties properties = blobItem.getProperties(); - Boolean isPrefix = blobItem.isPrefix(); - if (isPrefix != null && isPrefix) { + if (blobItem.isPrefix()) { continue; } String blobName = blobItem.getName().substring(keyPath.length()); @@ -689,7 +705,8 @@ enum Operation { GET_BLOB_PROPERTIES("GetBlobProperties"), PUT_BLOB("PutBlob"), PUT_BLOCK("PutBlock"), - PUT_BLOCK_LIST("PutBlockList"); + PUT_BLOCK_LIST("PutBlockList"), + BLOB_BATCH("BlobBatch"); private final String key; diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java index 654742c980268..f92bbcbdd716d 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java @@ -317,6 +317,11 @@ private enum RetryMetricsTracker implements HttpPipelinePolicy { @Override public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + if (requestIsPartOfABatch(context)) { + // Batch deletes fire once for each of the constituent requests, and they have a null response. Ignore those, we'll track + // metrics at the bulk level. + return next.process(); + } Optional metricsData = context.getData(RequestMetricsTracker.ES_REQUEST_METRICS_CONTEXT_KEY); if (metricsData.isPresent() == false) { assert false : "No metrics object associated with request " + context.getHttpRequest(); @@ -361,6 +366,11 @@ private RequestMetricsTracker(OperationPurpose purpose, RequestMetricsHandler re @Override public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + if (requestIsPartOfABatch(context)) { + // Batch deletes fire once for each of the constituent requests, and they have a null response. Ignore those, we'll track + // metrics at the bulk level. + return next.process(); + } final RequestMetrics requestMetrics = new RequestMetrics(); context.setData(ES_REQUEST_METRICS_CONTEXT_KEY, requestMetrics); return next.process().doOnSuccess((httpResponse) -> { @@ -389,6 +399,10 @@ public HttpPipelinePosition getPipelinePosition() { } } + private static boolean requestIsPartOfABatch(HttpPipelineCallContext context) { + return context.getData("Batch-Operation-Info").isPresent(); + } + /** * The {@link RequestMetricsTracker} calls this when a request completes */ diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 80e662343baee..316db4844e598 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -87,6 +87,21 @@ public static final class Repository { DEFAULT_MAX_SINGLE_UPLOAD_SIZE, Property.NodeScope ); + + /** + * The batch size for batched delete requests + */ + static final Setting DELETION_BATCH_SIZE_SETTING = Setting.intSetting( + "delete_objects_max_size", + AzureBlobStore.MAX_ELEMENTS_PER_BATCH, + 1, + AzureBlobStore.MAX_ELEMENTS_PER_BATCH + ); + + /** + * The maximum number of concurrent batch deletes + */ + static final Setting MAX_CONCURRENT_BATCH_DELETES_SETTING = Setting.intSetting("max_concurrent_batch_deletes", 10, 1, 100); } private final ByteSizeValue chunkSize; diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java index 1ed01bbadc07e..6730e5c3c81bd 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.List; import java.util.Map; public class AzureBlobContainerStatsTests extends AbstractAzureServerTestCase { @@ -47,6 +48,8 @@ public void testOperationPurposeIsReflectedInBlobStoreStats() throws IOException os.write(blobContent); os.flush(); }); + // BLOB_BATCH + blobStore.deleteBlobsIgnoringIfNotExists(purpose, List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator()); Map stats = blobStore.stats(); String statsMapString = stats.toString(); @@ -55,6 +58,7 @@ public void testOperationPurposeIsReflectedInBlobStoreStats() throws IOException assertEquals(statsMapString, Long.valueOf(1L), stats.get(statsKey(purpose, AzureBlobStore.Operation.GET_BLOB_PROPERTIES))); assertEquals(statsMapString, Long.valueOf(1L), stats.get(statsKey(purpose, AzureBlobStore.Operation.PUT_BLOCK))); assertEquals(statsMapString, Long.valueOf(1L), stats.get(statsKey(purpose, AzureBlobStore.Operation.PUT_BLOCK_LIST))); + assertEquals(statsMapString, Long.valueOf(1L), stats.get(statsKey(purpose, AzureBlobStore.Operation.BLOB_BATCH))); } public void testOperationPurposeIsNotReflectedInBlobStoreStatsWhenNotServerless() throws IOException { @@ -79,6 +83,11 @@ public void testOperationPurposeIsNotReflectedInBlobStoreStatsWhenNotServerless( os.write(blobContent); os.flush(); }); + // BLOB_BATCH + blobStore.deleteBlobsIgnoringIfNotExists( + purpose, + List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator() + ); } Map stats = blobStore.stats(); @@ -88,6 +97,7 @@ public void testOperationPurposeIsNotReflectedInBlobStoreStatsWhenNotServerless( assertEquals(statsMapString, Long.valueOf(repeatTimes), stats.get(AzureBlobStore.Operation.GET_BLOB_PROPERTIES.getKey())); assertEquals(statsMapString, Long.valueOf(repeatTimes), stats.get(AzureBlobStore.Operation.PUT_BLOCK.getKey())); assertEquals(statsMapString, Long.valueOf(repeatTimes), stats.get(AzureBlobStore.Operation.PUT_BLOCK_LIST.getKey())); + assertEquals(statsMapString, Long.valueOf(repeatTimes), stats.get(AzureBlobStore.Operation.BLOB_BATCH.getKey())); } private static String statsKey(OperationPurpose purpose, AzureBlobStore.Operation operation) { diff --git a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java index d8716fd987f3e..92ce04b6bea5b 100644 --- a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java +++ b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java @@ -12,6 +12,9 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; @@ -23,10 +26,14 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; +import java.io.BufferedReader; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -35,6 +42,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Predicate; import java.util.regex.Matcher; @@ -47,6 +55,8 @@ */ @SuppressForbidden(reason = "Uses a HttpServer to emulate an Azure endpoint") public class AzureHttpHandler implements HttpHandler { + private static final Logger logger = LogManager.getLogger(AzureHttpHandler.class); + private final Map blobs; private final String account; private final String container; @@ -264,7 +274,98 @@ public void handle(final HttpExchange exchange) throws IOException { exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); exchange.getResponseBody().write(response); + } else if (Regex.simpleMatch("POST /" + account + "/" + container + "*restype=container*comp=batch*", request)) { + // Blob Batch (https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch) + final StringBuilder response = new StringBuilder(); + + try (BufferedReader requestReader = new BufferedReader(new InputStreamReader(exchange.getRequestBody()))) { + final String batchBoundary = requestReader.readLine(); + final String responseBoundary = "batch_" + UUID.randomUUID(); + + String line; + String contentId = null, requestId = null, toDelete = null; + while ((line = requestReader.readLine()) != null) { + if (batchBoundary.equals(line) || (batchBoundary + "--").equals(line)) { + // Found the end of a single request, process it + if (contentId == null || requestId == null || toDelete == null) { + throw new IllegalStateException( + "Missing contentId/requestId/toDelete: " + contentId + "/" + requestId + "/" + toDelete + ); + } + + // Process the deletion + if (blobs.remove("/" + account + toDelete) != null) { + final String acceptedPart = Strings.format(""" + --%s + Content-Type: application/http + Content-ID: %s + + HTTP/1.1 202 Accepted + x-ms-delete-type-permanent: true + x-ms-request-id: %s + x-ms-version: 2018-11-09 + + """, responseBoundary, contentId, requestId).replaceAll("\n", "\r\n"); + response.append(acceptedPart); + } else { + final String notFoundBody = Strings.format( + """ + + BlobNotFoundThe specified blob does not exist. + RequestId:%s + Time:%s""", + requestId, + DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now(ZoneId.of("UTC"))) + ); + final String notFoundPart = Strings.format(""" + --%s + Content-Type: application/http + Content-ID: %s + + HTTP/1.1 404 The specified blob does not exist. + x-ms-error-code: BlobNotFound + x-ms-request-id: %s + x-ms-version: 2018-11-09 + Content-Length: %d + Content-Type: application/xml + + %s + """, responseBoundary, contentId, requestId, notFoundBody.length(), notFoundBody) + .replaceAll("\n", "\r\n"); + response.append(notFoundPart); + } + + // Clear the state + toDelete = null; + contentId = null; + requestId = null; + } else if (Regex.simpleMatch("x-ms-client-request-id: *", line)) { + if (requestId != null) { + throw new IllegalStateException("Got multiple request IDs in a single request?"); + } + requestId = line.split("\\s")[1]; + } else if (Regex.simpleMatch("Content-ID: *", line)) { + if (contentId != null) { + throw new IllegalStateException("Got multiple content IDs in a single request?"); + } + contentId = line.split("\\s")[1]; + } else if (Regex.simpleMatch("DELETE /" + container + "/*", line)) { + String blobName = RestUtils.decodeComponent(line.split("(\\s|\\?)")[1]); + if (toDelete != null) { + throw new IllegalStateException("Got multiple deletes in a single request?"); + } + toDelete = blobName; + } + } + response.append("--").append(responseBoundary).append("--\r\n0\r\n"); + // Send the response + exchange.getResponseHeaders().add("Content-Type", "multipart/mixed; boundary=" + responseBoundary); + exchange.sendResponseHeaders(RestStatus.ACCEPTED.getStatus(), response.length()); + logger.debug("--> Sending response:\n{}", response); + exchange.getResponseBody().write(response.toString().getBytes(StandardCharsets.UTF_8)); + } } else { + logger.warn("--> Unrecognised request received: {}", request); sendError(exchange, RestStatus.BAD_REQUEST); } } finally { From e0a458441cff9a4242cd93f4c02f06d72f2d63c4 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Thu, 24 Oct 2024 11:55:54 +0300 Subject: [PATCH 046/324] [Failure store - selector syntax] Introduce the `::*` selector (#115389) **Introduction** > In order to make adoption of failure stores simpler for all users, we are introducing a new syntactical feature to index expression resolution: The selector. > > Selectors, denoted with a :: followed by a recognized suffix will allow users to specify which component of an index abstraction they would like to operate on within an API call. In this case, an index abstraction is a concrete index, data stream, or alias; Any abstraction that can be resolved to a set of indices/shards. We define a component of an index abstraction to be some searchable unit of the index abstraction. > > To start, we will support two components: data and failures. Concrete indices are their own data components, while the data component for index aliases are all of the indices contained therein. For data streams, the data component corresponds to their backing indices. Data stream aliases mirror this, treating all backing indices of the data streams they correspond to as their data component. > > The failure component is only supported by data streams and data stream aliases. The failure component of these abstractions refer to the data streams' failure stores. Indices and index aliases do not have a failure component. For more details and examples see https://github.com/elastic/elasticsearch/pull/113144. All this work has been cherry picked from there. **Purpose of this PR** This PR is introducing the `::*` as another selector option and not as a combination of `::data` and `::failure`. The reason for this change is that we need to differentiate between: - `my-index::*` which should resolve to `my-index::data` only and not to `my-index::failures` and - a user explicitly requesting `my-index::data, my-index::failures` which should result potentially to an error. --- .../datastreams/DataStreamsSnapshotsIT.java | 2 +- .../IngestFailureStoreMetricsIT.java | 2 +- .../lifecycle/DataStreamLifecycleService.java | 6 +- .../DataStreamLifecycleServiceTests.java | 8 +- .../org/elasticsearch/TransportVersions.java | 1 + .../admin/indices/get/GetIndexRequest.java | 2 +- .../indices/rollover/RolloverRequest.java | 5 +- .../action/bulk/BulkOperation.java | 2 +- .../action/bulk/TransportBulkAction.java | 2 +- .../datastreams/DataStreamsStatsAction.java | 2 +- .../support/IndexComponentSelector.java | 73 ++++++++-- .../action/support/IndicesOptions.java | 133 ++++++++---------- .../indices/RestRolloverIndexAction.java | 2 +- .../indices/get/GetIndexRequestTests.java | 2 +- .../MetadataRolloverServiceTests.java | 4 +- .../rollover/RolloverRequestTests.java | 14 +- .../support/IndexComponentSelectorTests.java | 41 ++++++ .../action/support/IndicesOptionsTests.java | 18 +-- .../IndexNameExpressionResolverTests.java | 16 +-- .../xpack/core/ilm/RolloverStep.java | 4 +- .../core/ilm/WaitForRolloverReadyStep.java | 4 +- 21 files changed, 197 insertions(+), 146 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/action/support/IndexComponentSelectorTests.java diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 212b869c6d933..286ad68896797 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -138,7 +138,7 @@ public void setup() throws Exception { // Initialize the failure store. RolloverRequest rolloverRequest = new RolloverRequest("with-fs", null); rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES).build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() ); response = client.execute(RolloverAction.INSTANCE, rolloverRequest).get(); assertTrue(response.isAcknowledged()); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java index 679ad5b000c8f..96def04069e24 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -195,7 +195,7 @@ public void testRejectionFromFailureStore() throws IOException { // Initialize failure store. var rolloverRequest = new RolloverRequest(dataStream, null); rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES).build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() ); var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); var failureStoreIndex = rolloverResponse.getNewIndex(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 7bbf7137d290e..7d2828e30d5ab 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -946,7 +946,7 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice UpdateSettingsRequest updateMergePolicySettingsRequest = new UpdateSettingsRequest(); updateMergePolicySettingsRequest.indicesOptions( IndicesOptions.builder(updateMergePolicySettingsRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() ); updateMergePolicySettingsRequest.indices(indexName); @@ -1408,9 +1408,7 @@ static RolloverRequest getDefaultRolloverRequest( RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null).masterNodeTimeout(TimeValue.MAX_VALUE); if (rolloverFailureStore) { rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) - .build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() ); } rolloverRequest.setConditions(rolloverConfiguration.resolveRolloverConditions(dataRetention)); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index d6bf80798764d..698ab427ab040 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -225,11 +225,11 @@ public void testOperationsExecutedOnce() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_DATA)); + assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_FAILURES)); + assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); List deleteRequests = clientSeenRequests.subList(2, 5) .stream() .map(transportRequest -> (DeleteIndexRequest) transportRequest) @@ -1546,11 +1546,11 @@ public void testFailureStoreIsManagedEvenWhenDisabled() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_DATA)); + assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_FAILURES)); + assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); assertThat( ((DeleteIndexRequest) clientSeenRequests.get(2)).indices()[0], is(dataStream.getFailureIndices().getIndices().get(0).getName()) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 6d9bf2ac52f2d..777ff083f33f8 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -180,6 +180,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ = def(8_776_00_0); public static final TransportVersion SIMULATE_MAPPING_ADDITION = def(8_777_00_0); + public static final TransportVersion INTRODUCE_ALL_APPLICABLE_SELECTOR = def(8_778_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 4c5ee08beb192..801dbbdee0858 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -98,7 +98,7 @@ public GetIndexRequest() { super( DataStream.isFailureStoreFeatureFlagEnabled() ? IndicesOptions.builder(IndicesOptions.strictExpandOpen()) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() : IndicesOptions.strictExpandOpen() ); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 5a7f330be50c0..552ce727d4249 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.cluster.metadata.DataStream; @@ -124,8 +125,8 @@ public ActionRequestValidationException validate() { ); } - var selectors = indicesOptions.selectorOptions().defaultSelectors(); - if (selectors.size() > 1) { + var selector = indicesOptions.selectorOptions().defaultSelector(); + if (selector == IndexComponentSelector.ALL_APPLICABLE) { validationException = addValidationError( "rollover cannot be applied to both regular and failure indices at the same time", validationException diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 130d6286f7e02..ce3e189149451 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -212,7 +212,7 @@ private void rollOverFailureStores(Runnable runnable) { RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build() ); // We are executing a lazy rollover because it is an action specialised for this situation, when we want an diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index a3a73415ec4f6..cef68324e2a45 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -418,7 +418,7 @@ private void rollOverDataStreams( if (targetFailureStore) { rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build() ); } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java index 1c30303915c8e..9266bae439b73 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java @@ -61,7 +61,7 @@ public Request() { .allowFailureIndices(true) .build() ) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() ); } diff --git a/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java b/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java index 65b48db8f5cf3..910be151d1bf5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java @@ -9,6 +9,12 @@ package org.elasticsearch.action.support; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; + +import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -17,33 +23,82 @@ * We define as index components the two different sets of indices a data stream could consist of: * - DATA: represents the backing indices * - FAILURES: represent the failing indices + * - ALL: represents all available in this expression components, meaning if it's a data stream both backing and failure indices and if it's + * an index only the index itself. * Note: An index is its own DATA component, but it cannot have a FAILURE component. */ -public enum IndexComponentSelector { - DATA("data"), - FAILURES("failures"); +public enum IndexComponentSelector implements Writeable { + DATA("data", (byte) 0), + FAILURES("failures", (byte) 1), + ALL_APPLICABLE("*", (byte) 2); private final String key; + private final byte id; - IndexComponentSelector(String key) { + IndexComponentSelector(String key, byte id) { this.key = key; + this.id = id; } public String getKey() { return key; } - private static final Map REGISTRY; + public byte getId() { + return id; + } + + private static final Map KEY_REGISTRY; + private static final Map ID_REGISTRY; static { - Map registry = new HashMap<>(IndexComponentSelector.values().length); + Map keyRegistry = new HashMap<>(IndexComponentSelector.values().length); + for (IndexComponentSelector value : IndexComponentSelector.values()) { + keyRegistry.put(value.getKey(), value); + } + KEY_REGISTRY = Collections.unmodifiableMap(keyRegistry); + Map idRegistry = new HashMap<>(IndexComponentSelector.values().length); for (IndexComponentSelector value : IndexComponentSelector.values()) { - registry.put(value.getKey(), value); + idRegistry.put(value.getId(), value); } - REGISTRY = Collections.unmodifiableMap(registry); + ID_REGISTRY = Collections.unmodifiableMap(idRegistry); } + /** + * Retrieves the respective selector when the suffix key is recognised + * @param key the suffix key, probably parsed from an expression + * @return the selector or null if the key was not recognised. + */ + @Nullable public static IndexComponentSelector getByKey(String key) { - return REGISTRY.get(key); + return KEY_REGISTRY.get(key); + } + + public static IndexComponentSelector read(StreamInput in) throws IOException { + return getById(in.readByte()); + } + + // Visible for testing + static IndexComponentSelector getById(byte id) { + IndexComponentSelector indexComponentSelector = ID_REGISTRY.get(id); + if (indexComponentSelector == null) { + throw new IllegalArgumentException( + "Unknown id of index component selector [" + id + "], available options are: " + ID_REGISTRY + ); + } + return indexComponentSelector; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(id); + } + + public boolean shouldIncludeData() { + return this == ALL_APPLICABLE || this == DATA; + } + + public boolean shouldIncludeFailures() { + return this == ALL_APPLICABLE || this == FAILURES; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 22d019f80837d..85889d8398cb1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -421,61 +421,45 @@ public static Builder builder(GatekeeperOptions gatekeeperOptions) { /** * Defines which selectors should be used by default for an index operation in the event that no selectors are provided. */ - public record SelectorOptions(EnumSet defaultSelectors) implements Writeable { + public record SelectorOptions(IndexComponentSelector defaultSelector) implements Writeable { - public static final SelectorOptions DATA_AND_FAILURE = new SelectorOptions( - EnumSet.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES) - ); - public static final SelectorOptions ONLY_DATA = new SelectorOptions(EnumSet.of(IndexComponentSelector.DATA)); - public static final SelectorOptions ONLY_FAILURES = new SelectorOptions(EnumSet.of(IndexComponentSelector.FAILURES)); + public static final SelectorOptions ALL_APPLICABLE = new SelectorOptions(IndexComponentSelector.ALL_APPLICABLE); + public static final SelectorOptions DATA = new SelectorOptions(IndexComponentSelector.DATA); + public static final SelectorOptions FAILURES = new SelectorOptions(IndexComponentSelector.FAILURES); /** * Default instance. Uses
::data
as the default selector if none are present in an index expression. */ - public static final SelectorOptions DEFAULT = ONLY_DATA; + public static final SelectorOptions DEFAULT = DATA; public static SelectorOptions read(StreamInput in) throws IOException { - return new SelectorOptions(in.readEnumSet(IndexComponentSelector.class)); + if (in.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) { + EnumSet set = in.readEnumSet(IndexComponentSelector.class); + if (set.isEmpty() || set.size() == 2) { + assert set.contains(IndexComponentSelector.DATA) && set.contains(IndexComponentSelector.FAILURES) + : "The enum set only supported ::data and ::failures"; + return SelectorOptions.ALL_APPLICABLE; + } else if (set.contains(IndexComponentSelector.DATA)) { + return SelectorOptions.DATA; + } else { + return SelectorOptions.FAILURES; + } + } else { + return new SelectorOptions(IndexComponentSelector.read(in)); + } } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeEnumSet(defaultSelectors); - } - - public static class Builder { - private EnumSet defaultSelectors; - - public Builder() { - this(DEFAULT); - } - - Builder(SelectorOptions options) { - defaultSelectors = EnumSet.copyOf(options.defaultSelectors); - } - - public Builder setDefaultSelectors(IndexComponentSelector first, IndexComponentSelector... remaining) { - defaultSelectors = EnumSet.of(first, remaining); - return this; - } - - public Builder setDefaultSelectors(EnumSet defaultSelectors) { - this.defaultSelectors = EnumSet.copyOf(defaultSelectors); - return this; - } - - public SelectorOptions build() { - assert defaultSelectors.isEmpty() != true : "Default selectors cannot be an empty set"; - return new SelectorOptions(EnumSet.copyOf(defaultSelectors)); + if (out.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) { + switch (defaultSelector) { + case ALL_APPLICABLE -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES)); + case DATA -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA)); + case FAILURES -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.FAILURES)); + } + } else { + defaultSelector.writeTo(out); } } - - public static Builder builder() { - return new Builder(); - } - - public static Builder builder(SelectorOptions selectorOptions) { - return new Builder(selectorOptions); - } } /** @@ -547,7 +531,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -566,7 +550,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -585,7 +569,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_NO_SELECTORS = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -622,7 +606,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -641,7 +625,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -655,7 +639,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTOR = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -687,7 +671,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -701,7 +685,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTORS = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -733,7 +717,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -747,7 +731,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -766,7 +750,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -785,7 +769,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -804,7 +788,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -823,7 +807,7 @@ private enum Option { .allowFailureIndices(true) .allowAliasToMultipleIndices(true) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -842,7 +826,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -861,7 +845,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); /** @@ -919,7 +903,7 @@ public boolean forbidClosedIndices() { } /** - * @return Whether execution on closed indices is allowed. + * @return Whether execution on failure indices is allowed. */ public boolean allowFailureIndices() { return gatekeeperOptions.allowFailureIndices(); @@ -950,14 +934,14 @@ public boolean ignoreThrottled() { * @return whether regular indices (stand-alone or backing indices) will be included in the response */ public boolean includeRegularIndices() { - return selectorOptions().defaultSelectors().contains(IndexComponentSelector.DATA); + return selectorOptions().defaultSelector().shouldIncludeData(); } /** * @return whether failure indices (only supported by certain data streams) will be included in the response */ public boolean includeFailureIndices() { - return selectorOptions().defaultSelectors().contains(IndexComponentSelector.FAILURES); + return selectorOptions().defaultSelector().shouldIncludeFailures(); } public void writeIndicesOptions(StreamOutput out) throws IOException { @@ -1004,7 +988,7 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { out.writeBoolean(includeFailureIndices()); } if (out.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { - out.writeEnumSet(selectorOptions.defaultSelectors); + selectorOptions.writeTo(out); } } @@ -1032,15 +1016,15 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti var includeData = in.readBoolean(); var includeFailures = in.readBoolean(); if (includeData && includeFailures) { - selectorOptions = SelectorOptions.DATA_AND_FAILURE; + selectorOptions = SelectorOptions.ALL_APPLICABLE; } else if (includeData) { - selectorOptions = SelectorOptions.ONLY_DATA; + selectorOptions = SelectorOptions.DATA; } else { - selectorOptions = SelectorOptions.ONLY_FAILURES; + selectorOptions = SelectorOptions.FAILURES; } } if (in.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { - selectorOptions = new SelectorOptions(in.readEnumSet(IndexComponentSelector.class)); + selectorOptions = SelectorOptions.read(in); } return new IndicesOptions( options.contains(Option.ALLOW_UNAVAILABLE_CONCRETE_TARGETS) @@ -1099,11 +1083,6 @@ public Builder selectorOptions(SelectorOptions selectorOptions) { return this; } - public Builder selectorOptions(SelectorOptions.Builder selectorOptions) { - this.selectorOptions = selectorOptions.build(); - return this; - } - public IndicesOptions build() { return new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, selectorOptions); } @@ -1322,9 +1301,9 @@ private static SelectorOptions parseFailureStoreParameters(Object failureStoreVa return defaultOptions; } return switch (failureStoreValue.toString()) { - case INCLUDE_ALL -> SelectorOptions.DATA_AND_FAILURE; - case INCLUDE_ONLY_REGULAR_INDICES -> SelectorOptions.ONLY_DATA; - case INCLUDE_ONLY_FAILURE_INDICES -> SelectorOptions.ONLY_FAILURES; + case INCLUDE_ALL -> SelectorOptions.ALL_APPLICABLE; + case INCLUDE_ONLY_REGULAR_INDICES -> SelectorOptions.DATA; + case INCLUDE_ONLY_FAILURE_INDICES -> SelectorOptions.FAILURES; default -> throw new IllegalArgumentException("No valid " + FAILURE_STORE_QUERY_PARAM + " value [" + failureStoreValue + "]"); }; } @@ -1336,9 +1315,9 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par gatekeeperOptions.toXContent(builder, params); if (DataStream.isFailureStoreFeatureFlagEnabled()) { String displayValue; - if (SelectorOptions.DATA_AND_FAILURE.equals(selectorOptions())) { + if (SelectorOptions.ALL_APPLICABLE.equals(selectorOptions())) { displayValue = INCLUDE_ALL; - } else if (SelectorOptions.ONLY_DATA.equals(selectorOptions())) { + } else if (SelectorOptions.DATA.equals(selectorOptions())) { displayValue = INCLUDE_ONLY_REGULAR_INDICES; } else { displayValue = INCLUDE_ONLY_FAILURE_INDICES; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java index 942844dd1dd16..776302296b1a2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java @@ -69,7 +69,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (failureStore) { rolloverIndexRequest.setIndicesOptions( IndicesOptions.builder(rolloverIndexRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build() ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java index a75b50e3a88f4..3bbc03a333438 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java @@ -82,6 +82,6 @@ public void testIndicesOptions() { ); assertThat(getIndexRequest.indicesOptions().wildcardOptions(), equalTo(IndicesOptions.strictExpandOpen().wildcardOptions())); assertThat(getIndexRequest.indicesOptions().gatekeeperOptions(), equalTo(IndicesOptions.strictExpandOpen().gatekeeperOptions())); - assertThat(getIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA_AND_FAILURE)); + assertThat(getIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ALL_APPLICABLE)); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index b9fdb13958632..1a30fae1ebc00 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -754,7 +754,7 @@ public void testValidation() throws Exception { .promoteDataStream(); rolloverTarget = dataStream.getName(); if (dataStream.isFailureStoreEnabled() && randomBoolean()) { - defaultSelectorOptions = IndicesOptions.SelectorOptions.ONLY_FAILURES; + defaultSelectorOptions = IndicesOptions.SelectorOptions.FAILURES; sourceIndexName = dataStream.getFailureStoreWriteIndex().getName(); defaultRolloverIndexName = DataStream.getDefaultFailureStoreName( dataStream.getName(), @@ -815,7 +815,7 @@ public void testValidation() throws Exception { true, null, null, - IndicesOptions.SelectorOptions.ONLY_FAILURES.equals(defaultSelectorOptions) + IndicesOptions.SelectorOptions.FAILURES.equals(defaultSelectorOptions) ); newIndexName = newIndexName == null ? defaultRolloverIndexName : newIndexName; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java index 08e92c833dc85..f0190790ba001 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -34,9 +33,7 @@ import org.junit.Before; import java.io.IOException; -import java.util.EnumSet; import java.util.Map; -import java.util.Set; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -178,14 +175,7 @@ public void testSerialize() throws Exception { ); originalRequest.lazy(randomBoolean()); originalRequest.setIndicesOptions( - IndicesOptions.builder(originalRequest.indicesOptions()) - .selectorOptions( - IndicesOptions.SelectorOptions.builder() - .setDefaultSelectors( - EnumSet.copyOf(randomNonEmptySubsetOf(Set.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES))) - ) - ) - .build() + IndicesOptions.builder(originalRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE).build() ); try (BytesStreamOutput out = new BytesStreamOutput()) { @@ -266,7 +256,7 @@ public void testValidation() { RolloverRequest rolloverRequest = new RolloverRequest("alias-index", "new-index-name"); rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() ); ActionRequestValidationException validationException = rolloverRequest.validate(); diff --git a/server/src/test/java/org/elasticsearch/action/support/IndexComponentSelectorTests.java b/server/src/test/java/org/elasticsearch/action/support/IndexComponentSelectorTests.java new file mode 100644 index 0000000000000..73d4ab59ce479 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/support/IndexComponentSelectorTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class IndexComponentSelectorTests extends ESTestCase { + + public void testIndexComponentSelectorFromKey() { + assertThat(IndexComponentSelector.getByKey("data"), equalTo(IndexComponentSelector.DATA)); + assertThat(IndexComponentSelector.getByKey("failures"), equalTo(IndexComponentSelector.FAILURES)); + assertThat(IndexComponentSelector.getByKey("*"), equalTo(IndexComponentSelector.ALL_APPLICABLE)); + assertThat(IndexComponentSelector.getByKey("d*ta"), nullValue()); + assertThat(IndexComponentSelector.getByKey("_all"), nullValue()); + assertThat(IndexComponentSelector.getByKey("**"), nullValue()); + assertThat(IndexComponentSelector.getByKey("failure"), nullValue()); + } + + public void testIndexComponentSelectorFromId() { + assertThat(IndexComponentSelector.getById((byte) 0), equalTo(IndexComponentSelector.DATA)); + assertThat(IndexComponentSelector.getById((byte) 1), equalTo(IndexComponentSelector.FAILURES)); + assertThat(IndexComponentSelector.getById((byte) 2), equalTo(IndexComponentSelector.ALL_APPLICABLE)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> IndexComponentSelector.getById((byte) 3)); + assertThat( + exception.getMessage(), + containsString("Unknown id of index component selector [3], available options are: {0=DATA, 1=FAILURES, 2=ALL_APPLICABLE}") + ); + } + +} diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index 1784ab863bf1c..de7b43ad091fa 100644 --- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -30,11 +30,9 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; import static org.hamcrest.CoreMatchers.equalTo; @@ -58,13 +56,7 @@ public void testSerialization() throws Exception { .allowAliasToMultipleIndices(randomBoolean()) .allowClosedIndices(randomBoolean()) ) - .selectorOptions( - IndicesOptions.SelectorOptions.builder() - .setDefaultSelectors( - EnumSet.copyOf(randomNonEmptySubsetOf(Set.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES))) - ) - .build() - ) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build(); BytesStreamOutput output = new BytesStreamOutput(); @@ -350,9 +342,7 @@ public void testToXContent() throws IOException { randomBoolean() ); GatekeeperOptions gatekeeperOptions = new GatekeeperOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); - IndicesOptions.SelectorOptions selectorOptions = new IndicesOptions.SelectorOptions( - EnumSet.copyOf(randomNonEmptySubsetOf(Set.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES))) - ); + IndicesOptions.SelectorOptions selectorOptions = new IndicesOptions.SelectorOptions(randomFrom(IndexComponentSelector.values())); IndicesOptions indicesOptions = new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, selectorOptions); @@ -370,9 +360,9 @@ public void testToXContent() throws IOException { assertThat(map.get("allow_no_indices"), equalTo(wildcardOptions.allowEmptyExpressions())); assertThat(map.get("ignore_throttled"), equalTo(gatekeeperOptions.ignoreThrottled())); String displayValue; - if (IndicesOptions.SelectorOptions.DATA_AND_FAILURE.equals(selectorOptions)) { + if (IndicesOptions.SelectorOptions.ALL_APPLICABLE.equals(selectorOptions)) { displayValue = "include"; - } else if (IndicesOptions.SelectorOptions.ONLY_DATA.equals(selectorOptions)) { + } else if (IndicesOptions.SelectorOptions.DATA.equals(selectorOptions)) { displayValue = "exclude"; } else { displayValue = "only"; diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index d58de5ca65ea0..99470918ce063 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -2737,7 +2737,7 @@ public void testDataStreamsWithFailureStore() { // Test include failure store with an exact data stream name { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); assertThat(result.length, equalTo(4)); @@ -2751,7 +2751,7 @@ public void testDataStreamsWithFailureStore() { // We expect that they will be skipped { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowFailureIndices(false).build()) .concreteTargetOptions(IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) .build(); @@ -2765,7 +2765,7 @@ public void testDataStreamsWithFailureStore() { // We expect an error { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowFailureIndices(false).build()) .build(); FailureIndexNotSupportedException failureIndexNotSupportedException = expectThrows( @@ -2781,7 +2781,7 @@ public void testDataStreamsWithFailureStore() { // Test only failure store with an exact data stream name { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); assertThat(result.length, equalTo(2)); @@ -2808,7 +2808,7 @@ public void testDataStreamsWithFailureStore() { // Test include failure store without any expressions { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); assertThat(result.length, equalTo(5)); @@ -2828,7 +2828,7 @@ public void testDataStreamsWithFailureStore() { // Test only failure store without any expressions { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); assertThat(result.length, equalTo(2)); @@ -2861,7 +2861,7 @@ public void testDataStreamsWithFailureStore() { // Test include failure store with wildcard expression { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); assertThat(result.length, equalTo(5)); @@ -2881,7 +2881,7 @@ public void testDataStreamsWithFailureStore() { // Test only failure store with wildcard expression { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); assertThat(result.length, equalTo(2)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java index d648dd1c7edf8..3d140f5a9d764 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java @@ -126,9 +126,7 @@ public void performAction( RolloverRequest rolloverRequest = new RolloverRequest(rolloverTarget, null).masterNodeTimeout(TimeValue.MAX_VALUE); if (targetFailureStore) { rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) - .build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() ); } // We don't wait for active shards when we perform the rollover because the diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java index 67f65481ef63e..aa20e33a3fbf2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java @@ -247,9 +247,7 @@ RolloverRequest createRolloverRequest( rolloverRequest.setConditions(applyDefaultConditions(conditions, rolloverOnlyIfHasDocuments)); if (targetFailureStore) { rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) - .build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() ); } return rolloverRequest; From 8a3540fa74de36f62bafebdb719b22ef88879bf7 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:44:13 +0200 Subject: [PATCH 047/324] [DOCS] Clarify start-local trial license info (#115504) --- README.asciidoc | 2 +- docs/reference/run-elasticsearch-locally.asciidoc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.asciidoc b/README.asciidoc index 8d3c96c659896..bac6d0ed71752 100644 --- a/README.asciidoc +++ b/README.asciidoc @@ -56,8 +56,8 @@ Quickly set up Elasticsearch and Kibana in Docker for local development or testi - If you're using Microsoft Windows, then install https://learn.microsoft.com/en-us/windows/wsl/install[Windows Subsystem for Linux (WSL)]. ==== Trial license +This setup comes with a one-month trial license that includes all Elastic features. -This setup comes with a one-month trial of the Elastic *Platinum* license. After the trial period, the license reverts to *Free and open - Basic*. Refer to https://www.elastic.co/subscriptions[Elastic subscriptions] for more information. diff --git a/docs/reference/run-elasticsearch-locally.asciidoc b/docs/reference/run-elasticsearch-locally.asciidoc index 1a115ae926ea2..03885132e4050 100644 --- a/docs/reference/run-elasticsearch-locally.asciidoc +++ b/docs/reference/run-elasticsearch-locally.asciidoc @@ -20,7 +20,7 @@ Refer to <> for a list of produc Quickly set up {es} and {kib} in Docker for local development or testing, using the https://github.com/elastic/start-local?tab=readme-ov-file#-try-elasticsearch-and-kibana-locally[`start-local` script]. -This setup comes with a one-month trial of the Elastic *Platinum* license. +This setup comes with a one-month trial license that includes all Elastic features. After the trial period, the license reverts to *Free and open - Basic*. Refer to https://www.elastic.co/subscriptions[Elastic subscriptions] for more information. @@ -84,4 +84,4 @@ Learn about customizing the setup, logging, and more. [[local-dev-next-steps]] === Next steps -Use our <> to learn the basics of {es}. \ No newline at end of file +Use our <> to learn the basics of {es}. From 031a80d2dc8509d9a48a50261e18f6252c00560b Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 24 Oct 2024 11:30:33 +0100 Subject: [PATCH 048/324] Use BuildVersion rather than Version for reserved state version (#115406) --- .../settings/LocallyMountedSecrets.java | 4 +-- .../org/elasticsearch/env/BuildVersion.java | 15 +++++++++++ .../env/DefaultBuildVersion.java | 12 +++++---- .../internal/BuildExtension.java | 5 ++++ .../service/ReservedClusterStateService.java | 4 +-- .../service/ReservedStateUpdateTask.java | 5 ++-- .../service/ReservedStateVersion.java | 14 ++++------ .../service/FileSettingsServiceTests.java | 4 +-- .../ReservedClusterStateServiceTests.java | 27 ++++++++++--------- .../ReservedLifecycleStateServiceTests.java | 4 +-- 10 files changed, 57 insertions(+), 37 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java index e4f1608a52d15..4a2e1cd92d4da 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java +++ b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java @@ -11,11 +11,11 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.reservedstate.service.ReservedStateVersion; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -130,7 +130,7 @@ public LocallyMountedSecrets(Environment environment) { throw new IllegalStateException("Error processing secrets file", e); } } else { - secrets.set(new LocalFileSecrets(Map.of(), new ReservedStateVersion(-1L, Version.CURRENT))); + secrets.set(new LocalFileSecrets(Map.of(), new ReservedStateVersion(-1L, BuildVersion.current()))); } this.secretsDir = secretsDirPath.toString(); this.secretsFile = secretsFilePath.toString(); diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index 3fdf01d7e1bae..5536b06d4d587 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -72,6 +72,16 @@ public static BuildVersion fromVersionId(int versionId) { return CurrentExtensionHolder.BUILD_EXTENSION.fromVersionId(versionId); } + /** + * Create a {@link BuildVersion} from a version string. + * + * @param version A string representation of a version + * @return a version representing a build or release of Elasticsearch + */ + public static BuildVersion fromString(String version) { + return CurrentExtensionHolder.BUILD_EXTENSION.fromString(version); + } + /** * Get the current build version. * @@ -110,6 +120,11 @@ public BuildVersion currentBuildVersion() { public BuildVersion fromVersionId(int versionId) { return new DefaultBuildVersion(versionId); } + + @Override + public BuildVersion fromString(String version) { + return new DefaultBuildVersion(version); + } } } diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index f31b34e89c01d..9cf0d60719653 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -28,15 +28,17 @@ final class DefaultBuildVersion extends BuildVersion { public static BuildVersion CURRENT = new DefaultBuildVersion(Version.CURRENT.id()); - private final int versionId; private final Version version; DefaultBuildVersion(int versionId) { assert versionId >= 0 : "Release version IDs must be non-negative integers"; - this.versionId = versionId; this.version = Version.fromId(versionId); } + DefaultBuildVersion(String version) { + this.version = Version.fromString(Objects.requireNonNull(version)); + } + @Override public boolean onOrAfterMinimumCompatible() { return Version.CURRENT.minimumCompatibilityVersion().onOrBefore(version); @@ -49,7 +51,7 @@ public boolean isFutureVersion() { @Override public int id() { - return versionId; + return version.id(); } @Override @@ -57,12 +59,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DefaultBuildVersion that = (DefaultBuildVersion) o; - return versionId == that.versionId; + return version.equals(that.version); } @Override public int hashCode() { - return Objects.hash(versionId); + return Objects.hash(version.id()); } @Override diff --git a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java index a23270cb5550c..427e186bc40cf 100644 --- a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java +++ b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java @@ -38,4 +38,9 @@ default boolean hasReleaseVersioning() { * Returns the {@link BuildVersion} for a given version identifier. */ BuildVersion fromVersionId(int versionId); + + /** + * Returns the {@link BuildVersion} for a given version string. + */ + BuildVersion fromString(String version); } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java index 0c5fa61b29cfe..499b5e6515a8c 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -22,6 +21,7 @@ import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; import org.elasticsearch.core.Tuple; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -158,7 +158,7 @@ public void process( } public void initEmpty(String namespace, ActionListener listener) { - var missingVersion = new ReservedStateVersion(EMPTY_VERSION, Version.CURRENT); + var missingVersion = new ReservedStateVersion(EMPTY_VERSION, BuildVersion.current()); var emptyState = new ReservedStateChunk(Map.of(), missingVersion); updateTaskQueue.submitTask( "empty initial cluster state [" + namespace + "]", diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java index c85997f72cc78..90ae9923910d1 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -162,11 +161,11 @@ static boolean checkMetadataVersion( ReservedStateVersion reservedStateVersion, ReservedStateVersionCheck versionCheck ) { - if (Version.CURRENT.before(reservedStateVersion.minCompatibleVersion())) { + if (reservedStateVersion.buildVersion().isFutureVersion()) { logger.warn( () -> format( "Reserved cluster state version [%s] for namespace [%s] is not compatible with this Elasticsearch node", - reservedStateVersion.minCompatibleVersion(), + reservedStateVersion.buildVersion(), namespace ) ); diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java index e2a21689b9815..116d470755e1c 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java @@ -9,10 +9,10 @@ package org.elasticsearch.reservedstate.service; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; @@ -23,7 +23,7 @@ * File settings metadata class that holds information about * versioning and Elasticsearch version compatibility */ -public record ReservedStateVersion(Long version, Version compatibleWith) implements Writeable { +public record ReservedStateVersion(Long version, BuildVersion buildVersion) implements Writeable { public static final ParseField VERSION = new ParseField("version"); public static final ParseField COMPATIBILITY = new ParseField("compatibility"); @@ -32,7 +32,7 @@ public record ReservedStateVersion(Long version, Version compatibleWith) impleme "reserved_cluster_state_version_metadata", a -> { Long updateId = Long.parseLong((String) a[0]); - Version minCompatVersion = Version.fromString((String) a[1]); + BuildVersion minCompatVersion = BuildVersion.fromString((String) a[1]); return new ReservedStateVersion(updateId, minCompatVersion); } @@ -47,17 +47,13 @@ public static ReservedStateVersion parse(XContentParser parser) { return PARSER.apply(parser, null); } - public Version minCompatibleVersion() { - return compatibleWith; - } - public static ReservedStateVersion readFrom(StreamInput input) throws IOException { - return new ReservedStateVersion(input.readLong(), Version.readVersion(input)); + return new ReservedStateVersion(input.readLong(), BuildVersion.fromVersionId(input.readVInt())); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeLong(version()); - Version.writeVersion(compatibleWith(), out); + out.writeVInt(buildVersion().id()); } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index c0657b5888ad2..8af36e2f9677e 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.reservedstate.service; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -26,6 +25,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import org.elasticsearch.tasks.TaskManager; @@ -277,7 +277,7 @@ public void testStopWorksInMiddleOfProcessing() throws Exception { throw new RuntimeException(e); } }).start(); - return new ReservedStateChunk(Map.of(), new ReservedStateVersion(1L, Version.CURRENT)); + return new ReservedStateChunk(Map.of(), new ReservedStateVersion(1L, BuildVersion.current())); }).when(controller).parse(any(String.class), any()); doAnswer((Answer) invocation -> { diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index d96387618e6bd..5c7dd6cb346b9 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.reservedstate.service; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -26,6 +25,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -396,7 +396,7 @@ public TransformState transform(Object source, TransformState prevState) throws assertTrue(ReservedStateErrorTask.isNewError(null, 1L, ReservedStateVersionCheck.HIGHER_VERSION_ONLY)); assertTrue(ReservedStateErrorTask.isNewError(null, 1L, ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION)); - var chunk = new ReservedStateChunk(Map.of("one", "two", "maker", "three"), new ReservedStateVersion(2L, Version.CURRENT)); + var chunk = new ReservedStateChunk(Map.of("one", "two", "maker", "three"), new ReservedStateVersion(2L, BuildVersion.current())); var orderedHandlers = List.of(exceptionThrower.name(), newStateMaker.name()); // We submit a task with two handler, one will cause an exception, the other will create a new state. @@ -456,7 +456,7 @@ public void testCheckMetadataVersion() { ReservedStateUpdateTask task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -466,7 +466,7 @@ public void testCheckMetadataVersion() { assertThat("Cluster state should be modified", task.execute(state), not(sameInstance(state))); task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -477,7 +477,7 @@ public void testCheckMetadataVersion() { task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(123L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(123L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -487,7 +487,7 @@ public void testCheckMetadataVersion() { assertThat("Cluster state should not be modified", task.execute(state), sameInstance(state)); task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(123L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(123L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION, Map.of(), List.of(), @@ -498,7 +498,7 @@ public void testCheckMetadataVersion() { task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(122L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(122L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -508,7 +508,7 @@ public void testCheckMetadataVersion() { assertThat("Cluster state should not be modified", task.execute(state), sameInstance(state)); task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(122L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(122L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION, Map.of(), List.of(), @@ -519,7 +519,7 @@ public void testCheckMetadataVersion() { task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, Version.fromId(Version.CURRENT.id + 1))), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersion.fromVersionId(BuildVersion.current().id() + 1))), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -529,7 +529,7 @@ public void testCheckMetadataVersion() { assertThat("Cluster state should not be modified", task.execute(state), sameInstance(state)); task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, Version.fromId(Version.CURRENT.id + 1))), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersion.fromVersionId(BuildVersion.current().id() + 1))), ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION, Map.of(), List.of(), @@ -627,7 +627,7 @@ public void testCheckAndReportError() { assertNull(controller.checkAndReportError("test", List.of(), null, ReservedStateVersionCheck.HIGHER_VERSION_ONLY)); verify(controller, times(0)).updateErrorState(any()); - var version = new ReservedStateVersion(2L, Version.CURRENT); + var version = new ReservedStateVersion(2L, BuildVersion.current()); var error = controller.checkAndReportError("test", List.of("test error"), version, ReservedStateVersionCheck.HIGHER_VERSION_ONLY); assertThat(error, instanceOf(IllegalStateException.class)); assertThat(error.getMessage(), is("Error processing state change request for test, errors: test error")); @@ -659,7 +659,10 @@ public TransformState transform(Object source, TransformState prevState) { Metadata metadata = Metadata.builder().put(operatorMetadata).build(); ClusterState state = ClusterState.builder(new ClusterName("test")).metadata(metadata).build(); - var chunk = new ReservedStateChunk(Map.of("non-state", "two", "maker", "three"), new ReservedStateVersion(2L, Version.CURRENT)); + var chunk = new ReservedStateChunk( + Map.of("non-state", "two", "maker", "three"), + new ReservedStateVersion(2L, BuildVersion.current()) + ); var orderedHandlers = List.of(exceptionThrower.name(), newStateMaker.name()); ClusterService clusterService = mock(ClusterService.class); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java index aab89c6620b52..bcd6026618a05 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.ilm.action; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterModule; @@ -22,6 +21,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -418,7 +418,7 @@ public void testOperatorControllerWithPluginPackage() { ) ) ), - new ReservedStateVersion(123L, Version.CURRENT) + new ReservedStateVersion(123L, BuildVersion.current()) ); controller.process("operator", pack, randomFrom(ReservedStateVersionCheck.values()), x::set); From 327f23254a8ea26bb462488b4cd06ab8604acd8e Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Thu, 24 Oct 2024 11:41:47 +0100 Subject: [PATCH 049/324] Allow for queries on _tier to skip shards during coordinator rewrite (#114990) The `_tier` metadata field was not used on the coordinator when rewriting queries in order to exclude shards that don't match. This lead to queries in the following form to continue to report failures even though the only unavailable shards were in the tier that was excluded from search (frozen tier in this example): ``` POST testing/_search { "query": { "bool": { "must_not": [ { "term": { "_tier": "data_frozen" } } ] } } } ``` This PR addresses this by having the queries that can execute on `_tier` (term, match, query string, simple query string, prefix, wildcard) execute a coordinator rewrite to exclude the indices that don't match the `_tier` query **before** attempting to reach to the shards (shards, that might not be available and raise errors). Fixes #114910 --- docs/changelog/114990.yaml | 6 + .../query/CoordinatorRewriteContext.java | 65 +++++++- .../CoordinatorRewriteContextProvider.java | 9 +- .../index/query/PrefixQueryBuilder.java | 18 ++- .../index/query/QueryRewriteContext.java | 21 +++ .../index/query/TermQueryBuilder.java | 18 ++- .../index/query/TermsQueryBuilder.java | 17 ++- .../index/query/WildcardQueryBuilder.java | 20 ++- .../index/query/PrefixQueryBuilderTests.java | 35 +++++ .../index/query/QueryRewriteContextTests.java | 131 ++++++++++++++++ .../index/query/TermQueryBuilderTests.java | 34 +++++ .../index/query/TermsQueryBuilderTests.java | 33 ++++ .../query/WildcardQueryBuilderTests.java | 34 +++++ .../test/AbstractBuilderTestCase.java | 15 +- .../mapper/DataTierFieldMapper.java | 26 +--- .../core/LocalStateCompositeXPackPlugin.java | 7 +- ...pshotsCanMatchOnCoordinatorIntegTests.java | 143 +++++++++++++++++- 17 files changed, 594 insertions(+), 38 deletions(-) create mode 100644 docs/changelog/114990.yaml create mode 100644 server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java diff --git a/docs/changelog/114990.yaml b/docs/changelog/114990.yaml new file mode 100644 index 0000000000000..2575942d15bf5 --- /dev/null +++ b/docs/changelog/114990.yaml @@ -0,0 +1,6 @@ +pr: 114990 +summary: Allow for querries on `_tier` to skip shards in the `can_match` phase +area: Search +type: bug +issues: + - 114910 diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index 3e5deeeebae5d..964358610e074 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -9,17 +9,23 @@ package org.elasticsearch.index.query; +import org.apache.lucene.search.Query; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.Collections; +import java.util.Map; import java.util.function.LongSupplier; /** @@ -30,20 +36,57 @@ * and skip the shards that don't hold queried data. See IndexMetadata for more details. */ public class CoordinatorRewriteContext extends QueryRewriteContext { + + public static final String TIER_FIELD_NAME = "_tier"; + + private static final ConstantFieldType TIER_FIELD_TYPE = new ConstantFieldType(TIER_FIELD_NAME, Map.of()) { + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + throw new UnsupportedOperationException("fetching field values is not supported on the coordinator node"); + } + + @Override + public String typeName() { + return TIER_FIELD_NAME; + } + + @Override + protected boolean matches(String pattern, boolean caseInsensitive, QueryRewriteContext context) { + if (caseInsensitive) { + pattern = Strings.toLowercaseAscii(pattern); + } + + String tierPreference = context.getTierPreference(); + if (tierPreference == null) { + return false; + } + return Regex.simpleMatch(pattern, tierPreference); + } + + @Override + public Query existsQuery(SearchExecutionContext context) { + throw new UnsupportedOperationException("field exists query is not supported on the coordinator node"); + } + }; + private final DateFieldRangeInfo dateFieldRangeInfo; + private final String tier; /** * Context for coordinator search rewrites based on time ranges for the @timestamp field and/or 'event.ingested' field + * * @param parserConfig * @param client * @param nowInMillis * @param dateFieldRangeInfo range and field type info for @timestamp and 'event.ingested' + * @param tier the configured data tier (via the _tier_preference setting) for the index */ public CoordinatorRewriteContext( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, - DateFieldRangeInfo dateFieldRangeInfo + DateFieldRangeInfo dateFieldRangeInfo, + String tier ) { super( parserConfig, @@ -63,10 +106,12 @@ public CoordinatorRewriteContext( null ); this.dateFieldRangeInfo = dateFieldRangeInfo; + this.tier = tier; } /** - * @param fieldName Must be one of DataStream.TIMESTAMP_FIELD_FIELD or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @param fieldName Must be one of DataStream.TIMESTAMP_FIELD_FIELD, IndexMetadata.EVENT_INGESTED_FIELD_NAME, or + * DataTierFiledMapper.NAME * @return MappedField with type for the field. Returns null if fieldName is not one of the allowed field names. */ @Nullable @@ -75,6 +120,8 @@ public MappedFieldType getFieldType(String fieldName) { return dateFieldRangeInfo.timestampFieldType(); } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { return dateFieldRangeInfo.eventIngestedFieldType(); + } else if (TIER_FIELD_NAME.equals(fieldName)) { + return TIER_FIELD_TYPE; } else { return null; } @@ -99,4 +146,18 @@ public IndexLongFieldRange getFieldRange(String fieldName) { public CoordinatorRewriteContext convertToCoordinatorRewriteContext() { return this; } + + @Override + public String getTierPreference() { + // dominant branch first (tier preference is configured) + return tier.isEmpty() == false ? tier : null; + } + + /** + * We're holding on to the index tier in the context as otherwise we'd need + * to re-parse it from the index settings when evaluating the _tier field. + */ + public String tier() { + return tier; + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java index 67042a98db42a..e48d7699d03ef 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java @@ -52,6 +52,12 @@ public CoordinatorRewriteContext getCoordinatorRewriteContext(Index index) { return null; } DateFieldRangeInfo dateFieldRangeInfo = mappingSupplier.apply(index); + // we've now added a coordinator rewrite based on the _tier field so the requirement + // for the timestamps fields to be present is artificial (we could do a coordinator + // rewrite only based on the _tier field) and we might decide to remove this artificial + // limitation to enable coordinator rewrites based on _tier for hot and warm indices + // (currently the _tier coordinator rewrite is only available for mounted and partially mounted + // indices) if (dateFieldRangeInfo == null) { return null; } @@ -74,7 +80,8 @@ public CoordinatorRewriteContext getCoordinatorRewriteContext(Index index) { parserConfig, client, nowInMillis, - new DateFieldRangeInfo(timestampFieldType, timestampRange, dateFieldRangeInfo.eventIngestedFieldType(), eventIngestedRange) + new DateFieldRangeInfo(timestampFieldType, timestampRange, dateFieldRangeInfo.eventIngestedFieldType(), eventIngestedRange), + indexMetadata.getTierPreference().isEmpty() == false ? indexMetadata.getTierPreference().getFirst() : "" ); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java index 24817b778a4da..fcf986191da23 100644 --- a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.MappedFieldType; @@ -189,11 +190,24 @@ public String getWriteableName() { } @Override - protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) { MappedFieldType fieldType = context.getFieldType(this.fieldName); if (fieldType == null) { return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist"); - } else if (fieldType instanceof ConstantFieldType constantFieldType) { + } + return maybeRewriteBasedOnConstantFields(fieldType, context); + } + + @Override + protected QueryBuilder doCoordinatorRewrite(CoordinatorRewriteContext coordinatorRewriteContext) { + MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(this.fieldName); + // we don't rewrite a null field type to `match_none` on the coordinator because the coordinator has access + // to only a subset of fields see {@link CoordinatorRewriteContext#getFieldType} + return maybeRewriteBasedOnConstantFields(fieldType, coordinatorRewriteContext); + } + + private QueryBuilder maybeRewriteBasedOnConstantFields(@Nullable MappedFieldType fieldType, QueryRewriteContext context) { + if (fieldType instanceof ConstantFieldType constantFieldType) { // This logic is correct for all field types, but by only applying it to constant // fields we also have the guarantee that it doesn't perform I/O, which is important // since rewrites might happen on a network thread. diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index 8808cd79072f6..fce74aa60ab16 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -11,9 +11,12 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ResolvedIndices; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; @@ -407,4 +410,22 @@ public ResolvedIndices getResolvedIndices() { public PointInTimeBuilder getPointInTimeBuilder() { return pit; } + + /** + * Retrieve the first tier preference from the index setting. If the setting is not + * present, then return null. + */ + @Nullable + public String getTierPreference() { + Settings settings = getIndexSettings().getSettings(); + String value = DataTier.TIER_PREFERENCE_SETTING.get(settings); + + if (Strings.hasText(value) == false) { + return null; + } + + // Tier preference can be a comma-delimited list of tiers, ordered by preference + // It was decided we should only test the first of these potentially multiple preferences. + return value.split(",")[0].trim(); + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java index 2978b3bfbf69c..113f66f3e58de 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.xcontent.ParseField; @@ -170,11 +171,24 @@ protected void addExtraXContent(XContentBuilder builder, Params params) throws I } @Override - protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) { MappedFieldType fieldType = context.getFieldType(this.fieldName); if (fieldType == null) { return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist"); - } else if (fieldType instanceof ConstantFieldType constantFieldType) { + } + return maybeRewriteBasedOnConstantFields(fieldType, context); + } + + @Override + protected QueryBuilder doCoordinatorRewrite(CoordinatorRewriteContext coordinatorRewriteContext) { + MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(this.fieldName); + // we don't rewrite a null field type to `match_none` on the coordinator because the coordinator has access + // to only a subset of fields see {@link CoordinatorRewriteContext#getFieldType} + return maybeRewriteBasedOnConstantFields(fieldType, coordinatorRewriteContext); + } + + private QueryBuilder maybeRewriteBasedOnConstantFields(@Nullable MappedFieldType fieldType, QueryRewriteContext context) { + if (fieldType instanceof ConstantFieldType constantFieldType) { // This logic is correct for all field types, but by only applying it to constant // fields we also have the guarantee that it doesn't perform I/O, which is important // since rewrites might happen on a network thread. diff --git a/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java index 4035bc02fba79..dec4090a3e6bd 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java @@ -393,11 +393,24 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws } @Override - protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) { MappedFieldType fieldType = context.getFieldType(this.fieldName); if (fieldType == null) { return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist"); - } else if (fieldType instanceof ConstantFieldType constantFieldType) { + } + return maybeRewriteBasedOnConstantFields(fieldType, context); + } + + @Override + protected QueryBuilder doCoordinatorRewrite(CoordinatorRewriteContext coordinatorRewriteContext) { + MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(this.fieldName); + // we don't rewrite a null field type to `match_none` on the coordinator because the coordinator has access + // to only a subset of fields see {@link CoordinatorRewriteContext#getFieldType} + return maybeRewriteBasedOnConstantFields(fieldType, coordinatorRewriteContext); + } + + private QueryBuilder maybeRewriteBasedOnConstantFields(@Nullable MappedFieldType fieldType, QueryRewriteContext context) { + if (fieldType instanceof ConstantFieldType constantFieldType) { // This logic is correct for all field types, but by only applying it to constant // fields we also have the guarantee that it doesn't perform I/O, which is important // since rewrites might happen on a network thread. diff --git a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java index f287812ebbc10..419195e5e5ba5 100644 --- a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.support.QueryParsers; @@ -200,11 +201,24 @@ public static WildcardQueryBuilder fromXContent(XContentParser parser) throws IO } @Override - protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) { MappedFieldType fieldType = context.getFieldType(this.fieldName); if (fieldType == null) { - return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist"); - } else if (fieldType instanceof ConstantFieldType constantFieldType) { + return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist"); + } + return maybeRewriteBasedOnConstantFields(fieldType, context); + } + + @Override + protected QueryBuilder doCoordinatorRewrite(CoordinatorRewriteContext coordinatorRewriteContext) { + MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(this.fieldName); + // we don't rewrite a null field type to `match_none` on the coordinator because the coordinator has access + // to only a subset of fields see {@link CoordinatorRewriteContext#getFieldType} + return maybeRewriteBasedOnConstantFields(fieldType, coordinatorRewriteContext); + } + + private QueryBuilder maybeRewriteBasedOnConstantFields(@Nullable MappedFieldType fieldType, QueryRewriteContext context) { + if (fieldType instanceof ConstantFieldType constantFieldType) { // This logic is correct for all field types, but by only applying it to constant // fields we also have the guarantee that it doesn't perform I/O, which is important // since rewrites might happen on a network thread. diff --git a/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java index 0260fa2ef4cc8..918815f2a4f77 100644 --- a/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java @@ -17,7 +17,9 @@ import org.apache.lucene.search.Query; import org.elasticsearch.common.ParsingException; import org.elasticsearch.core.Strings; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.test.AbstractQueryTestCase; +import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; import java.io.IOException; @@ -175,4 +177,37 @@ public void testMustRewrite() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> queryBuilder.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testCoordinatorTierRewriteToMatchAll() throws IOException { + QueryBuilder query = new PrefixQueryBuilder("_tier", "data_fro"); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchAllQueryBuilder.class)); + } + + public void testCoordinatorTierRewriteToMatchNone() throws IOException { + QueryBuilder query = QueryBuilders.boolQuery().mustNot(new PrefixQueryBuilder("_tier", "data_fro")); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchNoneQueryBuilder.class)); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java new file mode 100644 index 0000000000000..0b2a8ab4856b3 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.indices.DateFieldRangeInfo; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class QueryRewriteContextTests extends ESTestCase { + + public void testGetTierPreference() { + { + // cold->hot tier preference + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(DataTier.TIER_PREFERENCE, "data_cold,data_warm,data_hot") + .build() + ); + QueryRewriteContext context = new QueryRewriteContext( + parserConfig(), + null, + System::currentTimeMillis, + null, + MappingLookup.EMPTY, + Collections.emptyMap(), + new IndexSettings(metadata, Settings.EMPTY), + null, + null, + null, + null, + null, + null, + null, + null + ); + + assertThat(context.getTierPreference(), is("data_cold")); + } + + { + // missing tier preference + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build() + ); + QueryRewriteContext context = new QueryRewriteContext( + parserConfig(), + null, + System::currentTimeMillis, + null, + MappingLookup.EMPTY, + Collections.emptyMap(), + new IndexSettings(metadata, Settings.EMPTY), + null, + null, + null, + null, + null, + null, + null, + null + ); + + assertThat(context.getTierPreference(), is(nullValue())); + } + + { + // coordinator rewrite context + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(DataTier.TIER_PREFERENCE, "data_cold,data_warm,data_hot") + .build() + ); + CoordinatorRewriteContext coordinatorRewriteContext = new CoordinatorRewriteContext( + parserConfig(), + null, + System::currentTimeMillis, + new DateFieldRangeInfo(null, null, new DateFieldMapper.DateFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), null), + "data_frozen" + ); + + assertThat(coordinatorRewriteContext.getTierPreference(), is("data_frozen")); + } + { + // coordinator rewrite context empty tier + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(DataTier.TIER_PREFERENCE, "data_cold,data_warm,data_hot") + .build() + ); + CoordinatorRewriteContext coordinatorRewriteContext = new CoordinatorRewriteContext( + parserConfig(), + null, + System::currentTimeMillis, + new DateFieldRangeInfo(null, null, new DateFieldMapper.DateFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), null), + "" + ); + + assertThat(coordinatorRewriteContext.getTierPreference(), is(nullValue())); + } + } + + public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { + return IndexMetadata.builder(name).settings(indexSettings(IndexVersion.current(), 1, 1).put(indexSettings)).build(); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java index b5cf42cf5df28..bbac216754eed 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java @@ -17,9 +17,11 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.xcontent.json.JsonStringEncoder; +import org.hamcrest.CoreMatchers; import java.io.IOException; import java.util.Locale; @@ -238,4 +240,36 @@ public void testLongTerm() throws IOException { { "term" : { "foo" : "%s" } }""", longTerm))); assertThat(e.getMessage(), containsString("term starting with [aaaaa")); } + + public void testCoordinatorTierRewriteToMatchAll() throws IOException { + QueryBuilder query = new TermQueryBuilder("_tier", "data_frozen"); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchAllQueryBuilder.class)); + } + + public void testCoordinatorTierRewriteToMatchNone() throws IOException { + QueryBuilder query = QueryBuilders.boolQuery().mustNot(new TermQueryBuilder("_tier", "data_frozen")); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchNoneQueryBuilder.class)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index 1ce69355379de..2faee7bc89eb5 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.xcontent.XContentBuilder; @@ -317,6 +318,38 @@ public void testLongTerm() throws IOException { assertThat(e.getMessage(), containsString("term starting with [aaaaa")); } + public void testCoordinatorTierRewriteToMatchAll() throws IOException { + QueryBuilder query = new TermsQueryBuilder("_tier", "data_frozen"); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchAllQueryBuilder.class)); + } + + public void testCoordinatorTierRewriteToMatchNone() throws IOException { + QueryBuilder query = QueryBuilders.boolQuery().mustNot(new TermsQueryBuilder("_tier", "data_frozen")); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchNoneQueryBuilder.class)); + } + @Override protected QueryBuilder parseQuery(XContentParser parser) throws IOException { QueryBuilder query = super.parseQuery(parser); diff --git a/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java index 7ee6d75a08736..182bd4d6b5b86 100644 --- a/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java @@ -15,7 +15,9 @@ import org.apache.lucene.search.WildcardQuery; import org.elasticsearch.common.ParsingException; import org.elasticsearch.core.Strings; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.test.AbstractQueryTestCase; +import org.hamcrest.CoreMatchers; import java.io.IOException; import java.util.HashMap; @@ -166,4 +168,36 @@ public void testMustRewrite() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> queryBuilder.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testCoordinatorTierRewriteToMatchAll() throws IOException { + QueryBuilder query = new WildcardQueryBuilder("_tier", "data_fr*"); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchAllQueryBuilder.class)); + } + + public void testCoordinatorTierRewriteToMatchNone() throws IOException { + QueryBuilder query = QueryBuilders.boolQuery().mustNot(new WildcardQueryBuilder("_tier", "data_fro*")); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchNoneQueryBuilder.class)); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 77ff194e2681d..0543bc7a78f8b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -342,6 +342,15 @@ protected static CoordinatorRewriteContext createCoordinatorRewriteContext( return serviceHolder.createCoordinatorContext(dateFieldType, min, max); } + protected static CoordinatorRewriteContext createCoordinatorRewriteContext( + DateFieldMapper.DateFieldType dateFieldType, + long min, + long max, + String tier + ) { + return serviceHolder.createCoordinatorContext(dateFieldType, min, max, tier); + } + protected static DataRewriteContext dataRewriteContext() { return serviceHolder.createDataContext(); } @@ -625,13 +634,17 @@ QueryRewriteContext createQueryRewriteContext() { } CoordinatorRewriteContext createCoordinatorContext(DateFieldMapper.DateFieldType dateFieldType, long min, long max) { + return createCoordinatorContext(dateFieldType, min, max, ""); + } + + CoordinatorRewriteContext createCoordinatorContext(DateFieldMapper.DateFieldType dateFieldType, long min, long max, String tier) { DateFieldRangeInfo timestampFieldInfo = new DateFieldRangeInfo( dateFieldType, IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)), dateFieldType, IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)) ); - return new CoordinatorRewriteContext(parserConfiguration, this.client, () -> nowInMillis, timestampFieldInfo); + return new CoordinatorRewriteContext(parserConfiguration, this.client, () -> nowInMillis, timestampFieldInfo, tier); } DataRewriteContext createDataContext() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java index 527f8d1c176ec..0e185a90ed39b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java @@ -10,10 +10,8 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -55,7 +53,7 @@ protected boolean matches(String pattern, boolean caseInsensitive, QueryRewriteC pattern = Strings.toLowercaseAscii(pattern); } - String tierPreference = getTierPreference(context); + String tierPreference = context.getTierPreference(); if (tierPreference == null) { return false; } @@ -64,7 +62,7 @@ protected boolean matches(String pattern, boolean caseInsensitive, QueryRewriteC @Override public Query existsQuery(SearchExecutionContext context) { - String tierPreference = getTierPreference(context); + String tierPreference = context.getTierPreference(); if (tierPreference == null) { return new MatchNoDocsQuery(); } @@ -77,26 +75,9 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - String tierPreference = getTierPreference(context); + String tierPreference = context.getTierPreference(); return tierPreference == null ? ValueFetcher.EMPTY : ValueFetcher.singleton(tierPreference); } - - /** - * Retrieve the first tier preference from the index setting. If the setting is not - * present, then return null. - */ - private static String getTierPreference(QueryRewriteContext context) { - Settings settings = context.getIndexSettings().getSettings(); - String value = DataTier.TIER_PREFERENCE_SETTING.get(settings); - - if (Strings.hasText(value) == false) { - return null; - } - - // Tier preference can be a comma-delimited list of tiers, ordered by preference - // It was decided we should only test the first of these potentially multiple preferences. - return value.split(",")[0].trim(); - } } public DataTierFieldMapper() { @@ -107,4 +88,5 @@ public DataTierFieldMapper() { protected String contentType() { return CONTENT_TYPE; } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index 918976c0d3db8..1f2c89c473a62 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -637,10 +637,15 @@ public Collection getSystemIndexDescriptors(Settings sett @Override public Map getMetadataMappers() { - return filterPlugins(MapperPlugin.class).stream() + Map pluginsMetadataMappers = filterPlugins(MapperPlugin.class).stream() .map(MapperPlugin::getMetadataMappers) .flatMap(map -> map.entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + // the xpack plugin itself exposes a metadata mapper so let's include it as well + Map metadataMappersIncludingXPackPlugin = new HashMap<>(pluginsMetadataMappers); + metadataMappersIncludingXPackPlugin.putAll(super.getMetadataMappers()); + return metadataMappersIncludingXPackPlugin; } @Override diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index eab73fbe5ad04..ed42d86bc8c49 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -20,14 +20,18 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; @@ -36,6 +40,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.NodeRoles; import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.xcontent.XContentFactory; @@ -51,6 +56,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING; +import static org.elasticsearch.cluster.node.DiscoveryNode.getRolesFromSettings; import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; @@ -76,14 +82,24 @@ protected Collection> nodePlugins() { @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { final Settings initialSettings = super.nodeSettings(nodeOrdinal, otherSettings); - if (DiscoveryNode.canContainData(otherSettings)) { + + if (DiscoveryNode.canContainData(otherSettings) + && getRolesFromSettings(otherSettings).stream() + .anyMatch( + nr -> nr.roleName().equals(DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE.roleName()) + || nr.roleName().equals(DiscoveryNodeRole.DATA_ROLE.roleName()) + )) { return Settings.builder() .put(initialSettings) // Have a shared cache of reasonable size available on each node because tests randomize over frozen and cold allocation .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofMb(randomLongBetween(1, 10))) .build(); } else { - return initialSettings; + return Settings.builder() + .put(initialSettings) + // Have a shared cache of reasonable size available on each node because tests randomize over frozen and cold allocation + .putNull(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey()) + .build(); } } @@ -955,6 +971,129 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo } } + public void testCanMatchSkipsPartiallyMountedIndicesWhenFrozenNodesUnavailable() throws Exception { + internalCluster().startMasterOnlyNode(); + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + final String dataNodeHoldingRegularIndex = internalCluster().startNode( + NodeRoles.onlyRole(DiscoveryNodeRole.DATA_CONTENT_NODE_ROLE) + ); + final String dataNodeHoldingSearchableSnapshot = internalCluster().startNode( + NodeRoles.onlyRole(DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE) + ); + + final String indexToMountInFrozen = "frozen-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final int shardCount = randomIntBetween(2, 3); + createIndexWithTimestampAndEventIngested(indexToMountInFrozen, shardCount, Settings.EMPTY); + final int numDocsFrozenIndex = between(350, 1000); + indexRandomDocs(indexToMountInFrozen, numDocsFrozenIndex); + + final String regularIndex = "regular-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndexWithTimestampAndEventIngested( + regularIndex, + shardCount, + Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex) + .build() + ); + int numDocsRegularIndex = between(100, 1000); + indexDocumentsWithTimestampAndEventIngestedDates(regularIndex, numDocsRegularIndex, TIMESTAMP_TEMPLATE_WITHIN_RANGE); + + final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createRepository(repositoryName, "mock"); + + final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indexToMountInFrozen)).snapshotId(); + assertAcked(indicesAdmin().prepareDelete(indexToMountInFrozen)); + + final String partiallyMountedIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + + final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest( + TEST_REQUEST_TIMEOUT, + partiallyMountedIndex, + repositoryName, + snapshotId.getName(), + indexToMountInFrozen, + Settings.EMPTY, + Strings.EMPTY_ARRAY, + false, + MountSearchableSnapshotRequest.Storage.SHARED_CACHE + ); + client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet(); + + ensureGreen(regularIndex, partiallyMountedIndex); + + // Stop the node holding the searchable snapshots, and since we defined + // the index allocation criteria to require the searchable snapshot + // index to be allocated in that node, the shards should remain unassigned + internalCluster().stopNode(dataNodeHoldingSearchableSnapshot); + final IndexMetadata partiallyMountedIndexMetadata = getIndexMetadata(partiallyMountedIndex); + waitUntilAllShardsAreUnassigned(partiallyMountedIndexMetadata.getIndex()); + + { + // term query + TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("_tier", "data_content"); + List indicesToSearch = List.of(regularIndex, partiallyMountedIndex); + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(termQueryBuilder)); + + assertResponse(client().search(request), searchResponse -> { + // as we excluded the frozen tier we shouldn't get any failures + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // we should be receiving all the hits from the index that's in the data_content tier + assertNotNull(searchResponse.getHits().getTotalHits()); + assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) numDocsRegularIndex)); + }); + } + + { + // termS query + TermsQueryBuilder termsQueryBuilder = QueryBuilders.termsQuery("_tier", "data_hot", "data_content"); + List indicesToSearch = List.of(regularIndex, partiallyMountedIndex); + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(termsQueryBuilder)); + + assertResponse(client().search(request), searchResponse -> { + // as we excluded the frozen tier we shouldn't get any failures + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // we should be receiving all the hits from the index that's in the data_content tier + assertNotNull(searchResponse.getHits().getTotalHits()); + assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) numDocsRegularIndex)); + }); + } + + { + // bool term query + BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery().mustNot(QueryBuilders.termQuery("_tier", "data_frozen")); + List indicesToSearch = List.of(regularIndex, partiallyMountedIndex); + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(boolQueryBuilder)); + + assertResponse(client().search(request), searchResponse -> { + // as we excluded the frozen tier we shouldn't get any failures + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // we should be receiving all the hits from the index that's in the data_content tier + assertNotNull(searchResponse.getHits().getTotalHits()); + assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) numDocsRegularIndex)); + }); + } + + { + // bool prefix, wildcard + BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery() + .mustNot(randomFrom(QueryBuilders.wildcardQuery("_tier", "dat*ozen"), QueryBuilders.prefixQuery("_tier", "data_fro"))); + List indicesToSearch = List.of(regularIndex, partiallyMountedIndex); + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(boolQueryBuilder)); + + assertResponse(client().search(request), searchResponse -> { + // as we excluded the frozen tier we shouldn't get any failures + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // we should be receiving all the hits from the index that's in the data_content tier + assertNotNull(searchResponse.getHits().getTotalHits()); + assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) numDocsRegularIndex)); + }); + } + } + private void createIndexWithTimestampAndEventIngested(String indexName, int numShards, Settings extraSettings) throws IOException { assertAcked( indicesAdmin().prepareCreate(indexName) From 4fb7a4f1e98cb2934bf1427bb9dba0140a481dd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 24 Oct 2024 14:07:06 +0200 Subject: [PATCH 050/324] [DOCS] Improve inference API documentation (#115235) Co-authored-by: David Kyle --- .../inference/inference-apis.asciidoc | 18 ++++ .../inference/service-elasticsearch.asciidoc | 94 +++++++++++++++++-- .../inference/service-elser.asciidoc | 3 +- 3 files changed, 104 insertions(+), 11 deletions(-) diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index b291b464be498..ddcff1abc7dce 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -34,6 +34,24 @@ Elastic –, then create an {infer} endpoint by the <>. Now use <> to perform <> on your data. + +[discrete] +[[default-enpoints]] +=== Default {infer} endpoints + +Your {es} deployment contains some preconfigured {infer} endpoints that makes it easier for you to use them when defining `semantic_text` fields or {infer} processors. +The following list contains the default {infer} endpoints listed by `inference_id`: + +* `.elser-2-elasticsearch`: uses the {ml-docs}/ml-nlp-elser.html[ELSER] built-in trained model for `sparse_embedding` tasks (recommended for English language texts) +* `.multilingual-e5-small-elasticsearch`: uses the {ml-docs}/ml-nlp-e5.html[E5] built-in trained model for `text_embedding` tasks (recommended for non-English language texts) + +Use the `inference_id` of the endpoint in a <> field definition or when creating an <>. +The API call will automatically download and deploy the model which might take a couple of minutes. +Default {infer} enpoints have {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations] enabled. +For these models, the minimum number of allocations is `0`. +If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes. + + include::delete-inference.asciidoc[] include::get-inference.asciidoc[] include::post-inference.asciidoc[] diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index efa0c78b8356f..259779a12134d 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -1,12 +1,9 @@ [[infer-service-elasticsearch]] === Elasticsearch {infer} service -Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` -service. +Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` service. -NOTE: If you use the E5 model through the `elasticsearch` service, the API -request will automatically download and deploy the model if it isn't downloaded -yet. +NOTE: If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. [discrete] @@ -56,6 +53,11 @@ These settings are specific to the `elasticsearch` service. (Optional, object) include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] +`deployment_id`::: +(Optional, string) +The `deployment_id` of an existing trained model deployment. +When `deployment_id` is used the `model_id` is optional. + `enabled`:::: (Optional, Boolean) include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] @@ -71,7 +73,7 @@ include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-min-number] `model_id`::: (Required, string) The name of the model to use for the {infer} task. -It can be the ID of either a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model already +It can be the ID of either a built-in model (for example, `.multilingual-e5-small` for E5), a text embedding model already {ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. `num_allocations`::: @@ -98,15 +100,44 @@ Returns the document instead of only the index. Defaults to `true`. ===== +[discrete] +[[inference-example-elasticsearch-elser]] +==== ELSER via the `elasticsearch` service + +The following example shows how to create an {infer} endpoint called `my-elser-model` to perform a `sparse_embedding` task type. + +The API request below will automatically download the ELSER model if it isn't already downloaded and then deploy the model. + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/my-elser-model +{ + "service": "elasticsearch", + "service_settings": { + "adaptive_allocations": { <1> + "enabled": true, + "min_number_of_allocations": 1, + "max_number_of_allocations": 10 + }, + "num_threads": 1, + "model_id": ".elser_model_2" <2> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> Adaptive allocations will be enabled with the minimum of 1 and the maximum of 10 allocations. +<2> The `model_id` must be the ID of one of the built-in ELSER models. +Valid values are `.elser_model_2` and `.elser_model_2_linux-x86_64`. +For further details, refer to the {ml-docs}/ml-nlp-elser.html[ELSER model documentation]. + + [discrete] [[inference-example-elasticsearch]] ==== E5 via the `elasticsearch` service -The following example shows how to create an {infer} endpoint called -`my-e5-model` to perform a `text_embedding` task type. +The following example shows how to create an {infer} endpoint called `my-e5-model` to perform a `text_embedding` task type. -The API request below will automatically download the E5 model if it isn't -already downloaded and then deploy the model. +The API request below will automatically download the E5 model if it isn't already downloaded and then deploy the model. [source,console] ------------------------------------------------------------ @@ -185,3 +216,46 @@ PUT _inference/text_embedding/my-e5-model } ------------------------------------------------------------ // TEST[skip:TBD] + + +[discrete] +[[inference-example-existing-deployment]] +==== Using an existing model deployment with the `elasticsearch` service + +The following example shows how to use an already existing model deployment when creating an {infer} endpoint. + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/use_existing_deployment +{ + "service": "elasticsearch", + "service_settings": { + "deployment_id": ".elser_model_2" <1> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The `deployment_id` of the already existing model deployment. + +The API response contains the `model_id`, and the threads and allocations settings from the model deployment: + +[source,console-result] +------------------------------------------------------------ +{ + "inference_id": "use_existing_deployment", + "task_type": "sparse_embedding", + "service": "elasticsearch", + "service_settings": { + "num_allocations": 2, + "num_threads": 1, + "model_id": ".elser_model_2", + "deployment_id": ".elser_model_2" + }, + "chunking_settings": { + "strategy": "sentence", + "max_chunk_size": 250, + "sentence_overlap": 1 + } +} +------------------------------------------------------------ +// NOTCONSOLE \ No newline at end of file diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 6afc2a2e3ef65..521fab0375584 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -2,6 +2,7 @@ === ELSER {infer} service Creates an {infer} endpoint to perform an {infer} task with the `elser` service. +You can also deploy ELSER by using the <>. NOTE: The API request will automatically download and deploy the ELSER model if it isn't already downloaded. @@ -128,7 +129,7 @@ If using the Python client, you can set the `timeout` parameter to a higher valu [discrete] [[inference-example-elser-adaptive-allocation]] -==== Setting adaptive allocation for the ELSER service +==== Setting adaptive allocations for the ELSER service NOTE: For more information on how to optimize your ELSER endpoints, refer to {ml-docs}/ml-nlp-elser.html#elser-recommendations[the ELSER recommendations] section in the model documentation. To learn more about model autoscaling, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] page. From 4e4fe9c3a99faaded41b6c08d98bf8eda6f3ea6b Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 24 Oct 2024 23:28:55 +1100 Subject: [PATCH 051/324] Mute org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT testDeploymentSurvivesRestart {cluster=UPGRADED} #115528 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 8b9c3cc6ce712..2d5349ed03b48 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT method: testFileSettingsReprocessedOnRestartWithoutVersionChange issue: https://github.com/elastic/elasticsearch/issues/115450 +- class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT + method: testDeploymentSurvivesRestart {cluster=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/115528 # Examples: # From f774d0ee8249fef76182f76d401a97e217c53981 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 24 Oct 2024 14:58:37 +0200 Subject: [PATCH 052/324] Remove Delivery team as codeowners for gradle build scripts (#115523) --- .github/CODEOWNERS | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5b98444c044d2..540da14402192 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -39,7 +39,6 @@ gradle @elastic/es-delivery build-conventions @elastic/es-delivery build-tools @elastic/es-delivery build-tools-internal @elastic/es-delivery -*.gradle @elastic/es-delivery .buildkite @elastic/es-delivery .ci @elastic/es-delivery .idea @elastic/es-delivery From 889d2c346e4ab498875b1bb0aaaee88c54f4c1a2 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Thu, 24 Oct 2024 09:03:12 -0400 Subject: [PATCH 053/324] [ESQL] Enable "any type" aggregations on Date Nanos (#114438) Resolves #110002 Resolves #110003 Resolves #110005 Enable Values, Count, CountDistinct, Min and Max aggregations on date nanos. In the course of addressing this, I had to make some changes to AggregateMapper where it maps types into string names. I tried to refactor this once before (#110841) but at the time we decided not to go ahead with it. That bit me while working on this, and so I am trying again to refactor it. This time I've made a more localized change, just replacing the cascading if block with a switch. That will cause a compile time failure when future new data types are added, unless they correctly update this section. I've also done a small refactoring on the aggregators themselves, to make the supplier function consistent with the typeResolution. --------- Co-authored-by: Elastic Machine --- .../src/main/resources/date_nanos.csv | 1 + .../src/main/resources/date_nanos.csv-spec | 31 ++++++++++++++ .../xpack/esql/action/EsqlCapabilities.java | 5 +++ .../function/aggregate/CountDistinct.java | 40 ++++++++++-------- .../expression/function/aggregate/Max.java | 42 +++++++++---------- .../expression/function/aggregate/Min.java | 42 +++++++++---------- .../expression/function/aggregate/Values.java | 38 +++++++++-------- .../xpack/esql/planner/AggregateMapper.java | 31 ++++++-------- 8 files changed, 131 insertions(+), 99 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv index 029c3baf3cbfb..26b6f055221a6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv @@ -6,5 +6,6 @@ millis:date,nanos:date_nanos,num:long 2023-10-23T13:33:34.937Z,2023-10-23T13:33:34.937193000Z,1698068014937193000 2023-10-23T12:27:28.948Z,2023-10-23T12:27:28.948000000Z,1698064048948000000 2023-10-23T12:15:03.360Z,2023-10-23T12:15:03.360103847Z,1698063303360103847 +2023-10-23T12:15:03.360Z,2023-10-23T12:15:03.360103847Z,1698063303360103847 1999-10-23T12:15:03.360Z,[2023-03-23T12:15:03.360103847Z, 2023-02-23T13:33:34.937193000Z, 2023-01-23T13:55:01.543123456Z], 0 1999-10-22T12:15:03.360Z,[2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z], 0 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec index 515e2c9c6587f..d0edc1f07d021 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec @@ -216,6 +216,7 @@ l:long 1698068014937193000 1698064048948000000 1698063303360103847 +1698063303360103847 ; long to date nanos, index version @@ -231,6 +232,7 @@ d:date_nanos 2023-10-23T13:33:34.937193000Z 2023-10-23T12:27:28.948000000Z 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360103847Z ; date_nanos to date nanos, index version @@ -246,6 +248,7 @@ d:date_nanos 2023-10-23T13:33:34.937193000Z 2023-10-23T12:27:28.948000000Z 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360103847Z ; attempt to cast the result of a fold to date nanos @@ -331,3 +334,31 @@ a:date_nanos [2023-02-23T13:33:34.937193000Z, 2023-03-23T12:15:03.360103847Z] [2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z] ; + + +Max and Min of date nanos +required_capability: date_nanos_aggregations + +FROM date_nanos | STATS max = MAX(nanos), min = MIN(nanos); + +max:date_nanos | min:date_nanos +2023-10-23T13:55:01.543123456Z | 2023-01-23T13:55:01.543123456Z +; + +Count and count distinct of date nanos +required_capability: date_nanos_aggregations + +FROM date_nanos | WHERE millis > "2020-01-01" | STATS count = COUNT(nanos), count_distinct = COUNT_DISTINCT(nanos); + +count:long | count_distinct:long +8 | 7 +; + +Values aggregation on date nanos +required_capability: date_nanos_aggregations + +FROM date_nanos | WHERE millis > "2020-01-01" | STATS v = MV_SORT(VALUES(nanos), "DESC"); + +v:date_nanos +[2023-10-23T13:55:01.543123456Z, 2023-10-23T13:53:55.832987654Z, 2023-10-23T13:52:55.015787878Z, 2023-10-23T13:51:54.732102837Z, 2023-10-23T13:33:34.937193000Z, 2023-10-23T12:27:28.948000000Z, 2023-10-23T12:15:03.360103847Z] +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index f22ad07a4c6f6..55236af648236 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -313,6 +313,11 @@ public enum Cap { */ LEAST_GREATEST_FOR_DATENANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + /** + * support aggregations on date nanos + */ + DATE_NANOS_AGGREGATIONS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + /** * Support for datetime in least and greatest functions */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java index 756000dfbb187..5ae162f1fbb12 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java @@ -38,6 +38,8 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; @@ -53,6 +55,20 @@ public class CountDistinct extends AggregateFunction implements OptionalArgument CountDistinct::new ); + private static final Map, Integer, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + // Booleans ignore the precision because there are only two possible values anyway + Map.entry(DataType.BOOLEAN, (inputChannels, precision) -> new CountDistinctBooleanAggregatorFunctionSupplier(inputChannels)), + Map.entry(DataType.LONG, CountDistinctLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, CountDistinctLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, CountDistinctLongAggregatorFunctionSupplier::new), + Map.entry(DataType.INTEGER, CountDistinctIntAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, CountDistinctDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.KEYWORD, CountDistinctBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, CountDistinctBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.VERSION, CountDistinctBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, CountDistinctBytesRefAggregatorFunctionSupplier::new) + ); + private static final int DEFAULT_PRECISION = 3000; private final Expression precision; @@ -102,7 +118,7 @@ public CountDistinct( Source source, @Param( name = "field", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "Column or literal for which to count the number of distinct values." ) Expression field, @Param( @@ -179,7 +195,7 @@ protected TypeResolution resolveType() { .and( isType( field(), - dt -> dt != DataType.UNSIGNED_LONG && dt != DataType.SOURCE, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "any exact type except unsigned_long, _source, or counter types" @@ -196,23 +212,11 @@ protected TypeResolution resolveType() { public AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); int precision = this.precision == null ? DEFAULT_PRECISION : ((Number) this.precision.fold()).intValue(); - if (type == DataType.BOOLEAN) { - // Booleans ignore the precision because there are only two possible values anyway - return new CountDistinctBooleanAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DATETIME || type == DataType.LONG) { - return new CountDistinctLongAggregatorFunctionSupplier(inputChannels, precision); - } - if (type == DataType.INTEGER) { - return new CountDistinctIntAggregatorFunctionSupplier(inputChannels, precision); - } - if (type == DataType.DOUBLE) { - return new CountDistinctDoubleAggregatorFunctionSupplier(inputChannels, precision); - } - if (DataType.isString(type) || type == DataType.IP || type == DataType.VERSION) { - return new CountDistinctBytesRefAggregatorFunctionSupplier(inputChannels, precision); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels, precision); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 6119b2ce58465..ee16193efdccc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -32,16 +32,28 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; -import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; -import static org.elasticsearch.xpack.esql.core.type.DataType.isRepresentable; -import static org.elasticsearch.xpack.esql.core.type.DataType.isSpatial; public class Max extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Max", Max::new); + private static final Map, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + Map.entry(DataType.BOOLEAN, MaxBooleanAggregatorFunctionSupplier::new), + Map.entry(DataType.LONG, MaxLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, MaxLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, MaxLongAggregatorFunctionSupplier::new), + Map.entry(DataType.INTEGER, MaxIntAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, MaxDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, MaxIpAggregatorFunctionSupplier::new), + Map.entry(DataType.KEYWORD, MaxBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, MaxBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.VERSION, MaxBytesRefAggregatorFunctionSupplier::new) + ); + @FunctionInfo( returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, description = "The maximum value of a field.", @@ -98,7 +110,7 @@ public Max replaceChildren(List newChildren) { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - t -> isRepresentable(t) && t != UNSIGNED_LONG && isSpatial(t) == false, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "representable except unsigned_long and spatial types" @@ -113,25 +125,11 @@ public DataType dataType() { @Override public final AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); - if (type == DataType.BOOLEAN) { - return new MaxBooleanAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.LONG || type == DataType.DATETIME) { - return new MaxLongAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.INTEGER) { - return new MaxIntAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DOUBLE) { - return new MaxDoubleAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.IP) { - return new MaxIpAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.VERSION || DataType.isString(type)) { - return new MaxBytesRefAggregatorFunctionSupplier(inputChannels); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index a1492f79da393..7aaa41ea6ab11 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -32,16 +32,28 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; -import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; -import static org.elasticsearch.xpack.esql.core.type.DataType.isRepresentable; -import static org.elasticsearch.xpack.esql.core.type.DataType.isSpatial; public class Min extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Min", Min::new); + private static final Map, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + Map.entry(DataType.BOOLEAN, MinBooleanAggregatorFunctionSupplier::new), + Map.entry(DataType.LONG, MinLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, MinLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, MinLongAggregatorFunctionSupplier::new), + Map.entry(DataType.INTEGER, MinIntAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, MinDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, MinIpAggregatorFunctionSupplier::new), + Map.entry(DataType.VERSION, MinBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.KEYWORD, MinBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, MinBytesRefAggregatorFunctionSupplier::new) + ); + @FunctionInfo( returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, description = "The minimum value of a field.", @@ -98,7 +110,7 @@ public Min withFilter(Expression filter) { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - t -> isRepresentable(t) && t != UNSIGNED_LONG && isSpatial(t) == false, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "representable except unsigned_long and spatial types" @@ -113,25 +125,11 @@ public DataType dataType() { @Override public final AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); - if (type == DataType.BOOLEAN) { - return new MinBooleanAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.LONG || type == DataType.DATETIME) { - return new MinLongAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.INTEGER) { - return new MinIntAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DOUBLE) { - return new MinDoubleAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.IP) { - return new MinIpAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.VERSION || DataType.isString(type)) { - return new MinBytesRefAggregatorFunctionSupplier(inputChannels); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java index a844b981c95d6..8d576839c3c5c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java @@ -29,14 +29,28 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; -import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; public class Values extends AggregateFunction implements ToAggregator { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Values", Values::new); + private static final Map, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + Map.entry(DataType.INTEGER, ValuesIntAggregatorFunctionSupplier::new), + Map.entry(DataType.LONG, ValuesLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, ValuesLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, ValuesLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, ValuesDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.KEYWORD, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.VERSION, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.BOOLEAN, ValuesBooleanAggregatorFunctionSupplier::new) + ); + @FunctionInfo( returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, preview = true, @@ -98,7 +112,7 @@ public DataType dataType() { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - dt -> DataType.isSpatial(dt) == false && dt != UNSIGNED_LONG, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "any type except unsigned_long and spatial types" @@ -108,22 +122,10 @@ protected TypeResolution resolveType() { @Override public AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); - if (type == DataType.INTEGER) { - return new ValuesIntAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.LONG || type == DataType.DATETIME) { - return new ValuesLongAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DOUBLE) { - return new ValuesDoubleAggregatorFunctionSupplier(inputChannels); - } - if (DataType.isString(type) || type == DataType.IP || type == DataType.VERSION) { - return new ValuesBytesRefAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.BOOLEAN) { - return new ValuesBooleanAggregatorFunctionSupplier(inputChannels); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - // TODO cartesian_point, geo_point - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index c322135198262..3e81c2a2c1101 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -297,25 +297,18 @@ private static String dataTypeToString(DataType type, Class aggClass) { if (aggClass == Top.class && type.equals(DataType.IP)) { return "Ip"; } - if (type.equals(DataType.BOOLEAN)) { - return "Boolean"; - } else if (type.equals(DataType.INTEGER) || type.equals(DataType.COUNTER_INTEGER)) { - return "Int"; - } else if (type.equals(DataType.LONG) || type.equals(DataType.DATETIME) || type.equals(DataType.COUNTER_LONG)) { - return "Long"; - } else if (type.equals(DataType.DOUBLE) || type.equals(DataType.COUNTER_DOUBLE)) { - return "Double"; - } else if (type.equals(DataType.KEYWORD) - || type.equals(DataType.IP) - || type.equals(DataType.VERSION) - || type.equals(DataType.TEXT)) { - return "BytesRef"; - } else if (type.equals(GEO_POINT)) { - return "GeoPoint"; - } else if (type.equals(CARTESIAN_POINT)) { - return "CartesianPoint"; - } else { + + return switch (type) { + case DataType.BOOLEAN -> "Boolean"; + case DataType.INTEGER, DataType.COUNTER_INTEGER -> "Int"; + case DataType.LONG, DataType.DATETIME, DataType.COUNTER_LONG, DataType.DATE_NANOS -> "Long"; + case DataType.DOUBLE, DataType.COUNTER_DOUBLE -> "Double"; + case DataType.KEYWORD, DataType.IP, DataType.VERSION, DataType.TEXT -> "BytesRef"; + case GEO_POINT -> "GeoPoint"; + case CARTESIAN_POINT -> "CartesianPoint"; + case SEMANTIC_TEXT, UNSUPPORTED, NULL, UNSIGNED_LONG, SHORT, BYTE, FLOAT, HALF_FLOAT, SCALED_FLOAT, OBJECT, SOURCE, DATE_PERIOD, + TIME_DURATION, CARTESIAN_SHAPE, GEO_SHAPE, DOC_DATA_TYPE, TSID_DATA_TYPE, PARTIAL_AGG -> throw new EsqlIllegalArgumentException("illegal agg type: " + type.typeName()); - } + }; } } From 28715b791a88de6b3f2ccb6b4f097a9881f01007 Mon Sep 17 00:00:00 2001 From: mspielberg <9729801+mspielberg@users.noreply.github.com> Date: Thu, 24 Oct 2024 06:06:39 -0700 Subject: [PATCH 054/324] Add documentation for minimum_should_match (#113043) --- .../reference/query-dsl/terms-set-query.asciidoc | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 2abfe54d53976..27717af3ac171 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -159,12 +159,22 @@ GET /job-candidates/_search `terms`:: + -- -(Required, array of strings) Array of terms you wish to find in the provided +(Required, array) Array of terms you wish to find in the provided ``. To return a document, a required number of terms must exactly match the field values, including whitespace and capitalization. -The required number of matching terms is defined in the -`minimum_should_match_field` or `minimum_should_match_script` parameter. +The required number of matching terms is defined in the `minimum_should_match`, +`minimum_should_match_field` or `minimum_should_match_script` parameters. Exactly +one of these parameters must be provided. +-- + +`minimum_should_match`:: ++ +-- +(Optional) Specification for the number of matching terms required to return +a document. + +For valid values, see <>. -- `minimum_should_match_field`:: From 6980fc62531923b68accc204fc25e7dea59760e3 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 15:11:10 +0200 Subject: [PATCH 055/324] [DOCS] Add text_expansion deprecation usage note (#115529) --- docs/reference/query-dsl/text-expansion-query.asciidoc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index 235a413df686f..5c7bce8c3fcf0 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -7,6 +7,13 @@ deprecated[8.15.0, This query has been replaced by <>.] +.Deprecation usage note +**** +You can continue using `rank_features` fields with `text_expansion` queries in the current version. +However, if you plan to upgrade, we recommend updating mappings to use the `sparse_vector` field type and <>. +This will allow you to take advantage of the new capabilities and improvements available in newer versions. +**** + The text expansion query uses a {nlp} model to convert the query text into a list of token-weight pairs which are then used in a query against a <> or <> field. From 833f2fb9185072b0f8edcd2576d512ff91810277 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Thu, 24 Oct 2024 07:27:23 -0600 Subject: [PATCH 056/324] (Doc+) link video for resolving max shards open (#115480) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 👋 howdy team! @anniegale9538 and my [video](https://www.youtube.com/watch?v=tZKbDegt4-M) demonstrates how to resolve `max shards open` errors as a common support ask. --- docs/reference/how-to/size-your-shards.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 8770ec373bb18..86f195d030223 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -572,7 +572,7 @@ PUT _cluster/settings } ---- -For more information, see <>. +See this https://www.youtube.com/watch?v=tZKbDegt4-M[fixing "max shards open" video] for an example troubleshooting walkthrough. For more information, see <>. [discrete] [[troubleshooting-max-docs-limit]] From e99607b5895880d11b4981279314bcbb6b0fe3a9 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Thu, 24 Oct 2024 16:29:14 +0300 Subject: [PATCH 057/324] Adding breaking change entry for retrievers (#115399) --- docs/changelog/115399.yaml | 29 +++++++++++++++++++ .../TextSimilarityRankRetrieverBuilder.java | 2 +- .../xpack/rank/rrf/RRFRetrieverBuilder.java | 2 +- 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/115399.yaml diff --git a/docs/changelog/115399.yaml b/docs/changelog/115399.yaml new file mode 100644 index 0000000000000..9f69657a5d167 --- /dev/null +++ b/docs/changelog/115399.yaml @@ -0,0 +1,29 @@ +pr: 115399 +summary: Adding breaking change entry for retrievers +area: Search +type: breaking +issues: [] +breaking: + title: Reworking RRF retriever to be evaluated during rewrite phase + area: REST API + details: |- + In this release (8.16), we have introduced major changes to the retrievers framework + and how they can be evaluated, focusing mainly on compound retrievers + like `rrf` and `text_similarity_reranker`, which allowed us to support full + composability (i.e. any retriever can be nested under any compound retriever), + as well as supporting additional search features like collapsing, explaining, + aggregations, and highlighting. + + To ensure consistency, and given that this rework is not available until 8.16, + `rrf` and `text_similarity_reranker` retriever queries would now + throw an exception in a mixed cluster scenario, where there are nodes + both in current or later (i.e. >= 8.16) and previous ( <= 8.15) versions. + + As part of the rework, we have also removed the `_rank` property from + the responses of an `rrf` retriever. + impact: |- + - Users will not be able to use the `rrf` and `text_similarity_reranker` retrievers in a mixed cluster scenario + with previous releases (i.e. prior to 8.16), and the request will throw an `IllegalArgumentException`. + - `_rank` has now been removed from the output of the `rrf` retrievers so trying to directly parse the field + will throw an exception + notable: false diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index 342199dc51db8..91b6cdc61afe4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -81,7 +81,7 @@ public static TextSimilarityRankRetrieverBuilder fromXContent(XContentParser par throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + TextSimilarityRankBuilder.NAME + "]"); } if (context.clusterSupportsFeature(TEXT_SIMILARITY_RERANKER_COMPOSITION_SUPPORTED) == false) { - throw new UnsupportedOperationException( + throw new IllegalArgumentException( "[text_similarity_reranker] retriever composition feature is not supported by all nodes in the cluster" ); } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java index c3c9f19cde6ef..792ff4eac3893 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java @@ -83,7 +83,7 @@ public static RRFRetrieverBuilder fromXContent(XContentParser parser, RetrieverP throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]"); } if (context.clusterSupportsFeature(RRF_RETRIEVER_COMPOSITION_SUPPORTED) == false) { - throw new UnsupportedOperationException("[rrf] retriever composition feature is not supported by all nodes in the cluster"); + throw new IllegalArgumentException("[rrf] retriever composition feature is not supported by all nodes in the cluster"); } if (RRFRankPlugin.RANK_RRF_FEATURE.check(XPackPlugin.getSharedLicenseState()) == false) { throw LicenseUtils.newComplianceException("Reciprocal Rank Fusion (RRF)"); From 28882e86b200e9dfef47e6615bfd993d35f17abd Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Thu, 24 Oct 2024 14:30:32 +0100 Subject: [PATCH 058/324] Report JVM stats for all memory pools (97046) (#115117) This fix allows reporting of all JVM memory pools sizes in JVM stats --- docs/changelog/115117.yaml | 6 ++++++ .../elasticsearch/monitor/jvm/GcNames.java | 15 +++++++++++++- .../elasticsearch/monitor/jvm/JvmStats.java | 5 +---- .../monitor/jvm/JvmStatsTests.java | 20 +++++++++++++++++-- 4 files changed, 39 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/115117.yaml diff --git a/docs/changelog/115117.yaml b/docs/changelog/115117.yaml new file mode 100644 index 0000000000000..de2defcd46afd --- /dev/null +++ b/docs/changelog/115117.yaml @@ -0,0 +1,6 @@ +pr: 115117 +summary: Report JVM stats for all memory pools (97046) +area: Infra/Core +type: bug +issues: + - 97046 diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java b/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java index 9db8e8f414d5c..3494204c330c0 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java @@ -15,8 +15,14 @@ public class GcNames { public static final String OLD = "old"; public static final String SURVIVOR = "survivor"; + private GcNames() {} + /** - * Resolves the GC type by its memory pool name ({@link java.lang.management.MemoryPoolMXBean#getName()}. + * Resolves the memory area name by the memory pool name provided by {@link java.lang.management.MemoryPoolMXBean#getName()} + * + * @param poolName the name of the memory pool from {@link java.lang.management.MemoryPoolMXBean} + * @param defaultName the name to return if the pool name does not match any known memory area + * @return memory area name corresponding to the pool name or {@code defaultName} if no match is found */ public static String getByMemoryPoolName(String poolName, String defaultName) { if ("Eden Space".equals(poolName) @@ -40,6 +46,13 @@ public static String getByMemoryPoolName(String poolName, String defaultName) { return defaultName; } + /** + * Resolves the GC type by the GC name provided by {@link java.lang.management.GarbageCollectorMXBean#getName()} + * + * @param gcName the name of the GC from {@link java.lang.management.GarbageCollectorMXBean} + * @param defaultName the name to return if the GC name does not match any known GC type + * @return GC type corresponding to the GC name or {@code defaultName} if no match is found + */ public static String getByGcName(String gcName, String defaultName) { if ("Copy".equals(gcName) || "PS Scavenge".equals(gcName) || "ParNew".equals(gcName) || "G1 Young Generation".equals(gcName)) { return YOUNG; diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java index 0a2763474b8df..e6b109207fdf3 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -64,10 +64,7 @@ public static JvmStats jvmStats() { List pools = new ArrayList<>(); for (MemoryPoolMXBean memoryPoolMXBean : memoryPoolMXBeans) { try { - String name = GcNames.getByMemoryPoolName(memoryPoolMXBean.getName(), null); - if (name == null) { // if we can't resolve it, its not interesting.... (Per Gen, Code Cache) - continue; - } + String name = GcNames.getByMemoryPoolName(memoryPoolMXBean.getName(), memoryPoolMXBean.getName()); MemoryUsage usage = memoryPoolMXBean.getUsage(); MemoryUsage peakUsage = memoryPoolMXBean.getPeakUsage(); pools.add( diff --git a/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java b/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java index 12fa776dd7efd..28976d803ff53 100644 --- a/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java @@ -13,17 +13,22 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; public class JvmStatsTests extends ESTestCase { - public void testJvmStats() throws IOException { + public void testJvmStats() { JvmStats stats = JvmStats.jvmStats(); assertNotNull(stats); assertNotNull(stats.getUptime()); @@ -40,6 +45,17 @@ public void testJvmStats() throws IOException { assertNotNull(mem.getHeapUsedPercent()); assertThat(mem.getHeapUsedPercent(), anyOf(equalTo((short) -1), greaterThanOrEqualTo((short) 0))); + // Memory pools + Map memoryPools = StreamSupport.stream(stats.getMem().spliterator(), false) + .collect(Collectors.toMap(JvmStats.MemoryPool::getName, Function.identity())); + assertThat(memoryPools, hasKey(GcNames.YOUNG)); + assertThat(memoryPools, hasKey(GcNames.OLD)); + assertThat(memoryPools, hasKey("Metaspace")); + assertThat(memoryPools.keySet(), hasSize(greaterThan(3))); + for (JvmStats.MemoryPool memoryPool : memoryPools.values()) { + assertThat(memoryPool.getUsed().getBytes(), greaterThan(0L)); + } + // Threads JvmStats.Threads threads = stats.getThreads(); assertNotNull(threads); From 37c7137f39d13ce36785c0bed01f2f058da886f8 Mon Sep 17 00:00:00 2001 From: Gergely Kalapos Date: Thu, 24 Oct 2024 15:49:45 +0200 Subject: [PATCH 059/324] [otel-data] Add more kubernetes aliases (#115429) * Add more kubernetes aliases * Update docs/changelog/115429.yaml * Review feedback --------- Co-authored-by: Elastic Machine --- docs/changelog/115429.yaml | 5 ++ .../semconv-resource-to-ecs@mappings.yaml | 48 +++++++++++++++++++ .../rest-api-spec/test/20_logs_tests.yml | 37 ++++++++++++++ 3 files changed, 90 insertions(+) create mode 100644 docs/changelog/115429.yaml diff --git a/docs/changelog/115429.yaml b/docs/changelog/115429.yaml new file mode 100644 index 0000000000000..ddf3c69183000 --- /dev/null +++ b/docs/changelog/115429.yaml @@ -0,0 +1,5 @@ +pr: 115429 +summary: "[otel-data] Add more kubernetes aliases" +area: Data streams +type: bug +issues: [] diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml index 6645e7d282520..eb5cd6d37af83 100644 --- a/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml +++ b/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml @@ -56,21 +56,45 @@ template: os.version: type: keyword ignore_above: 1024 + k8s.container.name: + type: keyword + ignore_above: 1024 + k8s.cronjob.name: + type: keyword + ignore_above: 1024 + k8s.daemonset.name: + type: keyword + ignore_above: 1024 k8s.deployment.name: type: keyword ignore_above: 1024 + k8s.job.name: + type: keyword + ignore_above: 1024 k8s.namespace.name: type: keyword ignore_above: 1024 + k8s.node.hostname: + type: keyword + ignore_above: 1024 k8s.node.name: type: keyword ignore_above: 1024 + k8s.node.uid: + type: keyword + ignore_above: 1024 k8s.pod.name: type: keyword ignore_above: 1024 k8s.pod.uid: type: keyword ignore_above: 1024 + k8s.replicaset.name: + type: keyword + ignore_above: 1024 + k8s.statefulset.name: + type: keyword + ignore_above: 1024 service.node.name: type: alias path: resource.attributes.service.instance.id @@ -122,6 +146,30 @@ template: kubernetes.pod.uid: type: alias path: resource.attributes.k8s.pod.uid + kubernetes.container.name: + type: alias + path: resource.attributes.k8s.container.name + kubernetes.cronjob.name: + type: alias + path: resource.attributes.k8s.cronjob.name + kubernetes.job.name: + type: alias + path: resource.attributes.k8s.job.name + kubernetes.statefulset.name: + type: alias + path: resource.attributes.k8s.statefulset.name + kubernetes.daemonset.name: + type: alias + path: resource.attributes.k8s.daemonset.name + kubernetes.replicaset.name: + type: alias + path: resource.attributes.k8s.replicaset.name + kubernetes.node.uid: + type: alias + path: resource.attributes.k8s.node.uid + kubernetes.node.hostname: + type: alias + path: resource.attributes.k8s.node.hostname # Below are non-ECS fields that may be used by Kibana. service.language.name: type: alias diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml index 6bc0cee78be4f..63966e601a3cb 100644 --- a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml @@ -187,3 +187,40 @@ host.name pass-through: - length: { hits.hits: 1 } - match: { hits.hits.0.fields.resource\.attributes\.host\.name: [ "localhost" ] } - match: { hits.hits.0.fields.host\.name: [ "localhost" ] } +--- +"kubernetes.* -> resource.attributes.k8s.* aliases": + - do: + bulk: + index: logs-generic.otel-default + refresh: true + body: + - create: { } + - "@timestamp": 2024-07-18T14:48:33.467654000Z + data_stream: + dataset: generic.otel + namespace: default + resource: + attributes: + k8s.container.name: myContainerName + k8s.cronjob.name: myCronJobName + k8s.job.name: myJobName + k8s.statefulset.name: myStatefulsetName + k8s.daemonset.name: myDaemonsetName + k8s.replicaset.name: myReplicasetName + k8s.node.uid: myNodeUid + k8s.node.hostname: myNodeHostname + - is_false: errors + - do: + search: + index: logs-generic.otel-default + body: + fields: ["kubernetes.container.name", "kubernetes.cronjob.name", "kubernetes.job.name", "kubernetes.statefulset.name", "kubernetes.daemonset.name", "kubernetes.replicaset.name", "kubernetes.node.uid", "kubernetes.node.hostname" ] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.kubernetes\.container\.name : ["myContainerName"] } + - match: { hits.hits.0.fields.kubernetes\.cronjob\.name : ["myCronJobName"] } + - match: { hits.hits.0.fields.kubernetes\.job\.name : ["myJobName"] } + - match: { hits.hits.0.fields.kubernetes\.statefulset\.name : ["myStatefulsetName"] } + - match: { hits.hits.0.fields.kubernetes\.daemonset\.name : ["myDaemonsetName"] } + - match: { hits.hits.0.fields.kubernetes\.replicaset\.name : ["myReplicasetName"] } + - match: { hits.hits.0.fields.kubernetes\.node\.uid : ["myNodeUid"] } + - match: { hits.hits.0.fields.kubernetes\.node\.hostname : ["myNodeHostname"] } From 31ede8fd284a79e1f62088d9800e59701f42b79a Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 15:57:49 +0200 Subject: [PATCH 060/324] Update 8.12.0.asciidoc (#115303) (#115546) Fixing confusing format Co-authored-by: Johannes Mahne --- docs/reference/release-notes/8.12.0.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/release-notes/8.12.0.asciidoc b/docs/reference/release-notes/8.12.0.asciidoc index bfa99401f41a2..bd0ae032ef0b9 100644 --- a/docs/reference/release-notes/8.12.0.asciidoc +++ b/docs/reference/release-notes/8.12.0.asciidoc @@ -11,7 +11,7 @@ Also see <>. + When using `int8_hnsw` and the default `confidence_interval` (or any `confidence_interval` less than `1.0`) and when there are deleted documents in the segments, quantiles may fail to build and prevent merging. - ++ This issue is fixed in 8.12.1. * When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, From aae3b3499a7e397bbd2f2cd7df0e218ec3f12caf Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 00:57:55 +1100 Subject: [PATCH 061/324] Mute org.elasticsearch.test.apmintegration.MetricsApmIT testApmIntegration #115415 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 2d5349ed03b48..1ee677b14fea1 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -285,6 +285,9 @@ tests: - class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT method: testDeploymentSurvivesRestart {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/115528 +- class: org.elasticsearch.test.apmintegration.MetricsApmIT + method: testApmIntegration + issue: https://github.com/elastic/elasticsearch/issues/115415 # Examples: # From fffb98ac6c68cc633afbb855f697d514f4185c9b Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 24 Oct 2024 15:12:41 +0100 Subject: [PATCH 062/324] [ML] Set max allocations to 32 in default configs (#115518) --- .../services/elasticsearch/ElasticsearchInternalService.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 6732e5719b897..a0235f74ce511 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -859,7 +859,7 @@ private List defaultConfigs(boolean useLinuxOptimizedModel) { null, 1, useLinuxOptimizedModel ? ELSER_V2_MODEL_LINUX_X86 : ELSER_V2_MODEL, - new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 8) + new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 32) ), ElserMlNodeTaskSettings.DEFAULT, null // default chunking settings @@ -872,7 +872,7 @@ private List defaultConfigs(boolean useLinuxOptimizedModel) { null, 1, useLinuxOptimizedModel ? MULTILINGUAL_E5_SMALL_MODEL_ID_LINUX_X86 : MULTILINGUAL_E5_SMALL_MODEL_ID, - new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 8) + new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 32) ), null // default chunking settings ); From 7d829fa51a13b2150ce7c0a08e3f5f66c9ee8bfb Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 24 Oct 2024 15:14:29 +0100 Subject: [PATCH 063/324] [ML] Prevent NPE if model assignment is removed while waiting to start (#115430) --- docs/changelog/115430.yaml | 5 +++++ .../action/TransportStartTrainedModelDeploymentAction.java | 6 +++++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115430.yaml diff --git a/docs/changelog/115430.yaml b/docs/changelog/115430.yaml new file mode 100644 index 0000000000000..c2903f7751012 --- /dev/null +++ b/docs/changelog/115430.yaml @@ -0,0 +1,5 @@ +pr: 115430 +summary: Prevent NPE if model assignment is removed while waiting to start +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index 0bda2de2ce9ae..5fd70ce71cd24 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -671,7 +671,11 @@ public boolean test(ClusterState clusterState) { deploymentId ).orElse(null); if (trainedModelAssignment == null) { - // Something weird happened, it should NEVER be null... + // The assignment may be null if it was stopped by another action while waiting + this.exception = new ElasticsearchStatusException( + "Error waiting for the model deployment to start. The trained model assignment was removed while waiting", + RestStatus.BAD_REQUEST + ); logger.trace(() -> format("[%s] assignment was null while waiting for state [%s]", deploymentId, waitForState)); return true; } From 755c392bb22e9046ef79982aba188f3c45193c8b Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Thu, 24 Oct 2024 15:24:26 +0100 Subject: [PATCH 064/324] Fix for race condition in interval watcher scheduler tests (#115501) --- muted-tests.yml | 12 ------------ .../schedule/engine/TickerScheduleEngineTests.java | 12 ++++-------- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 1ee677b14fea1..ba816ed5f3a9e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -258,21 +258,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)} issue: https://github.com/elastic/elasticsearch/issues/115231 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115339 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testWatchWithLastCheckedTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115354 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testAddWithLastCheckedTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115356 - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultE5 issue: https://github.com/elastic/elasticsearch/issues/115361 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115368 - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testProcessFileChanges issue: https://github.com/elastic/elasticsearch/issues/115280 diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java index 9a12b8f394eb2..ef290628c06d5 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java @@ -312,14 +312,13 @@ public void testWatchWithLastCheckedTimeExecutesBeforeInitialInterval() throws E engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } }); @@ -375,14 +374,13 @@ public void testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInit engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } }); @@ -428,14 +426,13 @@ public void testAddWithLastCheckedTimeExecutesBeforeInitialInterval() throws Exc engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } }); @@ -492,14 +489,13 @@ public void testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitia engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } }); From d7a9575d0314adcc65b50c4972c585464c2aefa9 Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Thu, 24 Oct 2024 15:58:24 +0100 Subject: [PATCH 065/324] Remove deprecated local parameter from alias APIs (#115393) This removes the `local` parameter from the `GET /_alias`, `HEAD /_alias`, and `GET /_cat/aliases` APIs. This option became a no-op and was deprecated in 8.12 by https://github.com/elastic/elasticsearch/pull/101815. We continue to accept the parameter (deprecated, with no other effect) in v8 compatibility mode for `GET /_alias` and `HEAD /_alias`. We don't do this for `GET /_cat/aliases` where the [compatibility policy does not apply](https://github.com/elastic/elasticsearch/blob/main/REST_API_COMPATIBILITY.md#when-not-to-apply). --- docs/changelog/115393.yaml | 18 ++++++++++++ docs/reference/cat/alias.asciidoc | 10 +++---- docs/reference/indices/alias-exists.asciidoc | 2 -- docs/reference/indices/get-alias.asciidoc | 2 -- rest-api-spec/build.gradle | 1 + .../rest-api-spec/api/cat.aliases.json | 4 --- .../api/indices.exists_alias.json | 4 --- .../rest-api-spec/api/indices.get_alias.json | 4 --- .../test/cat.aliases/10_basic.yml | 13 --------- .../test/indices.exists_alias/10_basic.yml | 14 --------- .../test/indices.get_alias/10_basic.yml | 29 ------------------- .../admin/indices/RestGetAliasesAction.java | 15 +++++----- .../rest/action/cat/RestAliasAction.java | 22 -------------- 13 files changed, 30 insertions(+), 108 deletions(-) create mode 100644 docs/changelog/115393.yaml diff --git a/docs/changelog/115393.yaml b/docs/changelog/115393.yaml new file mode 100644 index 0000000000000..5cf4e5f64ab34 --- /dev/null +++ b/docs/changelog/115393.yaml @@ -0,0 +1,18 @@ +pr: 115393 +summary: Remove deprecated local attribute from alias APIs +area: Indices APIs +type: breaking +issues: [] +breaking: + title: Remove deprecated local attribute from alias APIs + area: REST API + details: >- + The following APIs no longer accept the `?local` query parameter: + `GET /_alias`, `GET /_aliases`, `GET /_alias/{name}`, + `HEAD /_alias/{name}`, `GET /{index}/_alias`, `HEAD /{index}/_alias`, + `GET /{index}/_alias/{name}`, `HEAD /{index}/_alias/{name}`, + `GET /_cat/aliases`, and `GET /_cat/aliases/{alias}`. This parameter + has been deprecated and ignored since version 8.12. + impact: >- + Cease usage of the `?local` query parameter when calling the listed APIs. + notable: false diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index 72f949bf11e50..41ac279d3b2f5 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -6,8 +6,8 @@ [IMPORTANT] ==== -cat APIs are only intended for human consumption using the command line or the -{kib} console. They are _not_ intended for use by applications. For application +cat APIs are only intended for human consumption using the command line or the +{kib} console. They are _not_ intended for use by applications. For application consumption, use the <>. ==== @@ -45,8 +45,6 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-h] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=help] -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] - include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-s] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-v] @@ -104,6 +102,6 @@ alias4 test1 - 2 1,2 - This response shows that `alias2` has configured a filter, and specific routing configurations in `alias3` and `alias4`. -If you only want to get information about specific aliases, you can specify -the aliases in comma-delimited format as a URL parameter, e.g., +If you only want to get information about specific aliases, you can specify +the aliases in comma-delimited format as a URL parameter, e.g., /_cat/aliases/alias1,alias2. diff --git a/docs/reference/indices/alias-exists.asciidoc b/docs/reference/indices/alias-exists.asciidoc index f820a95028a0f..d7b3454dcff56 100644 --- a/docs/reference/indices/alias-exists.asciidoc +++ b/docs/reference/indices/alias-exists.asciidoc @@ -52,8 +52,6 @@ Defaults to `all`. (Optional, Boolean) If `false`, requests that include a missing data stream or index in the `` return an error. Defaults to `false`. -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] - [[alias-exists-api-response-codes]] ==== {api-response-codes-title} diff --git a/docs/reference/indices/get-alias.asciidoc b/docs/reference/indices/get-alias.asciidoc index 743aaf7aee174..41d62fb70e01b 100644 --- a/docs/reference/indices/get-alias.asciidoc +++ b/docs/reference/indices/get-alias.asciidoc @@ -58,5 +58,3 @@ Defaults to `all`. `ignore_unavailable`:: (Optional, Boolean) If `false`, requests that include a missing data stream or index in the `` return an error. Defaults to `false`. - -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 4bd293f0a8641..6cc2028bffa39 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -60,4 +60,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("indices.sort/10_basic/Index Sort", "warning does not exist for compatibility") task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") + task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") }) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json index db49daeea372b..d3856b455efd1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json @@ -36,10 +36,6 @@ "type":"string", "description":"a short version of the Accept header, e.g. json, yaml" }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" - }, "h":{ "type":"list", "description":"Comma-separated list of column names to display" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json index b70854fdc3eb2..7d7a9c96c6419 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json @@ -61,10 +61,6 @@ ], "default":"all", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json index 0a4e4bb9ed90c..dc02a65adb068 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json @@ -79,10 +79,6 @@ ], "default": "all", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" } } } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 2e5234bd1ced1..6118453d7805e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -484,16 +484,3 @@ test_alias \s+ test_index\n my_alias \s+ test_index\n $/ - ---- -"Deprecated local parameter": - - requires: - cluster_features: ["gte_v8.12.0"] - test_runner_features: ["warnings"] - reason: verifying deprecation warnings from 8.12.0 onwards - - - do: - cat.aliases: - local: true - warnings: - - "the [?local=true] query parameter to cat-aliases requests has no effect and will be removed in a future version" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml index bf499de8463bd..a4223c2a983be 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml @@ -34,17 +34,3 @@ name: test_alias - is_false: '' - ---- -"Test indices.exists_alias with local flag": - - skip: - features: ["allowed_warnings"] - - - do: - indices.exists_alias: - name: test_alias - local: true - allowed_warnings: - - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" - - - is_false: '' diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 4f26a69712e83..63ab40f3bf578 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -289,21 +289,6 @@ setup: index: non-existent name: foo ---- -"Get alias with local flag": - - skip: - features: ["allowed_warnings"] - - - do: - indices.get_alias: - local: true - allowed_warnings: - - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" - - - is_true: test_index - - - is_true: test_index_2 - --- "Get alias against closed indices": - skip: @@ -329,17 +314,3 @@ setup: - is_true: test_index - is_false: test_index_2 - - ---- -"Deprecated local parameter": - - requires: - cluster_features: "gte_v8.12.0" - test_runner_features: ["warnings"] - reason: verifying deprecation warnings from 8.12.0 onwards - - - do: - indices.get_alias: - local: true - warnings: - - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 7780ae08ac0ff..dfe501f29ce2e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -52,7 +52,7 @@ @ServerlessScope(Scope.PUBLIC) public class RestGetAliasesAction extends BaseRestHandler { - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) // reject the deprecated ?local parameter + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) // remove the BWC support for the deprecated ?local parameter private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestGetAliasesAction.class); @Override @@ -199,8 +199,7 @@ static RestResponse buildRestResponse( } @Override - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) - // v7 REST API no longer exists: eliminate ref to RestApiVersion.V_7; reject local parameter in v9 too? + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) // remove the BWC support for the deprecated ?local parameter public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { // The TransportGetAliasesAction was improved do the same post processing as is happening here. // We can't remove this logic yet to support mixed clusters. We should be able to remove this logic here @@ -213,10 +212,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getAliasesRequest.indices(indices); getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions())); - if (request.hasParam("local")) { - // consume this param just for validation - final var localParam = request.paramAsBoolean("local", false); - if (request.getRestApiVersion() != RestApiVersion.V_7) { + if (request.getRestApiVersion() == RestApiVersion.V_8) { + if (request.hasParam("local")) { + // consume this param just for validation when in BWC mode for V_8 + final var localParam = request.paramAsBoolean("local", false); DEPRECATION_LOGGER.critical( DeprecationCategory.API, "get-aliases-local", diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java index 191746b421c98..6aa0b1c865682 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java @@ -15,10 +15,6 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; -import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.Scope; @@ -34,8 +30,6 @@ @ServerlessScope(Scope.PUBLIC) public class RestAliasAction extends AbstractCatAction { - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestAliasAction.class); - @Override public List routes() { return List.of(new Route(GET, "/_cat/aliases"), new Route(GET, "/_cat/aliases/{alias}")); @@ -52,27 +46,11 @@ public boolean allowSystemIndexAccessByDefault() { } @Override - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) - // v7 REST API no longer exists: eliminate ref to RestApiVersion.V_7; reject local parameter in v9 too? protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final GetAliasesRequest getAliasesRequest = request.hasParam("alias") ? new GetAliasesRequest(Strings.commaDelimitedListToStringArray(request.param("alias"))) : new GetAliasesRequest(); getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions())); - - if (request.hasParam("local")) { - // consume this param just for validation - final var localParam = request.paramAsBoolean("local", false); - if (request.getRestApiVersion() != RestApiVersion.V_7) { - DEPRECATION_LOGGER.critical( - DeprecationCategory.API, - "cat-aliases-local", - "the [?local={}] query parameter to cat-aliases requests has no effect and will be removed in a future version", - localParam - ); - } - } - return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .indices() .getAliases(getAliasesRequest, new RestResponseListener<>(channel) { From d8a3fc22cde255dc9b7456ba1009bb8b45b7407d Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 16:59:10 +0200 Subject: [PATCH 066/324] [test] Don't test any 7.x snapshots in `testLogicallyEquivalentSnapshotIsUsedEvenIfFilesAreDifferent` (#114821) Don't test any 7.x snapshots, keep using any 8,x compatible snapshot and Lucene version. Originally added in 8.0 (#77420) for testing peer recoveries using snapshots. Co-authored-by: Yang Wang Co-authored-by: Elastic Machine --- .../SnapshotsRecoveryPlannerServiceTests.java | 20 ++----------------- 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java index b082698254d17..6e7f2d82cfb1d 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java @@ -26,15 +26,12 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetadata; -import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.plan.ShardRecoveryPlan; import org.elasticsearch.indices.recovery.plan.ShardSnapshot; import org.elasticsearch.indices.recovery.plan.ShardSnapshotsService; @@ -63,7 +60,6 @@ import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList; import static org.elasticsearch.index.engine.Engine.ES_VERSION; import static org.elasticsearch.index.engine.Engine.HISTORY_UUID_KEY; -import static org.elasticsearch.test.index.IndexVersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -203,8 +199,6 @@ public void fetchLatestSnapshotsForShard(ShardId shardId, ActionListener { boolean shareFilesWithSource = randomBoolean(); @@ -217,18 +211,8 @@ public void testLogicallyEquivalentSnapshotIsUsedEvenIfFilesAreDifferent() throw final IndexVersion snapshotVersion; final Version luceneVersion; if (compatibleVersion) { - snapshotVersion = randomBoolean() ? null : IndexVersionUtils.randomCompatibleVersion(random()); - // If snapshotVersion is not present, - // then lucene version must be < RecoverySettings.SEQ_NO_SNAPSHOT_RECOVERIES_SUPPORTED_VERSION - if (snapshotVersion == null) { - luceneVersion = randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - RecoverySettings.SNAPSHOT_RECOVERIES_SUPPORTED_INDEX_VERSION - ).luceneVersion(); - } else { - luceneVersion = IndexVersionUtils.randomCompatibleVersion(random()).luceneVersion(); - } + snapshotVersion = IndexVersionUtils.randomCompatibleVersion(random()); + luceneVersion = snapshotVersion.luceneVersion(); } else { snapshotVersion = IndexVersion.fromId(Integer.MAX_VALUE); luceneVersion = org.apache.lucene.util.Version.parse("255.255.255"); From f5d3c7c3d8bff7b91430c42d66550613e2716387 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 17:09:34 +0200 Subject: [PATCH 067/324] Remove legacy join validation transport protocol (#114571) We introduced a new join validation protocol in #85380 (8.3), the legacy protocol can be removed in 9.0 Remove assertion that we run a version after 8.3.0 --- .../coordination/JoinValidationService.java | 57 ++----------------- .../coordination/ValidateJoinRequest.java | 21 ++----- 2 files changed, 12 insertions(+), 66 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java index 34d59c9860aba..7de7fd4d92d1b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java @@ -13,7 +13,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.ClusterState; @@ -31,7 +30,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.Environment; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; @@ -46,7 +44,6 @@ import java.io.IOException; import java.util.Collection; import java.util.HashMap; -import java.util.Locale; import java.util.Map; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; @@ -162,55 +159,14 @@ public void validateJoin(DiscoveryNode discoveryNode, ActionListener liste return; } - if (connection.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { - if (executeRefs.tryIncRef()) { - try { - execute(new JoinValidation(discoveryNode, connection, listener)); - } finally { - executeRefs.decRef(); - } - } else { - listener.onFailure(new NodeClosedException(transportService.getLocalNode())); + if (executeRefs.tryIncRef()) { + try { + execute(new JoinValidation(discoveryNode, connection, listener)); + } finally { + executeRefs.decRef(); } } else { - legacyValidateJoin(discoveryNode, listener, connection); - } - } - - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - private void legacyValidateJoin(DiscoveryNode discoveryNode, ActionListener listener, Transport.Connection connection) { - final var responseHandler = TransportResponseHandler.empty(responseExecutor, listener.delegateResponse((l, e) -> { - logger.warn(() -> "failed to validate incoming join request from node [" + discoveryNode + "]", e); - listener.onFailure( - new IllegalStateException( - String.format( - Locale.ROOT, - "failure when sending a join validation request from [%s] to [%s]", - transportService.getLocalNode().descriptionWithoutAttributes(), - discoveryNode.descriptionWithoutAttributes() - ), - e - ) - ); - })); - final var clusterState = clusterStateSupplier.get(); - if (clusterState != null) { - assert clusterState.nodes().isLocalNodeElectedMaster(); - transportService.sendRequest( - connection, - JOIN_VALIDATE_ACTION_NAME, - new ValidateJoinRequest(clusterState), - REQUEST_OPTIONS, - responseHandler - ); - } else { - transportService.sendRequest( - connection, - JoinHelper.JOIN_PING_ACTION_NAME, - new JoinHelper.JoinPingRequest(), - REQUEST_OPTIONS, - responseHandler - ); + listener.onFailure(new NodeClosedException(transportService.getLocalNode())); } } @@ -341,7 +297,6 @@ private class JoinValidation extends ActionRunnable { @Override protected void doRun() { - assert connection.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) : discoveryNode.getVersion(); // NB these things never run concurrently to each other, or to the cache cleaner (see IMPLEMENTATION NOTES above) so it is safe // to do these (non-atomic) things to the (unsynchronized) statesByVersion map. var transportVersion = connection.getTransportVersion(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java index 1d99f28e62582..c81e4877196b3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.coordination; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.bytes.BytesReference; @@ -29,19 +28,12 @@ public class ValidateJoinRequest extends TransportRequest { public ValidateJoinRequest(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { - // recent versions send a BytesTransportRequest containing a compressed representation of the state - final var bytes = in.readReleasableBytesReference(); - final var version = in.getTransportVersion(); - final var namedWriteableRegistry = in.namedWriteableRegistry(); - this.stateSupplier = () -> readCompressed(version, bytes, namedWriteableRegistry); - this.refCounted = bytes; - } else { - // older versions just contain the bare state - final var state = ClusterState.readFrom(in, null); - this.stateSupplier = () -> state; - this.refCounted = null; - } + // recent versions send a BytesTransportRequest containing a compressed representation of the state + final var bytes = in.readReleasableBytesReference(); + final var version = in.getTransportVersion(); + final var namedWriteableRegistry = in.namedWriteableRegistry(); + this.stateSupplier = () -> readCompressed(version, bytes, namedWriteableRegistry); + this.refCounted = bytes; } private static ClusterState readCompressed( @@ -68,7 +60,6 @@ public ValidateJoinRequest(ClusterState state) { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getTransportVersion().before(TransportVersions.V_8_3_0); super.writeTo(out); stateSupplier.get().writeTo(out); } From 2ddd08aff7bea3a4ef1e4aea28d2ae63518902a1 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 24 Oct 2024 10:10:36 -0500 Subject: [PATCH 068/324] Fixing ingest simulate yaml rest test when there is a global legacy template (#115559) The ingest simulate yaml rest test `Test mapping addition works with indices without templates` tests what happens when an index has a mapping but matches no template at all. However, randomly and rarely a global match-all legacy template is applied to the cluster. When this happens, the assumptions for the test fail since the index matches a template. This PR removes that global legacy template so that the test works as intended. Closes #115412 Closes #115472 --- muted-tests.yml | 3 --- .../rest-api-spec/test/ingest/80_ingest_simulate.yml | 7 +++++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index ba816ed5f3a9e..8c90f73f475e6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -264,9 +264,6 @@ tests: - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testProcessFileChanges issue: https://github.com/elastic/elasticsearch/issues/115280 -- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT - method: test {yaml=ingest/80_ingest_simulate/Test mapping addition works with legacy templates} - issue: https://github.com/elastic/elasticsearch/issues/115412 - class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT method: testFileSettingsReprocessedOnRestartWithoutVersionChange issue: https://github.com/elastic/elasticsearch/issues/115450 diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index 4d1a62c6f179e..7ed5ad3154151 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -1586,6 +1586,13 @@ setup: cluster_features: ["simulate.support.non.template.mapping"] reason: "ingest simulate support for indices with mappings that didn't come from templates added in 8.17" + # A global match-everything legacy template is added to the cluster sometimes (rarely). We have to get rid of this template if it exists + # because this test is making sure we get correct behavior when an index matches *no* template: + - do: + indices.delete_template: + name: '*' + ignore: 404 + # First, make sure that validation fails before we create the index (since we are only defining to bar field but trying to index a value # for foo. - do: From 79be69a5f87da015e6105a84537c590ae68c197b Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 24 Oct 2024 18:22:13 +0300 Subject: [PATCH 069/324] Ignore _field_names warning in testRollupAfterRestart (#115563) --- .../org/elasticsearch/xpack/restart/FullClusterRestartIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index c57e5653d1279..a56ddaabe8280 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -435,6 +435,7 @@ public void testRollupAfterRestart() throws Exception { final Request bulkRequest = new Request("POST", "/_bulk"); bulkRequest.setJsonEntity(bulk.toString()); + bulkRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(fieldNamesFieldOk())); client().performRequest(bulkRequest); // create the rollup job From fb6c729858b443956ba41c68495a5de084ffa73d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 24 Oct 2024 08:47:52 -0700 Subject: [PATCH 070/324] Guard blob store local directory creation with doPrivileged (#115459) The blob store may be triggered to create a local directory while in a reduced privilege context. This commit guards the creation of directories with doPrivileged. --- docs/changelog/115459.yaml | 5 +++++ .../common/blobstore/fs/FsBlobStore.java | 15 ++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/115459.yaml diff --git a/docs/changelog/115459.yaml b/docs/changelog/115459.yaml new file mode 100644 index 0000000000000..b20a8f765c084 --- /dev/null +++ b/docs/changelog/115459.yaml @@ -0,0 +1,5 @@ +pr: 115459 +summary: Guard blob store local directory creation with `doPrivileged` +area: Infra/Core +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index c4240672239fa..53e3b4b4796dc 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -19,6 +19,8 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.Iterator; import java.util.List; @@ -56,11 +58,14 @@ public int bufferSizeInBytes() { public BlobContainer blobContainer(BlobPath path) { Path f = buildPath(path); if (readOnly == false) { - try { - Files.createDirectories(f); - } catch (IOException ex) { - throw new ElasticsearchException("failed to create blob container", ex); - } + AccessController.doPrivileged((PrivilegedAction) () -> { + try { + Files.createDirectories(f); + } catch (IOException ex) { + throw new ElasticsearchException("failed to create blob container", ex); + } + return null; + }); } return new FsBlobContainer(this, path, f); } From 482d2aced5f888d548a755e0fe20fc6f83125d11 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 24 Oct 2024 17:58:36 +0200 Subject: [PATCH 071/324] Remove unused elasticsearch cloud docker image (#115357) --- .../gradle/internal/DockerBase.java | 3 --- distribution/docker/build.gradle | 25 +++---------------- .../cloud-docker-aarch64-export/build.gradle | 2 -- .../docker/cloud-docker-export/build.gradle | 2 -- .../build.gradle | 2 -- .../wolfi-ess-docker-export/build.gradle | 2 -- .../packaging/test/DockerTests.java | 11 +++----- .../test/KeystoreManagementTests.java | 5 +--- .../packaging/test/PackagingTestCase.java | 6 ++--- .../packaging/util/Distribution.java | 5 +--- .../packaging/util/docker/Docker.java | 2 +- .../packaging/util/docker/DockerRun.java | 1 - settings.gradle | 2 -- 13 files changed, 12 insertions(+), 56 deletions(-) delete mode 100644 distribution/docker/cloud-docker-aarch64-export/build.gradle delete mode 100644 distribution/docker/cloud-docker-export/build.gradle delete mode 100644 distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle delete mode 100644 distribution/docker/wolfi-ess-docker-export/build.gradle diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index fb52daf7e164f..0535f0bdc3cc8 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -21,9 +21,6 @@ public enum DockerBase { // The Iron Bank base image is UBI (albeit hardened), but we are required to parameterize the Docker build IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank", "yum"), - // Base image with extras for Cloud - CLOUD("ubuntu:20.04", "-cloud", "apt-get"), - // Chainguard based wolfi image with latest jdk // This is usually updated via renovatebot // spotless:off diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index e40ac68bbacf4..788e836f8f045 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -288,20 +288,6 @@ void addBuildDockerContextTask(Architecture architecture, DockerBase base) { } } - if (base == DockerBase.CLOUD) { - // If we're performing a release build, but `build.id` hasn't been set, we can - // infer that we're not at the Docker building stage of the build, and therefore - // we should skip the beats part of the build. - String buildId = providers.systemProperty('build.id').getOrNull() - boolean includeBeats = VersionProperties.isElasticsearchSnapshot() == true || buildId != null || useDra - - if (includeBeats) { - from configurations.getByName("filebeat_${architecture.classifier}") - from configurations.getByName("metricbeat_${architecture.classifier}") - } - // For some reason, the artifact name can differ depending on what repository we used. - rename ~/((?:file|metric)beat)-.*\.tar\.gz$/, "\$1-${VersionProperties.elasticsearch}.tar.gz" - } Provider serviceProvider = GradleUtils.getBuildService( project.gradle.sharedServices, DockerSupportPlugin.DOCKER_SUPPORT_SERVICE_NAME @@ -381,7 +367,7 @@ private static List generateTags(DockerBase base, Architecture architect String image = "elasticsearch${base.suffix}" String namespace = 'elasticsearch' - if (base == DockerBase.CLOUD || base == DockerBase.CLOUD_ESS) { + if (base == base == DockerBase.CLOUD_ESS) { namespace += '-ci' } @@ -439,7 +425,7 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) { } - if (base != DockerBase.IRON_BANK && base != DockerBase.CLOUD && base != DockerBase.CLOUD_ESS) { + if (base != DockerBase.IRON_BANK && base != DockerBase.CLOUD_ESS) { tasks.named("assemble").configure { dependsOn(buildDockerImageTask) } @@ -548,10 +534,6 @@ subprojects { Project subProject -> base = DockerBase.IRON_BANK } else if (subProject.name.contains('cloud-ess-')) { base = DockerBase.CLOUD_ESS - } else if (subProject.name.contains('cloud-')) { - base = DockerBase.CLOUD - } else if (subProject.name.contains('wolfi-ess')) { - base = DockerBase.WOLFI_ESS } else if (subProject.name.contains('wolfi-')) { base = DockerBase.WOLFI } @@ -559,10 +541,9 @@ subprojects { Project subProject -> final String arch = architecture == Architecture.AARCH64 ? '-aarch64' : '' final String extension = base == DockerBase.UBI ? 'ubi.tar' : (base == DockerBase.IRON_BANK ? 'ironbank.tar' : - (base == DockerBase.CLOUD ? 'cloud.tar' : (base == DockerBase.CLOUD_ESS ? 'cloud-ess.tar' : (base == DockerBase.WOLFI ? 'wolfi.tar' : - 'docker.tar')))) + 'docker.tar'))) final String artifactName = "elasticsearch${arch}${base.suffix}_test" final String exportTaskName = taskName("export", architecture, base, 'DockerImage') diff --git a/distribution/docker/cloud-docker-aarch64-export/build.gradle b/distribution/docker/cloud-docker-aarch64-export/build.gradle deleted file mode 100644 index 537b5a093683e..0000000000000 --- a/distribution/docker/cloud-docker-aarch64-export/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// export is done in the parent project. diff --git a/distribution/docker/cloud-docker-export/build.gradle b/distribution/docker/cloud-docker-export/build.gradle deleted file mode 100644 index 537b5a093683e..0000000000000 --- a/distribution/docker/cloud-docker-export/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// export is done in the parent project. diff --git a/distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle b/distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle deleted file mode 100644 index 537b5a093683e..0000000000000 --- a/distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// export is done in the parent project. diff --git a/distribution/docker/wolfi-ess-docker-export/build.gradle b/distribution/docker/wolfi-ess-docker-export/build.gradle deleted file mode 100644 index 537b5a093683e..0000000000000 --- a/distribution/docker/wolfi-ess-docker-export/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// export is done in the parent project. diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index 4ca97bff42333..8cb8354eb5d71 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -169,10 +169,7 @@ public void test012SecurityCanBeDisabled() throws Exception { * Checks that no plugins are initially active. */ public void test020PluginsListWithNoPlugins() { - assumeTrue( - "Only applies to non-Cloud images", - distribution.packaging != Packaging.DOCKER_CLOUD && distribution().packaging != Packaging.DOCKER_CLOUD_ESS - ); + assumeTrue("Only applies to non-Cloud images", distribution().packaging != Packaging.DOCKER_CLOUD_ESS); final Installation.Executables bin = installation.executables(); final Result r = sh.run(bin.pluginTool + " list"); @@ -1116,8 +1113,8 @@ public void test170DefaultShellIsBash() { */ public void test171AdditionalCliOptionsAreForwarded() throws Exception { assumeTrue( - "Does not apply to Cloud and Cloud ESS images, because they don't use the default entrypoint", - distribution.packaging != Packaging.DOCKER_CLOUD && distribution().packaging != Packaging.DOCKER_CLOUD_ESS + "Does not apply to Cloud ESS images, because they don't use the default entrypoint", + distribution().packaging != Packaging.DOCKER_CLOUD_ESS ); runContainer(distribution(), builder().runArgs("bin/elasticsearch", "-Ecluster.name=kimchy").envVar("ELASTIC_PASSWORD", PASSWORD)); @@ -1204,7 +1201,7 @@ public void test310IronBankImageHasNoAdditionalLabels() throws Exception { * Check that the Cloud image contains the required Beats */ public void test400CloudImageBundlesBeats() { - assumeTrue(distribution.packaging == Packaging.DOCKER_CLOUD || distribution.packaging == Packaging.DOCKER_CLOUD_ESS); + assumeTrue(distribution.packaging == Packaging.DOCKER_CLOUD_ESS); final List contents = listContents("/opt"); assertThat("Expected beats in /opt", contents, hasItems("filebeat", "metricbeat")); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java index a988a446f561f..02e1ce35764cf 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java @@ -436,10 +436,7 @@ private void verifyKeystorePermissions() { switch (distribution.packaging) { case TAR, ZIP -> assertThat(keystore, file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660)); case DEB, RPM -> assertThat(keystore, file(File, "root", "elasticsearch", p660)); - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> assertThat( - keystore, - DockerFileMatcher.file(p660) - ); + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> assertThat(keystore, DockerFileMatcher.file(p660)); default -> throw new IllegalStateException("Unknown Elasticsearch packaging type."); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index 644990105f60f..b4a00ca56924a 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -245,7 +245,7 @@ protected static void install() throws Exception { installation = Packages.installPackage(sh, distribution); Packages.verifyPackageInstallation(installation, distribution, sh); } - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> { + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> { installation = Docker.runContainer(distribution); Docker.verifyContainerInstallation(installation); } @@ -335,7 +335,6 @@ public Shell.Result runElasticsearchStartCommand(String password, boolean daemon case DOCKER: case DOCKER_UBI: case DOCKER_IRON_BANK: - case DOCKER_CLOUD: case DOCKER_CLOUD_ESS: case DOCKER_WOLFI: // nothing, "installing" docker image is running it @@ -358,7 +357,6 @@ public void stopElasticsearch() throws Exception { case DOCKER: case DOCKER_UBI: case DOCKER_IRON_BANK: - case DOCKER_CLOUD: case DOCKER_CLOUD_ESS: case DOCKER_WOLFI: // nothing, "installing" docker image is running it @@ -373,7 +371,7 @@ public void awaitElasticsearchStartup(Shell.Result result) throws Exception { switch (distribution.packaging) { case TAR, ZIP -> Archives.assertElasticsearchStarted(installation); case DEB, RPM -> Packages.assertElasticsearchStarted(sh, installation); - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> Docker.waitForElasticsearchToStart(); + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> Docker.waitForElasticsearchToStart(); default -> throw new IllegalStateException("Unknown Elasticsearch packaging type."); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java index 05cef4a0818ba..11b8324384631 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java @@ -33,8 +33,6 @@ public Distribution(Path path) { this.packaging = Packaging.DOCKER_UBI; } else if (filename.endsWith(".ironbank.tar")) { this.packaging = Packaging.DOCKER_IRON_BANK; - } else if (filename.endsWith(".cloud.tar")) { - this.packaging = Packaging.DOCKER_CLOUD; } else if (filename.endsWith(".cloud-ess.tar")) { this.packaging = Packaging.DOCKER_CLOUD_ESS; } else if (filename.endsWith(".wolfi.tar")) { @@ -63,7 +61,7 @@ public boolean isPackage() { */ public boolean isDocker() { return switch (packaging) { - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> true; + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> true; default -> false; }; } @@ -77,7 +75,6 @@ public enum Packaging { DOCKER(".docker.tar", Platforms.isDocker()), DOCKER_UBI(".ubi.tar", Platforms.isDocker()), DOCKER_IRON_BANK(".ironbank.tar", Platforms.isDocker()), - DOCKER_CLOUD(".cloud.tar", Platforms.isDocker()), DOCKER_CLOUD_ESS(".cloud-ess.tar", Platforms.isDocker()), DOCKER_WOLFI(".wolfi.tar", Platforms.isDocker()); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java index c38eaa58f0552..0cd2823080b9b 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java @@ -532,7 +532,7 @@ public static void verifyContainerInstallation(Installation es) { ) ); - if (es.distribution.packaging == Packaging.DOCKER_CLOUD || es.distribution.packaging == Packaging.DOCKER_CLOUD_ESS) { + if (es.distribution.packaging == Packaging.DOCKER_CLOUD_ESS) { verifyCloudContainerInstallation(es); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java index e562e7591564e..e3eac23d3ecce 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java @@ -165,7 +165,6 @@ public static String getImageName(Distribution distribution) { case DOCKER -> ""; case DOCKER_UBI -> "-ubi"; case DOCKER_IRON_BANK -> "-ironbank"; - case DOCKER_CLOUD -> "-cloud"; case DOCKER_CLOUD_ESS -> "-cloud-ess"; case DOCKER_WOLFI -> "-wolfi"; default -> throw new IllegalStateException("Unexpected distribution packaging type: " + distribution.packaging); diff --git a/settings.gradle b/settings.gradle index a95a46a3569d7..39453e8d0935a 100644 --- a/settings.gradle +++ b/settings.gradle @@ -63,8 +63,6 @@ List projects = [ 'distribution:archives:linux-aarch64-tar', 'distribution:archives:linux-tar', 'distribution:docker', - 'distribution:docker:cloud-docker-export', - 'distribution:docker:cloud-docker-aarch64-export', 'distribution:docker:cloud-ess-docker-export', 'distribution:docker:cloud-ess-docker-aarch64-export', 'distribution:docker:docker-aarch64-export', From d500daf2e16bb3b6fb4bdde49bbf9d93b7fec25b Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 18:02:11 +0200 Subject: [PATCH 072/324] [DOCS][101] Add BYO vectors ingestion tutorial (#115112) --- docs/reference/images/semantic-options.svg | 62 ++++++++ .../search-your-data/ingest-vectors.asciidoc | 141 ++++++++++++++++++ .../search-your-data/semantic-search.asciidoc | 3 + 3 files changed, 206 insertions(+) create mode 100644 docs/reference/images/semantic-options.svg create mode 100644 docs/reference/search/search-your-data/ingest-vectors.asciidoc diff --git a/docs/reference/images/semantic-options.svg b/docs/reference/images/semantic-options.svg new file mode 100644 index 0000000000000..3bedf5307357e --- /dev/null +++ b/docs/reference/images/semantic-options.svg @@ -0,0 +1,62 @@ + + + + Elasticsearch semantic search workflows + + + + + + semantic_text + (Recommended) + + + + Inference API + + + + Model Deployment + + + Complexity: Low + Complexity: Medium + Complexity: High + + + + + + Create Inference Endpoint + + + Define Index Mapping + + + + Create Inference Endpoint + + + Configure Model Settings + + + Define Index Mapping + + + Setup Ingest Pipeline + + + + Select NLP Model + + + Deploy with Eland Client + + + Define Index Mapping + + + Setup Ingest Pipeline + + + diff --git a/docs/reference/search/search-your-data/ingest-vectors.asciidoc b/docs/reference/search/search-your-data/ingest-vectors.asciidoc new file mode 100644 index 0000000000000..f288293d2b03a --- /dev/null +++ b/docs/reference/search/search-your-data/ingest-vectors.asciidoc @@ -0,0 +1,141 @@ +[[bring-your-own-vectors]] +=== Bring your own dense vector embeddings to {es} +++++ +Bring your own dense vectors +++++ + +This tutorial demonstrates how to index documents that already have dense vector embeddings into {es}. +You'll also learn the syntax for searching these documents using a `knn` query. + +You'll find links at the end of this tutorial for more information about deploying a text embedding model in {es}, so you can generate embeddings for queries on the fly. + +[TIP] +==== +This is an advanced use case. +Refer to <> for an overview of your options for semantic search with {es}. +==== + +[discrete] +[[bring-your-own-vectors-create-index]] +=== Step 1: Create an index with `dense_vector` mapping + +Each document in our simple dataset will have: + +* A review: stored in a `review_text` field +* An embedding of that review: stored in a `review_vector` field +** The `review_vector` field is defined as a <> data type. + +[TIP] +==== +The `dense_vector` type automatically uses `int8_hnsw` quantization by default to reduce the memory footprint required when searching float vectors. +Learn more about balancing performance and accuracy in <>. +==== + +[source,console] +---- +PUT /amazon-reviews +{ + "mappings": { + "properties": { + "review_vector": { + "type": "dense_vector", + "dims": 8, <1> + "index": true, <2> + "similarity": "cosine" <3> + }, + "review_text": { + "type": "text" + } + } + } +} +---- +// TEST SETUP +<1> The `dims` parameter must match the length of the embedding vector. Here we're using a simple 8-dimensional embedding for readability. If not specified, `dims` will be dynamically calculated based on the first indexed document. +<2> The `index` parameter is set to `true` to enable the use of the `knn` query. +<3> The `similarity` parameter defines the similarity function used to compare the query vector to the document vectors. `cosine` is the default similarity function for `dense_vector` fields in {es}. + +[discrete] +[[bring-your-own-vectors-index-documents]] +=== Step 2: Index documents with embeddings + +[discrete] +==== Index a single document + +First, index a single document to understand the document structure. + +[source,console] +---- +PUT /amazon-reviews/_doc/1 +{ + "review_text": "This product is lifechanging! I'm telling all my friends about it.", + "review_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] <1> +} +---- +// TEST +<1> The size of the `review_vector` array is 8, matching the `dims` count specified in the mapping. + +[discrete] +==== Bulk index multiple documents + +In a production scenario, you'll want to index many documents at once using the <>. + +Here's an example of indexing multiple documents in a single `_bulk` request. + +[source,console] +---- +POST /_bulk +{ "index": { "_index": "amazon-reviews", "_id": "2" } } +{ "review_text": "This product is amazing! I love it.", "review_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] } +{ "index": { "_index": "amazon-reviews", "_id": "3" } } +{ "review_text": "This product is terrible. I hate it.", "review_vector": [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] } +{ "index": { "_index": "amazon-reviews", "_id": "4" } } +{ "review_text": "This product is great. I can do anything with it.", "review_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] } +{ "index": { "_index": "amazon-reviews", "_id": "5" } } +{ "review_text": "This product has ruined my life and the lives of my family and friends.", "review_vector": [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] } +---- +// TEST[continued] + +[discrete] +[[bring-your-own-vectors-search-documents]] +=== Step 3: Search documents with embeddings + +Now you can query these document vectors using a <>. +`knn` is a type of vector search, which finds the `k` most similar documents to a query vector. +Here we're simply using a raw vector for the query text, for demonstration purposes. + +[source,console] +---- +POST /amazon-reviews/_search +{ + "retriever": { + "knn": { + "field": "review_vector", + "query_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], <1> + "k": 2, <2> + "num_candidates": 5 <3> + } + } +} +---- +// TEST[skip:flakeyknnerror] +<1> In this simple example, we're sending a raw vector as the query text. In a real-world scenario, you'll need to generate vectors for queries using an embedding model. +<2> The `k` parameter specifies the number of results to return. +<3> The `num_candidates` parameter is optional. It limits the number of candidates returned by the search node. This can improve performance and reduce costs. + +[discrete] +[[bring-your-own-vectors-learn-more]] +=== Learn more + +In this simple example, we're sending a raw vector for the query text. +In a real-world scenario you won't know the query text ahead of time. +You'll need to generate query vectors, on the fly, using the same embedding model that generated the document vectors. + +For this you'll need to deploy a text embedding model in {es} and use the <>. Alternatively, you can generate vectors client-side and send them directly with the search request. + +Learn how to <> for semantic search. + +[TIP] +==== +If you're just getting started with vector search in {es}, refer to <>. +==== diff --git a/docs/reference/search/search-your-data/semantic-search.asciidoc b/docs/reference/search/search-your-data/semantic-search.asciidoc index 0ef8591e42b5d..e0fb8415fee18 100644 --- a/docs/reference/search/search-your-data/semantic-search.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search.asciidoc @@ -8,6 +8,8 @@ Using an NLP model enables you to extract text embeddings out of text. Embeddings are vectors that provide a numeric representation of a text. Pieces of content with similar meaning have similar representations. +image::images/semantic-options.svg[Overview of semantic search workflows in {es}] + You have several options for using NLP models in the {stack}: * use the `semantic_text` workflow (recommended) @@ -109,3 +111,4 @@ include::semantic-search-inference.asciidoc[] include::semantic-search-elser.asciidoc[] include::cohere-es.asciidoc[] include::semantic-search-deploy-model.asciidoc[] +include::ingest-vectors.asciidoc[] From a270ee3f9c3e0dcfdd2874d8f64b9612098ddaf3 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 03:05:08 +1100 Subject: [PATCH 073/324] Mute org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT test {yaml=reference/esql/esql-across-clusters/line_197} #115575 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 8c90f73f475e6..ab5d686a041c1 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -273,6 +273,9 @@ tests: - class: org.elasticsearch.test.apmintegration.MetricsApmIT method: testApmIntegration issue: https://github.com/elastic/elasticsearch/issues/115415 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/esql/esql-across-clusters/line_197} + issue: https://github.com/elastic/elasticsearch/issues/115575 # Examples: # From c64226c3503b458c3285064d95528932d324177d Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 18:19:14 +0200 Subject: [PATCH 074/324] Don't return or accept `node_version` in the Desired Nodes API (#114580) It was deprecated in #104209 (8.13) and shouldn't be set or returned in 9.0 The Desired Nodes API is an internal API, and users shouldn't depend on its backward compatibility. --- .../upgrades/DesiredNodesUpgradeIT.java | 13 +-- rest-api-spec/build.gradle | 2 + .../test/cluster.desired_nodes/10_basic.yml | 95 ------------------- .../cluster/metadata/DesiredNode.java | 77 +-------------- .../metadata/DesiredNodeWithStatus.java | 5 +- .../cluster/RestUpdateDesiredNodesAction.java | 12 --- 6 files changed, 13 insertions(+), 191 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index e0d1e7aafa637..17618d5439d48 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -11,7 +11,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; -import org.elasticsearch.Build; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; @@ -82,8 +81,7 @@ private void assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent() throws Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), 1238.49922909, ByteSizeValue.ofGb(32), - ByteSizeValue.ofGb(128), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(128) ) ) .toList(); @@ -153,8 +151,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), processorsPrecision == ProcessorsPrecision.DOUBLE ? randomDoubleProcessorCount() : 0.5f, ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(randomIntBetween(128, 256)) ) ) .toList(); @@ -167,8 +164,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), new DesiredNode.ProcessorsRange(minProcessors, minProcessors + randomIntBetween(10, 20)), ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(randomIntBetween(128, 256)) ); }).toList(); } @@ -182,8 +178,7 @@ private void addClusterNodesToDesiredNodesWithIntegerProcessors(int version) thr Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), randomIntBetween(1, 24), ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(randomIntBetween(128, 256)) ) ) .toList(); diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 6cc2028bffa39..1a398f79085e7 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -61,4 +61,6 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") + task.skipTest("cluster.desired_nodes/10_basic/Test delete desired nodes with node_version generates a warning", "node_version warning is removed in 9.0") + task.skipTest("cluster.desired_nodes/10_basic/Test update desired nodes with node_version generates a warning", "node_version warning is removed in 9.0") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml index 1d1aa524ffb21..a45146a4e147a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml @@ -59,61 +59,6 @@ teardown: - contains: { nodes: { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb" } } - contains: { nodes: { settings: { node: { name: "instance-000188" } }, processors: 16.0, memory: "128gb", storage: "1tb" } } --- -"Test update desired nodes with node_version generates a warning": - - skip: - reason: "contains is a newly added assertion" - features: ["contains", "allowed_warnings"] - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - - do: - nodes.info: {} - - set: { nodes.$master.version: es_version } - - - do: - _internal.update_desired_nodes: - history_id: "test" - version: 1 - body: - nodes: - - { settings: { "node.name": "instance-000187" }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } - allowed_warnings: - - "[version removal] Specifying node_version in desired nodes requests is deprecated." - - match: { replaced_existing_history_id: false } - - - do: - _internal.get_desired_nodes: {} - - match: - $body: - history_id: "test" - version: 1 - nodes: - - { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } - - - do: - _internal.update_desired_nodes: - history_id: "test" - version: 2 - body: - nodes: - - { settings: { "node.name": "instance-000187" }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } - - { settings: { "node.name": "instance-000188" }, processors: 16.0, memory: "128gb", storage: "1tb", node_version: $es_version } - allowed_warnings: - - "[version removal] Specifying node_version in desired nodes requests is deprecated." - - match: { replaced_existing_history_id: false } - - - do: - _internal.get_desired_nodes: {} - - - match: { history_id: "test" } - - match: { version: 2 } - - length: { nodes: 2 } - - contains: { nodes: { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } } - - contains: { nodes: { settings: { node: { name: "instance-000188" } }, processors: 16.0, memory: "128gb", storage: "1tb", node_version: $es_version } } ---- "Test update move to a new history id": - skip: reason: "contains is a newly added assertion" @@ -199,46 +144,6 @@ teardown: _internal.get_desired_nodes: {} - match: { status: 404 } --- -"Test delete desired nodes with node_version generates a warning": - - skip: - features: allowed_warnings - - do: - cluster.state: {} - - - set: { master_node: master } - - - do: - nodes.info: {} - - set: { nodes.$master.version: es_version } - - - do: - _internal.update_desired_nodes: - history_id: "test" - version: 1 - body: - nodes: - - { settings: { "node.external_id": "instance-000187" }, processors: 8.0, memory: "64gb", storage: "128gb", node_version: $es_version } - allowed_warnings: - - "[version removal] Specifying node_version in desired nodes requests is deprecated." - - match: { replaced_existing_history_id: false } - - - do: - _internal.get_desired_nodes: {} - - match: - $body: - history_id: "test" - version: 1 - nodes: - - { settings: { node: { external_id: "instance-000187" } }, processors: 8.0, memory: "64gb", storage: "128gb", node_version: $es_version } - - - do: - _internal.delete_desired_nodes: {} - - - do: - catch: missing - _internal.get_desired_nodes: {} - - match: { status: 404 } ---- "Test update desired nodes is idempotent": - skip: reason: "contains is a newly added assertion" diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index fb8559b19d81d..fe72a59565cf6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -14,7 +14,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -22,7 +21,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.Processors; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -38,7 +36,6 @@ import java.util.Set; import java.util.TreeSet; import java.util.function.Predicate; -import java.util.regex.Pattern; import static java.lang.String.format; import static org.elasticsearch.node.Node.NODE_EXTERNAL_ID_SETTING; @@ -58,8 +55,6 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparabl private static final ParseField PROCESSORS_RANGE_FIELD = new ParseField("processors_range"); private static final ParseField MEMORY_FIELD = new ParseField("memory"); private static final ParseField STORAGE_FIELD = new ParseField("storage"); - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated field - private static final ParseField VERSION_FIELD = new ParseField("node_version"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "desired_node", @@ -69,8 +64,7 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparabl (Processors) args[1], (ProcessorsRange) args[2], (ByteSizeValue) args[3], - (ByteSizeValue) args[4], - (String) args[5] + (ByteSizeValue) args[4] ) ); @@ -104,12 +98,6 @@ static void configureParser(ConstructingObjectParser parser) { STORAGE_FIELD, ObjectParser.ValueType.STRING ); - parser.declareField( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> p.text(), - VERSION_FIELD, - ObjectParser.ValueType.STRING - ); } private final Settings settings; @@ -118,21 +106,9 @@ static void configureParser(ConstructingObjectParser parser) { private final ByteSizeValue memory; private final ByteSizeValue storage; - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated version field - private final String version; private final String externalId; private final Set roles; - @Deprecated - public DesiredNode(Settings settings, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage, String version) { - this(settings, null, processorsRange, memory, storage, version); - } - - @Deprecated - public DesiredNode(Settings settings, double processors, ByteSizeValue memory, ByteSizeValue storage, String version) { - this(settings, Processors.of(processors), null, memory, storage, version); - } - public DesiredNode(Settings settings, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage) { this(settings, null, processorsRange, memory, storage); } @@ -142,17 +118,6 @@ public DesiredNode(Settings settings, double processors, ByteSizeValue memory, B } DesiredNode(Settings settings, Processors processors, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage) { - this(settings, processors, processorsRange, memory, storage, null); - } - - DesiredNode( - Settings settings, - Processors processors, - ProcessorsRange processorsRange, - ByteSizeValue memory, - ByteSizeValue storage, - @Deprecated String version - ) { assert settings != null; assert memory != null; assert storage != null; @@ -186,7 +151,6 @@ public DesiredNode(Settings settings, double processors, ByteSizeValue memory, B this.processorsRange = processorsRange; this.memory = memory; this.storage = storage; - this.version = version; this.externalId = NODE_EXTERNAL_ID_SETTING.get(settings); this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(DiscoveryNode.getRolesFromSettings(settings))); } @@ -210,19 +174,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { } else { version = Version.readVersion(in).toString(); } - return new DesiredNode(settings, processors, processorsRange, memory, storage, version); - } - - private static final Pattern SEMANTIC_VERSION_PATTERN = Pattern.compile("^(\\d+\\.\\d+\\.\\d+)\\D?.*"); - - private static Version parseLegacyVersion(String version) { - if (version != null) { - var semanticVersionMatcher = SEMANTIC_VERSION_PATTERN.matcher(version); - if (semanticVersionMatcher.matches()) { - return Version.fromString(semanticVersionMatcher.group(1)); - } - } - return null; + return new DesiredNode(settings, processors, processorsRange, memory, storage); } @Override @@ -239,15 +191,9 @@ public void writeTo(StreamOutput out) throws IOException { memory.writeTo(out); storage.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { - out.writeOptionalString(version); + out.writeOptionalString(null); } else { - Version parsedVersion = parseLegacyVersion(version); - if (version == null) { - // Some node is from before we made the version field not required. If so, fill in with the current node version. - Version.writeVersion(Version.CURRENT, out); - } else { - Version.writeVersion(parsedVersion, out); - } + Version.writeVersion(Version.CURRENT, out); } } @@ -275,14 +221,6 @@ public void toInnerXContent(XContentBuilder builder, Params params) throws IOExc } builder.field(MEMORY_FIELD.getPreferredName(), memory); builder.field(STORAGE_FIELD.getPreferredName(), storage); - addDeprecatedVersionField(builder); - } - - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated field from response - private void addDeprecatedVersionField(XContentBuilder builder) throws IOException { - if (version != null) { - builder.field(VERSION_FIELD.getPreferredName(), version); - } } public boolean hasMasterRole() { @@ -366,7 +304,6 @@ private boolean equalsWithoutProcessorsSpecification(DesiredNode that) { return Objects.equals(settings, that.settings) && Objects.equals(memory, that.memory) && Objects.equals(storage, that.storage) - && Objects.equals(version, that.version) && Objects.equals(externalId, that.externalId) && Objects.equals(roles, that.roles); } @@ -379,7 +316,7 @@ public boolean equalsWithProcessorsCloseTo(DesiredNode that) { @Override public int hashCode() { - return Objects.hash(settings, processors, processorsRange, memory, storage, version, externalId, roles); + return Objects.hash(settings, processors, processorsRange, memory, storage, externalId, roles); } @Override @@ -408,10 +345,6 @@ public String toString() { + '}'; } - public boolean hasVersion() { - return Strings.isNullOrBlank(version) == false; - } - public record ProcessorsRange(Processors min, @Nullable Processors max) implements Writeable, ToXContentObject { private static final ParseField MIN_FIELD = new ParseField("min"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java index 7b89406be9aa0..606309adf205c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java @@ -44,13 +44,12 @@ public record DesiredNodeWithStatus(DesiredNode desiredNode, Status status) (Processors) args[1], (DesiredNode.ProcessorsRange) args[2], (ByteSizeValue) args[3], - (ByteSizeValue) args[4], - (String) args[5] + (ByteSizeValue) args[4] ), // An unknown status is expected during upgrades to versions >= STATUS_TRACKING_SUPPORT_VERSION // the desired node status would be populated when a node in the newer version is elected as // master, the desired nodes status update happens in NodeJoinExecutor. - args[6] == null ? Status.PENDING : (Status) args[6] + args[5] == null ? Status.PENDING : (Status) args[5] ) ); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java index ec8bb6285bdd4..b8e1fa0c836a3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java @@ -12,13 +12,11 @@ import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesAction; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -67,16 +65,6 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli ); } - if (clusterSupportsFeature.test(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED)) { - if (updateDesiredNodesRequest.getNodes().stream().anyMatch(DesiredNode::hasVersion)) { - deprecationLogger.compatibleCritical("desired_nodes_version", VERSION_DEPRECATION_MESSAGE); - } - } else { - if (updateDesiredNodesRequest.getNodes().stream().anyMatch(n -> n.hasVersion() == false)) { - throw new XContentParseException("[node_version] field is required and must have a valid value"); - } - } - return restChannel -> client.execute( UpdateDesiredNodesAction.INSTANCE, updateDesiredNodesRequest, From ebec1a2fe2bc2b9fc40401074dbbb0dbcdc800bd Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Thu, 24 Oct 2024 18:25:38 +0200 Subject: [PATCH 075/324] Improve Logsdb docs including default values (#115205) This PR adds detailed documentation for `logsdb` mode, covering several key aspects of its default behavior and configuration options. It includes: - default settings for index sorting (`index.sort.field`, `index.sort.order`, etc.). - usage of synthetic `_source` by default. - information about specialized codecs and how users can override them. - default behavior for `ignore_malformed` and `ignore_above` settings, including precedence rules. - explanation of how fields without `doc_values` are handled and what we do if they are missing. --- docs/reference/data-streams/logs.asciidoc | 180 +++++++++++++++++++++- 1 file changed, 172 insertions(+), 8 deletions(-) diff --git a/docs/reference/data-streams/logs.asciidoc b/docs/reference/data-streams/logs.asciidoc index e870289bcf7be..6bb98684544a3 100644 --- a/docs/reference/data-streams/logs.asciidoc +++ b/docs/reference/data-streams/logs.asciidoc @@ -8,14 +8,6 @@ A logs data stream is a data stream type that stores log data more efficiently. In benchmarks, log data stored in a logs data stream used ~2.5 times less disk space than a regular data stream. The exact impact will vary depending on your data set. -The following features are enabled in a logs data stream: - -* <>, which omits storing the `_source` field. When the document source is requested, it is synthesized from document fields upon retrieval. - -* Index sorting. This yields a lower storage footprint. By default indices are sorted by `host.name` and `@timestamp` fields at index time. - -* More space efficient compression for fields with <> enabled. - [discrete] [[how-to-use-logsds]] === Create a logs data stream @@ -50,3 +42,175 @@ DELETE _index_template/my-index-template ---- // TEST[continued] //// + +[[logsdb-default-settings]] + +[discrete] +[[logsdb-synthtic-source]] +=== Synthetic source + +By default, `logsdb` mode uses <>, which omits storing the original `_source` +field and synthesizes it from doc values or stored fields upon document retrieval. Synthetic source comes with a few +restrictions which you can read more about in the <> section dedicated to it. + +NOTE: When dealing with multi-value fields, the `index.mapping.synthetic_source_keep` setting controls how field values +are preserved for <> reconstruction. In `logsdb`, the default value is `arrays`, +which retains both duplicate values and the order of entries but not necessarily the exact structure when it comes to +array elements or objects. Preserving duplicates and ordering could be critical for some log fields. This could be the +case, for instance, for DNS A records, HTTP headers, or log entries that represent sequential or repeated events. + +For more details on this setting and ways to refine or bypass it, check out <>. + +[discrete] +[[logsdb-sort-settings]] +=== Index sort settings + +The following settings are applied by default when using the `logsdb` mode for index sorting: + +* `index.sort.field`: `["host.name", "@timestamp"]` + In `logsdb` mode, indices are sorted by `host.name` and `@timestamp` fields by default. For data streams, the + `@timestamp` field is automatically injected if it is not present. + +* `index.sort.order`: `["desc", "desc"]` + The default sort order for both fields is descending (`desc`), prioritizing the latest data. + +* `index.sort.mode`: `["min", "min"]` + The default sort mode is `min`, ensuring that indices are sorted by the minimum value of multi-value fields. + +* `index.sort.missing`: `["_first", "_first"]` + Missing values are sorted to appear first (`_first`) in `logsdb` index mode. + +`logsdb` index mode allows users to override the default sort settings. For instance, users can specify their own fields +and order for sorting by modifying the `index.sort.field` and `index.sort.order`. + +When using default sort settings, the `host.name` field is automatically injected into the mappings of the +index as a `keyword` field to ensure that sorting can be applied. This guarantees that logs are efficiently sorted and +retrieved based on the `host.name` and `@timestamp` fields. + +NOTE: If `subobjects` is set to `true` (which is the default), the `host.name` field will be mapped as an object field +named `host`, containing a `name` child field of type `keyword`. On the other hand, if `subobjects` is set to `false`, +a single `host.name` field will be mapped as a `keyword` field. + +Once an index is created, the sort settings are immutable and cannot be modified. To apply different sort settings, +a new index must be created with the desired configuration. For data streams, this can be achieved by means of an index +rollover after updating relevant (component) templates. + +If the default sort settings are not suitable for your use case, consider modifying them. Keep in mind that sort +settings can influence indexing throughput, query latency, and may affect compression efficiency due to the way data +is organized after sorting. For more details, refer to our documentation on +<>. + +NOTE: For <>, the `@timestamp` field is automatically injected if not already present. +However, if custom sort settings are applied, the `@timestamp` field is injected into the mappings, but it is not +automatically added to the list of sort fields. + +[discrete] +[[logsdb-specialized-codecs]] +=== Specialized codecs + +`logsdb` index mode uses the `best_compression` <> by default, which applies {wikipedia}/Zstd[ZSTD] +compression to stored fields. Users are allowed to override it and switch to the `default` codec for faster compression +at the expense of slightly larger storage footprint. + +`logsdb` index mode also adopts specialized codecs for numeric doc values that are crafted to optimize storage usage. +Users can rely on these specialized codecs being applied by default when using `logsdb` index mode. + +Doc values encoding for numeric fields in `logsdb` follows a static sequence of codecs, applying each one in the +following order: delta encoding, offset encoding, Greatest Common Divisor GCD encoding, and finally Frame Of Reference +(FOR) encoding. The decision to apply each encoding is based on heuristics determined by the data distribution. +For example, before applying delta encoding, the algorithm checks if the data is monotonically non-decreasing or +non-increasing. If the data fits this pattern, delta encoding is applied; otherwise, the next encoding is considered. + +The encoding is specific to each Lucene segment and is also re-applied at segment merging time. The merged Lucene segment +may use a different encoding compared to the original Lucene segments, based on the characteristics of the merged data. + +The following methods are applied sequentially: + +* **Delta encoding**: + a compression method that stores the difference between consecutive values instead of the actual values. + +* **Offset encoding**: + a compression method that stores the difference from a base value rather than between consecutive values. + +* **Greatest Common Divisor (GCD) encoding**: + a compression method that finds the greatest common divisor of a set of values and stores the differences + as multiples of the GCD. + +* **Frame Of Reference (FOR) encoding**: + a compression method that determines the smallest number of bits required to encode a block of values and uses + bit-packing to fit such values into larger 64-bit blocks. + +For keyword fields, **Run Length Encoding (RLE)** is applied to the ordinals, which represent positions in the Lucene +segment-level keyword dictionary. This compression is used when multiple consecutive documents share the same keyword. + +[discrete] +[[logsdb-ignored-settings]] +=== `ignore_malformed`, `ignore_above`, `ignore_dynamic_beyond_limit` + +By default, `logsdb` index mode sets `ignore_malformed` to `true`. This setting allows documents with malformed fields +to be indexed without causing indexing failures, ensuring that log data ingestion continues smoothly even when some +fields contain invalid or improperly formatted data. + +Users can override this setting by setting `index.mapping.ignore_malformed` to `false`. However, this is not recommended +as it might result in documents with malformed fields being rejected and not indexed at all. + +In `logsdb` index mode, the `index.mapping.ignore_above` setting is applied by default at the index level to ensure +efficient storage and indexing of large keyword fields.The index-level default for `ignore_above` is set to 8191 +**characters**. If using UTF-8 encoding, this results in a limit of 32764 bytes, depending on character encoding. +The mapping-level `ignore_above` setting still takes precedence. If a specific field has an `ignore_above` value +defined in its mapping, that value will override the index-level `index.mapping.ignore_above` value. This default +behavior helps to optimize indexing performance by preventing excessively large string values from being indexed, while +still allowing users to customize the limit, overriding it at the mapping level or changing the index level default +setting. + +In `logsdb` index mode, the setting `index.mapping.total_fields.ignore_dynamic_beyond_limit` is set to `true` by +default. This allows dynamically mapped fields to be added on top of statically defined fields without causing document +rejection, even after the total number of fields exceeds the limit defined by `index.mapping.total_fields.limit`. The +`index.mapping.total_fields.limit` setting specifies the maximum number of fields an index can have (static, dynamic +and runtime). When the limit is reached, new dynamically mapped fields will be ignored instead of failing the document +indexing, ensuring continued log ingestion without errors. + +NOTE: When automatically injected, `host.name` and `@timestamp` contribute to the limit of mapped fields. When +`host.name` is mapped with `subobjects: true` it consists of two fields. When `host.name` is mapped with +`subobjects: false` it only consists of one field. + +[discrete] +[[logsdb-nodocvalue-fields]] +=== Fields without doc values + +When `logsdb` index mode uses synthetic `_source`, and `doc_values` are disabled for a field in the mapping, +Elasticsearch may set the `store` setting to `true` for that field as a last resort option to ensure that the field's +data is still available for reconstructing the document’s source when retrieving it via +<>. + +For example, this happens with text fields when `store` is `false` and there is no suitable multi-field available to +reconstruct the original value in <>. + +This automatic adjustment allows synthetic source to work correctly, even when doc values are not enabled for certain +fields. + +[discrete] +[[logsdb-settings-summary]] +=== LogsDB settings summary + +The following is a summary of key settings that apply when using `logsdb` index mode in Elasticsearch: + +* **`index.mode`**: `"logsdb"` + +* **`index.mapping.synthetic_source_keep`**: `"arrays"` + +* **`index.sort.field`**: `["host.name", "@timestamp"]` + +* **`index.sort.order`**: `["desc", "desc"]` + +* **`index.sort.mode`**: `["min", "min"]` + +* **`index.sort.missing`**: `["_first", "_first"]` + +* **`index.codec`**: `"best_compression"` + +* **`index.mapping.ignore_malformed`**: `true` + +* **`index.mapping.ignore_above`**: `8191` + +* **`index.mapping.total_fields.ignore_dynamic_beyond_limit`**: `true` From 160faa2dfc8c590dcb398487b79eb51eb84f8f44 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 24 Oct 2024 09:29:34 -0700 Subject: [PATCH 076/324] Re-enable threadpool blocking in Kibana system index test (#112569) KibanaThreadPoolIT checks the Kibana system user can write (using the system read/write threadpools) even when the normal read/write threadpools are blocked. This commit re-enables a key part of the test which was disabled. closes #107625 --- .../kibana/KibanaThreadPoolIT.java | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java index 61bd31fea3455..553e4696af316 100644 --- a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java +++ b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java @@ -12,6 +12,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; @@ -37,6 +39,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.startsWith; /** @@ -150,15 +153,15 @@ private void assertThreadPoolsBlocked() { new Thread(() -> expectThrows(EsRejectedExecutionException.class, () -> getFuture.actionGet(SAFE_AWAIT_TIMEOUT))).start(); // intentionally commented out this test until https://github.com/elastic/elasticsearch/issues/97916 is fixed - // var e3 = expectThrows( - // SearchPhaseExecutionException.class, - // () -> client().prepareSearch(USER_INDEX) - // .setQuery(QueryBuilders.matchAllQuery()) - // // Request times out if max concurrent shard requests is set to 1 - // .setMaxConcurrentShardRequests(usually() ? SearchRequest.DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS : randomIntBetween(2, 10)) - // .get() - // ); - // assertThat(e3.getMessage(), containsString("all shards failed")); + var e3 = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch(USER_INDEX) + .setQuery(QueryBuilders.matchAllQuery()) + // Request times out if max concurrent shard requests is set to 1 + .setMaxConcurrentShardRequests(usually() ? SearchRequest.DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS : randomIntBetween(2, 10)) + .get() + ); + assertThat(e3.getMessage(), containsString("all shards failed")); } protected void runWithBlockedThreadPools(Runnable runnable) throws Exception { From 5c1a3ada8ae7a790dfd8460c76c6a341d9d42b7a Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 24 Oct 2024 19:37:02 +0300 Subject: [PATCH 077/324] Propagate root subobjects setting to downsample indexes (#115358) * Propagate root subobjects setting to downsample indexes * exclude tests from rest compat * remove subobjects propagation --- x-pack/plugin/downsample/qa/rest/build.gradle | 14 + .../downsample/DownsampleWithBasicRestIT.java | 40 ++ .../test/downsample/10_basic.yml | 466 +++++++++--------- 3 files changed, 292 insertions(+), 228 deletions(-) create mode 100644 x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java diff --git a/x-pack/plugin/downsample/qa/rest/build.gradle b/x-pack/plugin/downsample/qa/rest/build.gradle index ba5ac7b0c7317..5142632a36006 100644 --- a/x-pack/plugin/downsample/qa/rest/build.gradle +++ b/x-pack/plugin/downsample/qa/rest/build.gradle @@ -32,6 +32,20 @@ tasks.named('yamlRestTest') { tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } +tasks.named("yamlRestCompatTestTransform").configure ({ task -> + task.skipTest("downsample/10_basic/Downsample index with empty dimension on routing path", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample histogram as label", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample date timestamp field using strict_date_optional_time_nanos format", + "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample a downsampled index", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample date_nanos timestamp field using custom format", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample using coarse grained timestamp", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample label with ignore_above", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample object field", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample empty and missing labels", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample index", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample index with empty dimension", "Skip until pr/115358 gets backported") +}) if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java new file mode 100644 index 0000000000000..8f75e76315844 --- /dev/null +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.downsample; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; + +public class DownsampleWithBasicRestIT extends ESClientYamlSuiteTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.security.enabled", "false") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public DownsampleWithBasicRestIT(final ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + +} diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml index 0bcd35cc69038..fa3560bec516e 100644 --- a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml @@ -16,6 +16,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -106,6 +107,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -172,6 +174,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -237,6 +240,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -318,29 +322,29 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - match: { hits.hits.0._source.k8s.pod.multi-counter: 0 } - - match: { hits.hits.0._source.k8s.pod.scaled-counter: 0.00 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.min: 100 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.max: 102 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.sum: 607 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.value_count: 6 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.min: 100.0 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.max: 101.0 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.sum: 201.0 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.value_count: 2 } - - match: { hits.hits.0._source.k8s.pod.network.tx.min: 1434521831 } - - match: { hits.hits.0._source.k8s.pod.network.tx.max: 1434577921 } - - match: { hits.hits.0._source.k8s.pod.network.tx.value_count: 2 } - - match: { hits.hits.0._source.k8s.pod.ip: "10.10.55.56" } - - match: { hits.hits.0._source.k8s.pod.created_at: "2021-04-28T19:43:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.number_of_containers: 1 } - - match: { hits.hits.0._source.k8s.pod.tags: ["backend", "test", "us-west2"] } - - match: { hits.hits.0._source.k8s.pod.values: [1, 1, 2] } - - is_false: hits.hits.0._source.k8s.pod.running + - match: { hits.hits.0._source.k8s\.pod\.multi-counter: 0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-counter: 0.00 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.min: 100 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.max: 102 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.sum: 607 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.value_count: 6 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.min: 100.0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.max: 101.0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.sum: 201.0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.value_count: 2 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.min: 1434521831 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.max: 1434577921 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.value_count: 2 } + - match: { hits.hits.0._source.k8s\.pod\.ip: "10.10.55.56" } + - match: { hits.hits.0._source.k8s\.pod\.created_at: "2021-04-28T19:43:00.000Z" } + - match: { hits.hits.0._source.k8s\.pod\.number_of_containers: 1 } + - match: { hits.hits.0._source.k8s\.pod\.tags: ["backend", "test", "us-west2"] } + - match: { hits.hits.0._source.k8s\.pod\.values: [1, 1, 2] } + - is_false: hits.hits.0._source.k8s\.pod\.running # Assert rollup index settings - do: @@ -362,21 +366,21 @@ setup: - match: { test-downsample.mappings.properties.@timestamp.type: date } - match: { test-downsample.mappings.properties.@timestamp.meta.fixed_interval: 1h } - match: { test-downsample.mappings.properties.@timestamp.meta.time_zone: UTC } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.type: aggregate_metric_double } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.default_metric: max } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.time_series_metric: gauge } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-counter.type: long } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-counter.time_series_metric: counter } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.type: scaled_float } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.scaling_factor: 100 } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.time_series_metric: counter } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.type: aggregate_metric_double } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.default_metric: max } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.time_series_metric: gauge } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.type: aggregate_metric_double } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.default_metric: max } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.time_series_metric: gauge } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-counter.type: long } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-counter.time_series_metric: counter } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-counter.type: scaled_float } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-counter.scaling_factor: 100 } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-counter.time_series_metric: counter } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.type: aggregate_metric_double } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.default_metric: max } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.time_series_metric: gauge } + - match: { test-downsample.mappings.properties.k8s\.pod\.uid.type: keyword } + - match: { test-downsample.mappings.properties.k8s\.pod\.uid.time_series_dimension: true } # Assert source index has not been deleted @@ -763,18 +767,18 @@ setup: - match: { test-downsample-2.mappings.properties.@timestamp.type: date } - match: { test-downsample-2.mappings.properties.@timestamp.meta.fixed_interval: 2h } - match: { test-downsample-2.mappings.properties.@timestamp.meta.time_zone: UTC } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.type: aggregate_metric_double } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.default_metric: max } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.time_series_metric: gauge } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-counter.type: long } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-counter.time_series_metric: counter } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.type: aggregate_metric_double } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.default_metric: max } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.time_series_metric: gauge } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.type: aggregate_metric_double } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.default_metric: max } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.time_series_metric: gauge } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-counter.type: long } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-counter.time_series_metric: counter } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.uid.type: keyword } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.uid.time_series_dimension: true } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.type: aggregate_metric_double } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.default_metric: max } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.time_series_metric: gauge } - do: search: @@ -784,29 +788,29 @@ setup: - length: { hits.hits: 3 } - match: { hits.hits.0._source._doc_count: 4 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - match: { hits.hits.0._source.k8s.pod.multi-counter: 76 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.min: 95.0 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.max: 110.0 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.sum: 1209.0 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.value_count: 12 } - - match: { hits.hits.0._source.k8s.pod.network.tx.min: 1434521831 } - - match: { hits.hits.0._source.k8s.pod.network.tx.max: 1434595272 } - - match: { hits.hits.0._source.k8s.pod.network.tx.value_count: 4 } - - match: { hits.hits.0._source.k8s.pod.ip: "10.10.55.120" } - - match: { hits.hits.0._source.k8s.pod.created_at: "2021-04-28T19:45:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.number_of_containers: 1 } - - match: { hits.hits.0._source.k8s.pod.tags: [ "backend", "test", "us-west1" ] } - - match: { hits.hits.0._source.k8s.pod.values: [ 1, 2, 3 ] } - - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.multi-counter: 76 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.min: 95.0 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.max: 110.0 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.sum: 1209.0 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.value_count: 12 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.min: 1434521831 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.max: 1434595272 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.value_count: 4 } + - match: { hits.hits.0._source.k8s\.pod\.ip: "10.10.55.120" } + - match: { hits.hits.0._source.k8s\.pod\.created_at: "2021-04-28T19:45:00.000Z" } + - match: { hits.hits.0._source.k8s\.pod\.number_of_containers: 1 } + - match: { hits.hits.0._source.k8s\.pod\.tags: [ "backend", "test", "us-west1" ] } + - match: { hits.hits.0._source.k8s\.pod\.values: [ 1, 2, 3 ] } + + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.1._source._doc_count: 2 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: 2021-04-28T20:00:00.000Z } - match: { hits.hits.2._source._doc_count: 2 } @@ -890,16 +894,16 @@ setup: - match: { test-downsample-histogram.mappings.properties.@timestamp.type: date } - match: { test-downsample-histogram.mappings.properties.@timestamp.meta.fixed_interval: 1h } - match: { test-downsample-histogram.mappings.properties.@timestamp.meta.time_zone: UTC } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.latency.type: histogram } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.latency.time_series_metric: null } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.empty-histogram.type: histogram } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.empty-histogram.time_series_metric: null } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.type: aggregate_metric_double } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.default_metric: max } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.time_series_metric: gauge } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.latency.type: histogram } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.latency.time_series_metric: null } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.empty-histogram.type: histogram } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.empty-histogram.time_series_metric: null } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.uid.type: keyword } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.uid.time_series_dimension: true } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.type: aggregate_metric_double } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.default_metric: max } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.time_series_metric: gauge } - do: search: @@ -910,64 +914,64 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - length: { hits.hits.0._source.k8s.pod.latency.counts: 4 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.0: 2 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.1: 2 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.2: 8 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.3: 8 } - - length: { hits.hits.0._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.0._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.0._source.k8s.pod.latency.values.1: 10.0 } - - match: { hits.hits.0._source.k8s.pod.latency.values.2: 100.0 } - - match: { hits.hits.0._source.k8s.pod.latency.values.3: 1000.0 } + - length: { hits.hits.0._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.0: 2 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.1: 2 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.2: 8 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.3: 8 } + - length: { hits.hits.0._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.1: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.2: 100.0 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.3: 1000.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.1._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2021-04-28T19:00:00.000Z } - - length: { hits.hits.1._source.k8s.pod.latency.counts: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.0: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.1: 5 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.2: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.3: 13 } - - length: { hits.hits.1._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.1._source.k8s.pod.latency.values.1: 10.0 } - - match: { hits.hits.1._source.k8s.pod.latency.values.2: 100.0 } - - match: { hits.hits.1._source.k8s.pod.latency.values.3: 1000.0 } + - length: { hits.hits.1._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.0: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.1: 5 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.2: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.3: 13 } + - length: { hits.hits.1._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.1: 10.0 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.2: 100.0 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.3: 1000.0 } - match: { hits.hits.2._source._doc_count: 2 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } - - length: { hits.hits.2._source.k8s.pod.latency.counts: 4 } - - match: { hits.hits.2._source.k8s.pod.latency.counts.0: 8 } - - match: { hits.hits.2._source.k8s.pod.latency.counts.1: 7 } - - match: { hits.hits.2._source.k8s.pod.latency.counts.2: 10 } - - match: { hits.hits.2._source.k8s.pod.latency.counts.3: 12 } - - length: { hits.hits.2._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.2._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.2._source.k8s.pod.latency.values.1: 2.0 } - - match: { hits.hits.2._source.k8s.pod.latency.values.2: 5.0 } - - match: { hits.hits.2._source.k8s.pod.latency.values.3: 10.0 } + - length: { hits.hits.2._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.0: 8 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.1: 7 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.2: 10 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.3: 12 } + - length: { hits.hits.2._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.1: 2.0 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.2: 5.0 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.3: 10.0 } - match: { hits.hits.3._source._doc_count: 2 } - - match: { hits.hits.3._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.3._source.metricset: pod } - match: { hits.hits.3._source.@timestamp: 2021-04-28T19:00:00.000Z } - - length: { hits.hits.3._source.k8s.pod.latency.counts: 4 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.0: 7 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.1: 15 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.2: 10 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.3: 10 } - - length: { hits.hits.3._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.3._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.3._source.k8s.pod.latency.values.1: 2.0 } - - match: { hits.hits.3._source.k8s.pod.latency.values.2: 5.0 } - - match: { hits.hits.3._source.k8s.pod.latency.values.3: 10.0 } + - length: { hits.hits.3._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.0: 7 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.1: 15 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.2: 10 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.3: 10 } + - length: { hits.hits.3._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.1: 2.0 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.2: 5.0 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.3: 10.0 } --- "Downsample date_nanos timestamp field using custom format": @@ -988,6 +992,7 @@ setup: start_time: 2023-02-23T00:00:00Z end_time: 2023-02-24T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date_nanos @@ -1048,19 +1053,19 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2023-02-23T12:00:00.000000000Z } - - match: { hits.hits.0._source.k8s.pod.value.min: 8.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 12.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 30.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 8.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 12.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 30.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2023-02-23T13:00:00.000000000Z } - - match: { hits.hits.1._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 9.0 } - do: indices.get_mapping: @@ -1090,6 +1095,7 @@ setup: start_time: 2023-02-23T00:00:00Z end_time: 2023-02-24T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1150,19 +1156,19 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2023-02-23T12:00:00.000Z } - - match: { hits.hits.0._source.k8s.pod.value.min: 8.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 12.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 30.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 8.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 12.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 30.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2023-02-23T13:00:00.000Z } - - match: { hits.hits.1._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 9.0 } - do: indices.get_mapping: @@ -1192,6 +1198,7 @@ setup: start_time: 2023-02-23T00:00:00Z end_time: 2023-02-27T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1251,33 +1258,33 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 1 } - - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2023-02-23 } - - match: { hits.hits.0._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 10.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 10.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2023-02-24 } - - match: { hits.hits.1._source.k8s.pod.value.min: 12.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 12.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 12.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 12.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 12.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 12.0 } - match: { hits.hits.2._source._doc_count: 1 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: 2023-02-25 } - - match: { hits.hits.2._source.k8s.pod.value.min: 8.0 } - - match: { hits.hits.2._source.k8s.pod.value.max: 8.0 } - - match: { hits.hits.2._source.k8s.pod.value.sum: 8.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.min: 8.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.max: 8.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.sum: 8.0 } - match: { hits.hits.3._source._doc_count: 1 } - - match: { hits.hits.3._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.3._source.metricset: pod } - match: { hits.hits.3._source.@timestamp: 2023-02-26 } - - match: { hits.hits.3._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.3._source.k8s.pod.value.max: 9.0 } - - match: { hits.hits.3._source.k8s.pod.value.sum: 9.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.max: 9.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.sum: 9.0 } --- "Downsample object field": @@ -1304,48 +1311,48 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.name: "dog" } - - match: { hits.hits.0._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 16.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 25.0 } - - match: { hits.hits.0._source.k8s.pod.agent.id: "second" } - - match: { hits.hits.0._source.k8s.pod.agent.version: "2.1.7" } + - match: { hits.hits.0._source.k8s\.pod\.name: "dog" } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 16.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 25.0 } + - match: { hits.hits.0._source.k8s\.pod\.agent\.id: "second" } + - match: { hits.hits.0._source.k8s\.pod\.agent\.version: "2.1.7" } - match: { hits.hits.1._source._doc_count: 2 } - - match: { hits.hits.1._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.1._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: "2021-04-28T19:00:00.000Z" } - - match: { hits.hits.1._source.k8s.pod.name: "dog" } - - match: { hits.hits.1._source.k8s.pod.value.min: 17.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 25.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 42.0 } - - match: { hits.hits.1._source.k8s.pod.agent.id: "second" } - - match: { hits.hits.1._source.k8s.pod.agent.version: "2.1.7" } + - match: { hits.hits.1._source.k8s\.pod\.name: "dog" } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 17.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 25.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 42.0 } + - match: { hits.hits.1._source.k8s\.pod\.agent\.id: "second" } + - match: { hits.hits.1._source.k8s\.pod\.agent\.version: "2.1.7" } - match: { hits.hits.2._source._doc_count: 2 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.2._source.k8s.pod.name: "cat" } - - match: { hits.hits.2._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.2._source.k8s.pod.value.max: 20.0 } - - match: { hits.hits.2._source.k8s.pod.value.sum: 30.0 } - - match: { hits.hits.2._source.k8s.pod.agent.id: "first" } - - match: { hits.hits.2._source.k8s.pod.agent.version: "2.0.4" } + - match: { hits.hits.2._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.2._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.max: 20.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.sum: 30.0 } + - match: { hits.hits.2._source.k8s\.pod\.agent\.id: "first" } + - match: { hits.hits.2._source.k8s\.pod\.agent\.version: "2.0.4" } - match: { hits.hits.3._source._doc_count: 2 } - - match: { hits.hits.3._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.3._source.metricset: pod } - match: { hits.hits.3._source.@timestamp: "2021-04-28T20:00:00.000Z" } - - match: { hits.hits.3._source.k8s.pod.name: "cat" } - - match: { hits.hits.3._source.k8s.pod.value.min: 12.0 } - - match: { hits.hits.3._source.k8s.pod.value.max: 15.0 } - - match: { hits.hits.3._source.k8s.pod.value.sum: 27.0 } - - match: { hits.hits.3._source.k8s.pod.agent.id: "first" } - - match: { hits.hits.3._source.k8s.pod.agent.version: "2.0.4" } + - match: { hits.hits.3._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.3._source.k8s\.pod\.value.min: 12.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.max: 15.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.sum: 27.0 } + - match: { hits.hits.3._source.k8s\.pod\.agent\.id: "first" } + - match: { hits.hits.3._source.k8s\.pod\.agent\.version: "2.0.4" } --- "Downsample empty and missing labels": @@ -1372,40 +1379,40 @@ setup: - length: { hits.hits: 3 } - match: { hits.hits.2._source._doc_count: 4 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.2._source.k8s.pod.name: "cat" } - - match: { hits.hits.2._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.2._source.k8s.pod.value.max: 40.0 } - - match: { hits.hits.2._source.k8s.pod.value.sum: 100.0 } - - match: { hits.hits.2._source.k8s.pod.value.value_count: 4 } - - match: { hits.hits.2._source.k8s.pod.label: "abc" } - - match: { hits.hits.2._source.k8s.pod.unmapped: "abc" } + - match: { hits.hits.2._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.2._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.max: 40.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.sum: 100.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.value_count: 4 } + - match: { hits.hits.2._source.k8s\.pod\.label: "abc" } + - match: { hits.hits.2._source.k8s\.pod\.unmapped: "abc" } - match: { hits.hits.1._source._doc_count: 4 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e9597ab } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e9597ab } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.1._source.k8s.pod.name: "cat" } - - match: { hits.hits.1._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 40.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 100.0 } - - match: { hits.hits.1._source.k8s.pod.value.value_count: 4 } - - match: { hits.hits.1._source.k8s.pod.label: null } - - match: { hits.hits.1._source.k8s.pod.unmapped: null } + - match: { hits.hits.1._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 40.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 100.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.value_count: 4 } + - match: { hits.hits.1._source.k8s\.pod\.label: null } + - match: { hits.hits.1._source.k8s\.pod\.unmapped: null } - match: { hits.hits.0._source._doc_count: 4 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.name: "dog" } - - match: { hits.hits.0._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 40.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 100.0 } - - match: { hits.hits.0._source.k8s.pod.value.value_count: 4 } - - match: { hits.hits.0._source.k8s.pod.label: "xyz" } - - match: { hits.hits.0._source.k8s.pod.unmapped: "xyz" } + - match: { hits.hits.0._source.k8s\.pod\.name: "dog" } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 40.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 100.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.value_count: 4 } + - match: { hits.hits.0._source.k8s\.pod\.label: "xyz" } + - match: { hits.hits.0._source.k8s\.pod\.unmapped: "xyz" } --- @@ -1427,6 +1434,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1495,45 +1503,45 @@ setup: - match: { hits.hits.0._source._doc_count: 2 } - match: { hits.hits.0._source.metricset: pod } - - match: { hits.hits.0._source.k8s.pod.name: dog } - - match: { hits.hits.0._source.k8s.pod.value: 20 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - - match: { hits.hits.0._source.k8s.pod.label: foo } + - match: { hits.hits.0._source.k8s\.pod\.name: dog } + - match: { hits.hits.0._source.k8s\.pod\.value: 20 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.label: foo } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.1._source._doc_count: 2 } - match: { hits.hits.1._source.metricset: pod } - - match: { hits.hits.1._source.k8s.pod.name: fox } - - match: { hits.hits.1._source.k8s.pod.value: 20 } - - match: { hits.hits.1._source.k8s.pod.uid: 7393ef8e-489c-11ee-be56-0242ac120002 } - - match: { hits.hits.1._source.k8s.pod.label: bar } + - match: { hits.hits.1._source.k8s\.pod\.name: fox } + - match: { hits.hits.1._source.k8s\.pod\.value: 20 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 7393ef8e-489c-11ee-be56-0242ac120002 } + - match: { hits.hits.1._source.k8s\.pod\.label: bar } - match: { hits.hits.1._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.2._source._doc_count: 2 } - match: { hits.hits.2._source.metricset: pod } - - match: { hits.hits.2._source.k8s.pod.name: cat } - - match: { hits.hits.2._source.k8s.pod.value: 20 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.name: cat } + - match: { hits.hits.2._source.k8s\.pod\.value: 20 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } # NOTE: when downsampling a label field we propagate the last (most-recent timestamp-wise) non-null value, # ignoring/skipping null values. Here the last document has a value that hits ignore_above ("foofoo") and, # as a result, we propagate the value of the previous document ("foo") - - match: { hits.hits.2._source.k8s.pod.label: foo } + - match: { hits.hits.2._source.k8s\.pod\.label: foo } - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.3._source._doc_count: 2 } - match: { hits.hits.3._source.metricset: pod } - - match: { hits.hits.3._source.k8s.pod.name: cow } - - match: { hits.hits.3._source.k8s.pod.value: 20 } - - match: { hits.hits.3._source.k8s.pod.uid: a81ef23a-489c-11ee-be56-0242ac120005 } - - match: { hits.hits.3._source.k8s.pod.label: null } + - match: { hits.hits.3._source.k8s\.pod\.name: cow } + - match: { hits.hits.3._source.k8s\.pod\.value: 20 } + - match: { hits.hits.3._source.k8s\.pod\.uid: a81ef23a-489c-11ee-be56-0242ac120005 } + - match: { hits.hits.3._source.k8s\.pod\.label: null } - match: { hits.hits.3._source.@timestamp: 2021-04-28T18:00:00.000Z } - do: indices.get_mapping: index: test-downsample-label-ignore-above - - match: { test-downsample-label-ignore-above.mappings.properties.k8s.properties.pod.properties.label.type: keyword } - - match: { test-downsample-label-ignore-above.mappings.properties.k8s.properties.pod.properties.label.ignore_above: 3 } + - match: { test-downsample-label-ignore-above.mappings.properties.k8s\.pod\.label.type: keyword } + - match: { test-downsample-label-ignore-above.mappings.properties.k8s\.pod\.label.ignore_above: 3 } --- "Downsample index with empty dimension": @@ -1555,6 +1563,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1612,11 +1621,11 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.name: cat } - - match: { hits.hits.0._source.k8s.pod.empty: null } + - match: { hits.hits.0._source.k8s\.pod\.name: cat } + - match: { hits.hits.0._source.k8s\.pod\.empty: null } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.name: cat } - - match: { hits.hits.1._source.k8s.pod.empty: "" } + - match: { hits.hits.1._source.k8s\.pod\.name: cat } + - match: { hits.hits.1._source.k8s\.pod\.empty: "" } --- "Downsample index with empty dimension on routing path": @@ -1638,6 +1647,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1695,8 +1705,8 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.name: cat } - - match: { hits.hits.0._source.k8s.pod.empty: null } + - match: { hits.hits.0._source.k8s\.pod\.name: cat } + - match: { hits.hits.0._source.k8s\.pod\.empty: null } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.name: cat } - - match: { hits.hits.1._source.k8s.pod.empty: "" } + - match: { hits.hits.1._source.k8s\.pod\.name: cat } + - match: { hits.hits.1._source.k8s\.pod\.empty: "" } From 97ed0a93bb75d0f920c976527f4f5fc0b6065beb Mon Sep 17 00:00:00 2001 From: shainaraskas <58563081+shainaraskas@users.noreply.github.com> Date: Thu, 24 Oct 2024 13:26:15 -0400 Subject: [PATCH 078/324] Make a minor change to trigger release note process (#113975) * changelog entry --- docs/changelog/113975.yaml | 19 +++++++++++++++++++ docs/reference/mapping/params/format.asciidoc | 4 ++-- 2 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/113975.yaml diff --git a/docs/changelog/113975.yaml b/docs/changelog/113975.yaml new file mode 100644 index 0000000000000..632ba038271bb --- /dev/null +++ b/docs/changelog/113975.yaml @@ -0,0 +1,19 @@ +pr: 113975 +summary: JDK locale database change +area: Mapping +type: breaking +issues: [] +breaking: + title: JDK locale database change + area: Mapping + details: | + {es} 8.16 changes the version of the JDK that is included from version 22 to version 23. This changes the locale database that is used by Elasticsearch from the COMPAT database to the CLDR database. This change can cause significant differences to the textual date formats accepted by Elasticsearch, and to calculated week-dates. + + If you run {es} 8.16 on JDK version 22 or below, it will use the COMPAT locale database to match the behavior of 8.15. However, starting with {es} 9.0, {es} will use the CLDR database regardless of JDK version it is run on. + impact: | + This affects you if you use custom date formats using textual or week-date field specifiers. If you use date fields or calculated week-dates that change between the COMPAT and CLDR databases, then this change will cause Elasticsearch to reject previously valid date fields as invalid data. You might need to modify your ingest or output integration code to account for the differences between these two JDK versions. + + Starting in version 8.15.2, Elasticsearch will log deprecation warnings if you are using date format specifiers that might change on upgrading to JDK 23. These warnings are visible in Kibana. + + For detailed guidance, refer to <> and the https://ela.st/jdk-23-locales[Elastic blog]. + notable: true diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index 943e8fb879ff3..6c82b04eb5fe5 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -34,13 +34,13 @@ down to the nearest day. Completely customizable date formats are supported. The syntax for these is explained in https://docs.oracle.com/en/java/javase/21/docs/api/java.base/java/time/format/DateTimeFormatter.html[DateTimeFormatter docs]. -Note that whilst the built-in formats for week dates use the ISO definition of weekyears, +Note that while the built-in formats for week dates use the ISO definition of weekyears, custom formatters using the `Y`, `W`, or `w` field specifiers use the JDK locale definition of weekyears. This can result in different values between the built-in formats and custom formats for week dates. [[built-in-date-formats]] -==== Built In Formats +==== Built-in formats Most of the below formats have a `strict` companion format, which means that year, month and day parts of the month must use respectively 4, 2 and 2 digits From e951984831cc499f5f13efee0d6283ee8957f295 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 24 Oct 2024 11:40:59 -0700 Subject: [PATCH 079/324] Reenable CacheFileTests (#115582) The test issue was fixed by #110807 closes #110801 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index ab5d686a041c1..827a604cd6a19 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -23,9 +23,6 @@ tests: - class: org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests method: testPopulationOfCacheWhenLoadingPrivilegesForAllApplications issue: https://github.com/elastic/elasticsearch/issues/110789 -- class: org.elasticsearch.xpack.searchablesnapshots.cache.common.CacheFileTests - method: testCacheFileCreatedAsSparseFile - issue: https://github.com/elastic/elasticsearch/issues/110801 - class: org.elasticsearch.nativeaccess.VectorSystemPropertyTests method: testSystemPropertyDisabled issue: https://github.com/elastic/elasticsearch/issues/110949 From ad9c5a0a0640f62f763f63682f7e321c4d68ab41 Mon Sep 17 00:00:00 2001 From: Pawan Kartik Date: Thu, 24 Oct 2024 20:15:17 +0100 Subject: [PATCH 080/324] Correctly update search status for a nonexistent local index (#115138) * fix: correctly update search status for a nonexistent local index * Check for cluster existence before updation * Remove unnecessary `println` * Address review comment: add an explanatory code comment * Further clarify code comment --- .../search/ccs/CrossClusterSearchIT.java | 64 +++++++++++++++++++ .../action/search/TransportSearchAction.java | 23 +++++++ 2 files changed, 87 insertions(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 5233a0cd564ef..5984e1acc89af 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -755,6 +755,70 @@ public void testNegativeRemoteIndexNameThrows() { assertNotNull(ee.getCause()); } + public void testClusterDetailsWhenLocalClusterHasNoMatchingIndex() throws Exception { + Map testClusterInfo = setupTwoClusters(); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); + + SearchRequest searchRequest = new SearchRequest("nomatch*", REMOTE_CLUSTER + ":" + remoteIndex); + if (randomBoolean()) { + searchRequest = searchRequest.scroll(TimeValue.timeValueMinutes(1)); + } + + searchRequest.allowPartialSearchResults(false); + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + + boolean minimizeRoundtrips = false; + searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); + + boolean dfs = randomBoolean(); + if (dfs) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } + + searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should BE successful", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo("nomatch*")); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), equalTo(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + }); + } + private static void assertOneFailedShard(Cluster cluster, int totalShards) { assertNotNull(cluster); assertThat(cluster.getStatus(), equalTo(Cluster.Status.PARTIAL)); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 1645a378446a4..302c3e243a1f6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -1247,6 +1247,29 @@ private void executeSearch( indicesAndAliases, concreteLocalIndices ); + + // localShardIterators is empty since there are no matching indices. In such cases, + // we update the local cluster's status from RUNNING to SUCCESSFUL right away. Before + // we attempt to do that, we must ensure that the local cluster was specified in the user's + // search request. This is done by trying to fetch the local cluster via getCluster() and + // checking for a non-null return value. If the local cluster was never specified, its status + // update can be skipped. + if (localShardIterators.isEmpty() + && clusters != SearchResponse.Clusters.EMPTY + && clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) != null) { + clusters.swapCluster( + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + (alias, v) -> new SearchResponse.Cluster.Builder(v).setStatus(SearchResponse.Cluster.Status.SUCCESSFUL) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0) + .setFailures(Collections.emptyList()) + .setTook(TimeValue.timeValueMillis(0)) + .setTimedOut(false) + .build() + ); + } } final GroupShardsIterator shardIterators = mergeShardsIterators(localShardIterators, remoteShardIterators); From 79cfcec065311165f7d491d164e99fed6c5cbeb9 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 21:26:02 +0200 Subject: [PATCH 081/324] Clarify the null check for retention leases (#114979) `MetadataStateFormat.FORMAT.loadLatestState` can actually return null when the state directory hasn't been initialized yet, so we have to keep the null check when loading retention leases during the initialization of the engine. See #39359 --- .../java/org/elasticsearch/gateway/MetadataStateFormat.java | 2 ++ .../org/elasticsearch/index/seqno/ReplicationTracker.java | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java index 30b8d72b83f4c..3e68ec5243f5f 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.transport.Transports; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -485,6 +486,7 @@ public Tuple loadLatestStateWithGeneration(Logger logger, NamedXContent * @param dataLocations the data-locations to try. * @return the latest state or null if no state was found. */ + @Nullable public T loadLatestState(Logger logger, NamedXContentRegistry namedXContentRegistry, Path... dataLocations) throws IOException { return loadLatestStateWithGeneration(logger, namedXContentRegistry, dataLocations).v1(); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index e67e878fd3827..f1e3ac270d959 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersions; @@ -471,9 +470,9 @@ public RetentionLeases loadRetentionLeases(final Path path) throws IOException { return emptyIfNull(retentionLeases); } - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_INDEXING) private static RetentionLeases emptyIfNull(RetentionLeases retentionLeases) { - // we expect never to see a null in 8.x, so adjust this to throw an exception from v9 onwards. + // `MetadataStateFormat.FORMAT.loadLatestState` can actually return null when the state directory + // on a node hasn't been initialized yet return retentionLeases == null ? RetentionLeases.EMPTY : retentionLeases; } From b4edc3ddab0ea910582c0dd0091ed5b147048280 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 21:26:23 +0200 Subject: [PATCH 082/324] Remove loading on-disk cluster metadata from the manifest file (#114698) Since metadata storage was moved to Lucene in #50907 (7.16.0), we shouldn't encounter any on-disk global metadata files, so we can remove support for loading them. --- .../gateway/GatewayIndexStateIT.java | 60 -------- .../gateway/GatewayMetaState.java | 13 -- .../gateway/MetaStateService.java | 119 +--------------- .../java/org/elasticsearch/node/Node.java | 1 - .../gateway/MetaStateServiceTests.java | 132 ------------------ .../gateway/MockGatewayMetaState.java | 8 -- 6 files changed, 1 insertion(+), 332 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 00bd350fe2b84..cdd5a52e048bd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.coordination.CoordinationMetadata; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -27,14 +26,9 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.IOUtils; -import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.IndexClosedException; @@ -46,13 +40,8 @@ import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; -import java.nio.file.Path; import java.util.List; -import java.util.Map; import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -60,7 +49,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.notNullValue; @@ -545,52 +533,4 @@ public void testArchiveBrokenClusterSettings() throws Exception { assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L); } - public void testHalfDeletedIndexImport() throws Exception { - // It's possible for a 6.x node to add a tombstone for an index but not actually delete the index metadata from disk since that - // deletion is slightly deferred and may race against the node being shut down; if you upgrade to 7.x when in this state then the - // node won't start. - - final String nodeName = internalCluster().startNode(); - createIndex("test", 1, 0); - ensureGreen("test"); - - final Metadata metadata = internalCluster().getInstance(ClusterService.class).state().metadata(); - final Path[] paths = internalCluster().getInstance(NodeEnvironment.class).nodeDataPaths(); - final String nodeId = clusterAdmin().prepareNodesInfo(nodeName).clear().get().getNodes().get(0).getNode().getId(); - - writeBrokenMeta(nodeEnvironment -> { - for (final Path path : paths) { - IOUtils.rm(path.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)); - } - MetaStateWriterUtils.writeGlobalState( - nodeEnvironment, - "test", - Metadata.builder(metadata) - // we remove the manifest file, resetting the term and making this look like an upgrade from 6.x, so must also reset the - // term in the coordination metadata - .coordinationMetadata(CoordinationMetadata.builder(metadata.coordinationMetadata()).term(0L).build()) - // add a tombstone but do not delete the index metadata from disk - .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metadata.index("test").getIndex()).build()) - .build() - ); - NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(nodeId, BuildVersion.current(), metadata.oldestIndexVersion()), paths); - }); - - ensureGreen(); - - assertBusy(() -> assertThat(internalCluster().getInstance(NodeEnvironment.class).availableIndexFolders(), empty())); - } - - private void writeBrokenMeta(CheckedConsumer writer) throws Exception { - Map nodeEnvironments = Stream.of(internalCluster().getNodeNames()) - .collect(Collectors.toMap(Function.identity(), nodeName -> internalCluster().getInstance(NodeEnvironment.class, nodeName))); - internalCluster().fullRestart(new RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - final NodeEnvironment nodeEnvironment = nodeEnvironments.get(nodeName); - writer.accept(nodeEnvironment); - return super.onNodeStopped(nodeName); - } - }); - } } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index c863a5bac973a..a7baca59e1857 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; @@ -33,8 +32,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; @@ -185,16 +182,6 @@ private PersistedState createOnDiskPersistedState( long lastAcceptedVersion = onDiskState.lastAcceptedVersion; long currentTerm = onDiskState.currentTerm; - if (onDiskState.empty()) { - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // legacy metadata loader is not needed anymore from v9 onwards - final Tuple legacyState = metaStateService.loadFullState(); - if (legacyState.v1().isEmpty() == false) { - metadata = legacyState.v2(); - lastAcceptedVersion = legacyState.v1().clusterStateVersion(); - currentTerm = legacyState.v1().currentTerm(); - } - } - PersistedState persistedState = null; boolean success = false; try { diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 4260ef51a3976..5f07deff31eea 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -12,22 +12,17 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.function.Predicate; /** @@ -45,118 +40,6 @@ public MetaStateService(NodeEnvironment nodeEnv, NamedXContentRegistry namedXCon this.namedXContentRegistry = namedXContentRegistry; } - /** - * Loads the full state, which includes both the global state and all the indices meta data.
- * When loading, manifest file is consulted (represented by {@link Manifest} class), to load proper generations.
- * If there is no manifest file on disk, this method fallbacks to BWC mode, where latest generation of global and indices - * metadata is loaded. Please note that currently there is no way to distinguish between manifest file being removed and manifest - * file was not yet created. It means that this method always fallbacks to BWC mode, if there is no manifest file. - * - * @return tuple of {@link Manifest} and {@link Metadata} with global metadata and indices metadata. If there is no state on disk, - * meta state with globalGeneration -1 and empty meta data is returned. - * @throws IOException if some IOException when loading files occurs or there is no metadata referenced by manifest file. - */ - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - public Tuple loadFullState() throws IOException { - final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); - if (manifest == null) { - return loadFullStateBWC(); - } - - final Metadata.Builder metadataBuilder; - if (manifest.isGlobalGenerationMissing()) { - metadataBuilder = Metadata.builder(); - } else { - final Metadata globalMetadata = Metadata.FORMAT.loadGeneration( - logger, - namedXContentRegistry, - manifest.globalGeneration(), - nodeEnv.nodeDataPaths() - ); - if (globalMetadata != null) { - metadataBuilder = Metadata.builder(globalMetadata); - } else { - throw new IOException("failed to find global metadata [generation: " + manifest.globalGeneration() + "]"); - } - } - - for (Map.Entry entry : manifest.indexGenerations().entrySet()) { - final Index index = entry.getKey(); - final long generation = entry.getValue(); - final String indexFolderName = index.getUUID(); - final IndexMetadata indexMetadata = IndexMetadata.FORMAT.loadGeneration( - logger, - namedXContentRegistry, - generation, - nodeEnv.resolveIndexFolder(indexFolderName) - ); - if (indexMetadata != null) { - metadataBuilder.put(indexMetadata, false); - } else { - throw new IOException( - "failed to find metadata for existing index " - + index.getName() - + " [location: " - + indexFolderName - + ", generation: " - + generation - + "]" - ); - } - } - - return new Tuple<>(manifest, metadataBuilder.build()); - } - - /** - * "Manifest-less" BWC version of loading metadata from disk. See also {@link #loadFullState()} - */ - private Tuple loadFullStateBWC() throws IOException { - Map indices = new HashMap<>(); - Metadata.Builder metadataBuilder; - - Tuple metadataAndGeneration = Metadata.FORMAT.loadLatestStateWithGeneration( - logger, - namedXContentRegistry, - nodeEnv.nodeDataPaths() - ); - Metadata globalMetadata = metadataAndGeneration.v1(); - long globalStateGeneration = metadataAndGeneration.v2(); - - final IndexGraveyard indexGraveyard; - if (globalMetadata != null) { - metadataBuilder = Metadata.builder(globalMetadata); - indexGraveyard = globalMetadata.custom(IndexGraveyard.TYPE); - } else { - metadataBuilder = Metadata.builder(); - indexGraveyard = IndexGraveyard.builder().build(); - } - - for (String indexFolderName : nodeEnv.availableIndexFolders()) { - Tuple indexMetadataAndGeneration = IndexMetadata.FORMAT.loadLatestStateWithGeneration( - logger, - namedXContentRegistry, - nodeEnv.resolveIndexFolder(indexFolderName) - ); - IndexMetadata indexMetadata = indexMetadataAndGeneration.v1(); - long generation = indexMetadataAndGeneration.v2(); - if (indexMetadata != null) { - if (indexGraveyard.containsIndex(indexMetadata.getIndex())) { - logger.debug("[{}] found metadata for deleted index [{}]", indexFolderName, indexMetadata.getIndex()); - // this index folder is cleared up when state is recovered - } else { - indices.put(indexMetadata.getIndex(), generation); - metadataBuilder.put(indexMetadata, false); - } - } else { - logger.debug("[{}] failed to find metadata for existing index location", indexFolderName); - } - } - - Manifest manifest = Manifest.unknownCurrentTermAndVersion(globalStateGeneration, indices); - return new Tuple<>(manifest, metadataBuilder.build()); - } - /** * Loads the index state for the provided index name, returning null if doesn't exists. */ @@ -193,7 +76,7 @@ List loadIndicesStates(Predicate excludeIndexPathIdsPredi } /** - * Loads the global state, *without* index state, see {@link #loadFullState()} for that. + * Loads the global state, *without* index state */ Metadata loadGlobalState() throws IOException { return Metadata.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index e30f76fdd9414..ec4a534fc883b 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -325,7 +325,6 @@ public Node start() throws NodeValidationException { // TODO: Do not expect that the legacy metadata file is always present https://github.com/elastic/elasticsearch/issues/95211 if (Assertions.ENABLED && DiscoveryNode.isStateless(settings()) == false) { try { - assert injector.getInstance(MetaStateService.class).loadFullState().v1().isEmpty(); final NodeMetadata nodeMetadata = NodeMetadata.FORMAT.loadLatestState( logger, NamedXContentRegistry.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java index 40c4e064216f1..1bbab8bf782bd 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java @@ -9,21 +9,15 @@ package org.elasticsearch.gateway; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Tuple; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; -import java.util.HashMap; - import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.nullValue; public class MetaStateServiceTests extends ESTestCase { @@ -75,130 +69,4 @@ public void testWriteGlobalStateWithIndexAndNoIndexIsLoaded() throws Exception { assertThat(metaStateService.loadGlobalState().persistentSettings(), equalTo(metadata.persistentSettings())); assertThat(metaStateService.loadGlobalState().hasIndex("test1"), equalTo(false)); } - - public void testLoadFullStateBWC() throws Exception { - IndexMetadata indexMetadata = indexMetadata("test1"); - Metadata metadata = Metadata.builder() - .persistentSettings(Settings.builder().put("test1", "value1").build()) - .put(indexMetadata, true) - .build(); - - long globalGeneration = MetaStateWriterUtils.writeGlobalState(env, "test_write", metadata); - long indexGeneration = MetaStateWriterUtils.writeIndex(env, "test_write", indexMetadata); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - Manifest manifest = manifestAndMetadata.v1(); - assertThat(manifest.globalGeneration(), equalTo(globalGeneration)); - assertThat(manifest.indexGenerations(), hasKey(indexMetadata.getIndex())); - assertThat(manifest.indexGenerations().get(indexMetadata.getIndex()), equalTo(indexGeneration)); - - Metadata loadedMetadata = manifestAndMetadata.v2(); - assertThat(loadedMetadata.persistentSettings(), equalTo(metadata.persistentSettings())); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(indexMetadata)); - } - - public void testLoadEmptyStateNoManifest() throws IOException { - Tuple manifestAndMetadata = metaStateService.loadFullState(); - - Manifest manifest = manifestAndMetadata.v1(); - assertTrue(manifest.isEmpty()); - - Metadata metadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(metadata, Metadata.EMPTY_METADATA)); - } - - public void testLoadEmptyStateWithManifest() throws IOException { - Manifest manifest = Manifest.empty(); - MetaStateWriterUtils.writeManifestAndCleanup(env, "test", manifest); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - assertTrue(manifestAndMetadata.v1().isEmpty()); - Metadata metadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(metadata, Metadata.EMPTY_METADATA)); - } - - public void testLoadFullStateMissingGlobalMetadata() throws IOException { - IndexMetadata index = indexMetadata("test1"); - long indexGeneration = MetaStateWriterUtils.writeIndex(env, "test", index); - Manifest manifest = new Manifest( - randomNonNegativeLong(), - randomNonNegativeLong(), - Manifest.empty().globalGeneration(), - new HashMap() { - { - put(index.getIndex(), indexGeneration); - } - } - ); - assertTrue(manifest.isGlobalGenerationMissing()); - MetaStateWriterUtils.writeManifestAndCleanup(env, "test", manifest); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - assertThat(manifestAndMetadata.v1(), equalTo(manifest)); - Metadata loadedMetadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(loadedMetadata, Metadata.EMPTY_METADATA)); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(index)); - } - - public void testLoadFullStateAndUpdateAndClean() throws IOException { - IndexMetadata index = indexMetadata("test1"); - Metadata metadata = Metadata.builder() - .persistentSettings(Settings.builder().put("test1", "value1").build()) - .put(index, true) - .build(); - - long globalGeneration = MetaStateWriterUtils.writeGlobalState(env, "first global state write", metadata); - long indexGeneration = MetaStateWriterUtils.writeIndex(env, "first index state write", index); - - Manifest manifest = new Manifest(randomNonNegativeLong(), randomNonNegativeLong(), globalGeneration, new HashMap() { - { - put(index.getIndex(), indexGeneration); - } - }); - MetaStateWriterUtils.writeManifestAndCleanup(env, "first manifest write", manifest); - - Metadata newMetadata = Metadata.builder() - .persistentSettings(Settings.builder().put("test1", "value2").build()) - .put(index, true) - .build(); - globalGeneration = MetaStateWriterUtils.writeGlobalState(env, "second global state write", newMetadata); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - assertThat(manifestAndMetadata.v1(), equalTo(manifest)); - - Metadata loadedMetadata = manifestAndMetadata.v2(); - assertThat(loadedMetadata.persistentSettings(), equalTo(metadata.persistentSettings())); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(index)); - - manifest = new Manifest(randomNonNegativeLong(), randomNonNegativeLong(), globalGeneration, new HashMap() { - { - put(index.getIndex(), indexGeneration); - } - }); - - MetaStateWriterUtils.writeManifestAndCleanup(env, "second manifest write", manifest); - Metadata.FORMAT.cleanupOldFiles(globalGeneration, env.nodeDataPaths()); - IndexMetadata.FORMAT.cleanupOldFiles(indexGeneration, env.indexPaths(index.getIndex())); - - manifestAndMetadata = metaStateService.loadFullState(); - assertThat(manifestAndMetadata.v1(), equalTo(manifest)); - - loadedMetadata = manifestAndMetadata.v2(); - assertThat(loadedMetadata.persistentSettings(), equalTo(newMetadata.persistentSettings())); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(index)); - - if (randomBoolean()) { - metaStateService.unreferenceAll(); - } else { - metaStateService.deleteAll(); - } - manifestAndMetadata = metaStateService.loadFullState(); - assertTrue(manifestAndMetadata.v1().isEmpty()); - metadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(metadata, Metadata.EMPTY_METADATA)); - } } diff --git a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java index d03396f9b53b3..64b468226e509 100644 --- a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; @@ -19,14 +18,12 @@ import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Tuple; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.plugins.MetadataUpgrader; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; -import java.io.IOException; import java.util.List; import static org.mockito.Mockito.mock; @@ -70,11 +67,6 @@ public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXCont new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); final MetaStateService metaStateService = mock(MetaStateService.class); - try { - when(metaStateService.loadFullState()).thenReturn(new Tuple<>(Manifest.empty(), Metadata.builder().build())); - } catch (IOException e) { - throw new AssertionError(e); - } start( settings, transportService, From e789039dfa8fee60dc2615c3876295ff7c6f3b01 Mon Sep 17 00:00:00 2001 From: Stanislav Malyshev Date: Thu, 24 Oct 2024 13:58:49 -0600 Subject: [PATCH 083/324] Fixing remote ENRICH by pushing the Enrich inside FragmentExec (#114665) * Fixing remote ENRICH by pushing the Enrich inside FragmentExec * Improve handling of more complex cases such as several enriches --- docs/changelog/114665.yaml | 6 ++ .../esql/action/CrossClustersEnrichIT.java | 102 ++++++++++++++++-- .../xpack/esql/analysis/Verifier.java | 7 -- .../xpack/esql/planner/Mapper.java | 42 ++++++++ .../optimizer/PhysicalPlanOptimizerTests.java | 63 +++++++++-- 5 files changed, 195 insertions(+), 25 deletions(-) create mode 100644 docs/changelog/114665.yaml diff --git a/docs/changelog/114665.yaml b/docs/changelog/114665.yaml new file mode 100644 index 0000000000000..b90bb799bd896 --- /dev/null +++ b/docs/changelog/114665.yaml @@ -0,0 +1,6 @@ +pr: 114665 +summary: Fixing remote ENRICH by pushing the Enrich inside `FragmentExec` +area: ES|QL +type: bug +issues: + - 105095 diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java index 7d8bb738098d3..e8e9f45694e9c 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java @@ -47,6 +47,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -469,27 +470,112 @@ public void testEnrichRemoteWithVendor() { } } + public void testEnrichRemoteWithVendorNoSort() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + for (Enrich.Mode hostMode : List.of(Enrich.Mode.ANY, Enrich.Mode.REMOTE)) { + var query = String.format(Locale.ROOT, """ + FROM *:events,events + | LIMIT 100 + | eval ip= TO_STR(host) + | %s + | %s + | stats c = COUNT(*) by vendor + """, enrichHosts(hostMode), enrichVendors(Enrich.Mode.REMOTE)); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + var values = getValuesList(resp); + values.sort(Comparator.comparing(o -> (String) o.get(1), Comparator.nullsLast(Comparator.naturalOrder()))); + assertThat( + values, + equalTo( + List.of( + List.of(6L, "Apple"), + List.of(7L, "Microsoft"), + List.of(1L, "Redhat"), + List.of(2L, "Samsung"), + List.of(1L, "Sony"), + List.of(2L, "Suse"), + Arrays.asList(3L, (String) null) + ) + ) + ); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", "c1", "c2"))); + assertCCSExecutionInfoDetails(executionInfo); + } + } + } + public void testTopNThenEnrichRemote() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + String query = String.format(Locale.ROOT, """ FROM *:events,events | eval ip= TO_STR(host) - | SORT ip + | SORT timestamp, user, ip | LIMIT 5 - | %s + | %s | KEEP host, timestamp, user, os """, enrichHosts(Enrich.Mode.REMOTE)); - var error = expectThrows(VerificationException.class, () -> runQuery(query, randomBoolean()).close()); - assertThat(error.getMessage(), containsString("ENRICH with remote policy can't be executed after LIMIT")); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + assertThat( + getValuesList(resp), + equalTo( + List.of( + List.of("192.168.1.2", 1L, "andres", "Windows"), + List.of("192.168.1.3", 1L, "matthew", "MacOS"), + Arrays.asList("192.168.1.25", 1L, "park", (String) null), + List.of("192.168.1.5", 2L, "akio", "Android"), + List.of("192.168.1.6", 2L, "sergio", "iOS") + ) + ) + ); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", "c1", "c2"))); + assertCCSExecutionInfoDetails(executionInfo); + } } public void testLimitThenEnrichRemote() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + String query = String.format(Locale.ROOT, """ FROM *:events,events - | LIMIT 10 + | LIMIT 25 | eval ip= TO_STR(host) - | %s + | %s | KEEP host, timestamp, user, os """, enrichHosts(Enrich.Mode.REMOTE)); - var error = expectThrows(VerificationException.class, () -> runQuery(query, randomBoolean()).close()); - assertThat(error.getMessage(), containsString("ENRICH with remote policy can't be executed after LIMIT")); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + var values = getValuesList(resp); + values.sort( + Comparator.comparingLong((List o) -> (Long) o.get(1)) + .thenComparing(o -> (String) o.get(0)) + .thenComparing(o -> (String) o.get(2)) + ); + assertThat( + values.subList(0, 5), + equalTo( + List.of( + List.of("192.168.1.2", 1L, "andres", "Windows"), + Arrays.asList("192.168.1.25", 1L, "park", (String) null), + List.of("192.168.1.3", 1L, "matthew", "MacOS"), + List.of("192.168.1.5", 2L, "akio", "Android"), + List.of("192.168.1.5", 2L, "simon", "Android") + ) + ) + ); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", "c1", "c2"))); + assertCCSExecutionInfoDetails(executionInfo); + } } public void testAggThenEnrichRemote() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index e2717cd9af0d1..fbaf43467a2e7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -609,22 +609,15 @@ private static void checkForSortableDataTypes(LogicalPlan p, Set localF */ private static void checkRemoteEnrich(LogicalPlan plan, Set failures) { boolean[] agg = { false }; - boolean[] limit = { false }; boolean[] enrichCoord = { false }; plan.forEachUp(UnaryPlan.class, u -> { - if (u instanceof Limit) { - limit[0] = true; // TODO: Make Limit then enrich_remote work - } if (u instanceof Aggregate) { agg[0] = true; } else if (u instanceof Enrich enrich && enrich.mode() == Enrich.Mode.COORDINATOR) { enrichCoord[0] = true; } if (u instanceof Enrich enrich && enrich.mode() == Enrich.Mode.REMOTE) { - if (limit[0]) { - failures.add(fail(enrich, "ENRICH with remote policy can't be executed after LIMIT")); - } if (agg[0]) { failures.add(fail(enrich, "ENRICH with remote policy can't be executed after STATS")); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java index e571be54692c4..152c492a34433 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java @@ -52,8 +52,10 @@ import org.elasticsearch.xpack.esql.plan.physical.RowExec; import org.elasticsearch.xpack.esql.plan.physical.ShowExec; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; +import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; /** *

This class is part of the planner

@@ -104,6 +106,46 @@ public PhysicalPlan map(LogicalPlan p) { // // Unary Plan // + if (localMode == false && p instanceof Enrich enrich && enrich.mode() == Enrich.Mode.REMOTE) { + // When we have remote enrich, we want to put it under FragmentExec, so it would be executed remotely. + // We're only going to do it on the coordinator node. + // The way we're going to do it is as follows: + // 1. Locate FragmentExec in the tree. If we have no FragmentExec, we won't do anything. + // 2. Put this Enrich under it, removing everything that was below it previously. + // 3. Above FragmentExec, we should deal with pipeline breakers, since pipeline ops already are supposed to go under + // FragmentExec. + // 4. Aggregates can't appear here since the plan should have errored out if we have aggregate inside remote Enrich. + // 5. So we should be keeping: LimitExec, ExchangeExec, OrderExec, TopNExec (actually OrderExec probably can't happen anyway). + + var child = map(enrich.child()); + AtomicBoolean hasFragment = new AtomicBoolean(false); + + var childTransformed = child.transformUp((f) -> { + // Once we reached FragmentExec, we stuff our Enrich under it + if (f instanceof FragmentExec) { + hasFragment.set(true); + return new FragmentExec(p); + } + if (f instanceof EnrichExec enrichExec) { + // It can only be ANY because COORDINATOR would have errored out earlier, and REMOTE should be under FragmentExec + assert enrichExec.mode() == Enrich.Mode.ANY : "enrich must be in ANY mode here"; + return enrichExec.child(); + } + if (f instanceof UnaryExec unaryExec) { + if (f instanceof LimitExec || f instanceof ExchangeExec || f instanceof OrderExec || f instanceof TopNExec) { + return f; + } else { + return unaryExec.child(); + } + } + // Currently, it's either UnaryExec or LeafExec. Leaf will either resolve to FragmentExec or we'll ignore it. + return f; + }); + + if (hasFragment.get()) { + return childTransformed; + } + } if (p instanceof UnaryPlan ua) { var child = map(ua.child()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 964039268e30d..961c70acada7b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -172,7 +172,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -// @TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") +// @TestLogging(value = "org.elasticsearch.xpack.esql:DEBUG", reason = "debug") public class PhysicalPlanOptimizerTests extends ESTestCase { private static final String PARAM_FORMATTING = "%1$s"; @@ -5851,14 +5851,14 @@ public void testEnrichBeforeLimit() { | EVAL employee_id = to_str(emp_no) | ENRICH _remote:departments | LIMIT 10"""); - var enrich = as(plan, EnrichExec.class); - assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); - assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); - var eval = as(enrich.child(), EvalExec.class); - var finalLimit = as(eval.child(), LimitExec.class); + var finalLimit = as(plan, LimitExec.class); var exchange = as(finalLimit.child(), ExchangeExec.class); var fragment = as(exchange.child(), FragmentExec.class); - var partialLimit = as(fragment.fragment(), Limit.class); + var enrich = as(fragment.fragment(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var evalFragment = as(enrich.child(), Eval.class); + var partialLimit = as(evalFragment.child(), Limit.class); as(partialLimit.child(), EsRelation.class); } } @@ -5901,13 +5901,21 @@ public void testLimitThenEnrich() { } public void testLimitThenEnrichRemote() { - var error = expectThrows(VerificationException.class, () -> physicalPlan(""" + var plan = physicalPlan(""" FROM test | LIMIT 10 | EVAL employee_id = to_str(emp_no) | ENRICH _remote:departments - """)); - assertThat(error.getMessage(), containsString("line 4:3: ENRICH with remote policy can't be executed after LIMIT")); + """); + var finalLimit = as(plan, LimitExec.class); + var exchange = as(finalLimit.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var enrich = as(fragment.fragment(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var evalFragment = as(enrich.child(), Eval.class); + var partialLimit = as(evalFragment.child(), Limit.class); + as(partialLimit.child(), EsRelation.class); } public void testEnrichBeforeTopN() { @@ -5961,6 +5969,23 @@ public void testEnrichBeforeTopN() { var eval = as(enrich.child(), Eval.class); as(eval.child(), EsRelation.class); } + { + var plan = physicalPlan(""" + FROM test + | EVAL employee_id = to_str(emp_no) + | ENRICH _remote:departments + | SORT department + | LIMIT 10"""); + var topN = as(plan, TopNExec.class); + var exchange = as(topN.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var partialTopN = as(fragment.fragment(), TopN.class); + var enrich = as(partialTopN.child(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var eval = as(enrich.child(), Eval.class); + as(eval.child(), EsRelation.class); + } } public void testEnrichAfterTopN() { @@ -6000,6 +6025,24 @@ public void testEnrichAfterTopN() { var partialTopN = as(fragment.fragment(), TopN.class); as(partialTopN.child(), EsRelation.class); } + { + var plan = physicalPlan(""" + FROM test + | SORT emp_no + | LIMIT 10 + | EVAL employee_id = to_str(emp_no) + | ENRICH _remote:departments + """); + var topN = as(plan, TopNExec.class); + var exchange = as(topN.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var enrich = as(fragment.fragment(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var evalFragment = as(enrich.child(), Eval.class); + var partialTopN = as(evalFragment.child(), TopN.class); + as(partialTopN.child(), EsRelation.class); + } } public void testManyEnrich() { From cade0021736d69f66db4bc73c022258833c3ff38 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 24 Oct 2024 15:34:27 -0500 Subject: [PATCH 084/324] Fixing ingest simulate yaml rest test when global legacy template is present (#115586) Sometimes the test framework adds a global legacy template. When this happens, a test that is using another legacy template to create an index emits a warning since the index matches two legacy templates. This PR allows that warning. --- .../resources/rest-api-spec/test/ingest/80_ingest_simulate.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index 7ed5ad3154151..2d3fa6b568381 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -1537,6 +1537,8 @@ setup: - not_exists: docs.0.doc.error - do: + allowed_warnings: + - "index [foo-1] matches multiple legacy templates [global, my-legacy-template], composable templates will only match a single template" indices.create: index: foo-1 - match: { acknowledged: true } From d1c7e9886f483f2865b7780ce0ba44689fae622e Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 24 Oct 2024 21:43:22 +0100 Subject: [PATCH 085/324] Update BlobCacheBufferedIndexInput::readVLong to correctly handle negative long values (#115594) --- docs/changelog/115594.yaml | 6 ++++++ .../blobcache/common/BlobCacheBufferedIndexInput.java | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115594.yaml diff --git a/docs/changelog/115594.yaml b/docs/changelog/115594.yaml new file mode 100644 index 0000000000000..91a6089dfb3ce --- /dev/null +++ b/docs/changelog/115594.yaml @@ -0,0 +1,6 @@ +pr: 115594 +summary: Update `BlobCacheBufferedIndexInput::readVLong` to correctly handle negative + long values +area: Search +type: bug +issues: [] diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java index 16645e7523c36..7e7e954d1fa72 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java @@ -175,7 +175,7 @@ public final int readVInt() throws IOException { @Override public final long readVLong() throws IOException { - if (9 <= buffer.remaining()) { + if (10 <= buffer.remaining()) { return ByteBufferStreamInput.readVLong(buffer); } else { return super.readVLong(); From f444c86f857db0f82f528d217bf0da6f5b9308c5 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 24 Oct 2024 13:47:20 -0700 Subject: [PATCH 086/324] Add lookup index mode (#115143) This change introduces a new index mode, lookup, for indices intended for lookup operations in ES|QL. Lookup indices must have a single shard and be replicated to all data nodes by default. Aside from these requirements, they function as standard indices. Documentation will be added later when the lookup operator in ES|QL is implemented. --- .../test/indices.create/10_basic.yml | 67 ++++++ .../index/LookupIndexModeIT.java | 219 ++++++++++++++++++ .../org/elasticsearch/TransportVersions.java | 1 + .../metadata/MetadataCreateIndexService.java | 16 +- .../org/elasticsearch/index/IndexMode.java | 115 ++++++++- .../monitor/metrics/IndicesMetrics.java | 2 +- .../elasticsearch/node/NodeConstruction.java | 10 +- .../indices/CreateIndexCapabilities.java | 7 +- .../index/mapper/MapperServiceTestCase.java | 2 +- .../index/engine/FollowingEngineTests.java | 3 + 10 files changed, 436 insertions(+), 6 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml index 8242b7cdd29e7..31d127b80c844 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -149,3 +149,70 @@ indices.exists_alias: name: logs_2022-12-31 - is_true: '' + +--- +"Create lookup index": + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ lookup_index_mode ] + reason: "Support for 'lookup' index mode capability required" + - do: + indices.create: + index: "test_lookup" + body: + settings: + index.mode: lookup + + - do: + indices.get_settings: + index: test_lookup + + - match: { test_lookup.settings.index.number_of_shards: "1"} + - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"} + +--- +"Create lookup index with one shard": + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ lookup_index_mode ] + reason: "Support for 'lookup' index mode capability required" + - do: + indices.create: + index: "test_lookup" + body: + settings: + index: + mode: lookup + number_of_shards: 1 + + - do: + indices.get_settings: + index: test_lookup + + - match: { test_lookup.settings.index.number_of_shards: "1"} + - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"} + +--- +"Create lookup index with two shards": + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ lookup_index_mode ] + reason: "Support for 'lookup' index mode capability required" + - do: + catch: /illegal_argument_exception/ + indices.create: + index: test_lookup + body: + settings: + index.mode: lookup + index.number_of_shards: 2 + diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java new file mode 100644 index 0000000000000..f294d4a2e7943 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index; + +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.admin.indices.shrink.ResizeAction; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Map; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class LookupIndexModeIT extends ESIntegTestCase { + + @Override + protected int numberOfShards() { + return 1; + } + + public void testBasic() { + internalCluster().ensureAtLeastNumDataNodes(1); + Settings.Builder lookupSettings = Settings.builder().put("index.mode", "lookup"); + if (randomBoolean()) { + lookupSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + } + CreateIndexRequest createRequest = new CreateIndexRequest("hosts"); + createRequest.settings(lookupSettings); + createRequest.simpleMapping("ip", "type=ip", "os", "type=keyword"); + assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createRequest)); + Settings settings = client().admin().indices().prepareGetSettings("hosts").get().getIndexToSettings().get("hosts"); + assertThat(settings.get("index.mode"), equalTo("lookup")); + assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all")); + Map allHosts = Map.of( + "192.168.1.2", + "Windows", + "192.168.1.3", + "MacOS", + "192.168.1.4", + "Linux", + "192.168.1.5", + "Android", + "192.168.1.6", + "iOS", + "192.168.1.7", + "Windows", + "192.168.1.8", + "MacOS", + "192.168.1.9", + "Linux", + "192.168.1.10", + "Linux", + "192.168.1.11", + "Windows" + ); + for (Map.Entry e : allHosts.entrySet()) { + client().prepareIndex("hosts").setSource("ip", e.getKey(), "os", e.getValue()).get(); + } + refresh("hosts"); + assertAcked(client().admin().indices().prepareCreate("events").setSettings(Settings.builder().put("index.mode", "logsdb")).get()); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + String ip = randomFrom(allHosts.keySet()); + String message = randomFrom("login", "logout", "shutdown", "restart"); + client().prepareIndex("events").setSource("@timestamp", "2024-01-01", "ip", ip, "message", message).get(); + } + refresh("events"); + // _search + { + SearchResponse resp = prepareSearch("events", "hosts").setQuery(new MatchQueryBuilder("_index_mode", "lookup")) + .setSize(10000) + .get(); + for (SearchHit hit : resp.getHits()) { + assertThat(hit.getIndex(), equalTo("hosts")); + } + assertHitCount(resp, allHosts.size()); + resp.decRef(); + } + // field_caps + { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); + request.indices("events", "hosts"); + request.fields("*"); + request.setMergeResults(false); + request.indexFilter(new MatchQueryBuilder("_index_mode", "lookup")); + var resp = client().fieldCaps(request).actionGet(); + assertThat(resp.getIndexResponses(), hasSize(1)); + FieldCapabilitiesIndexResponse indexResponse = resp.getIndexResponses().getFirst(); + assertThat(indexResponse.getIndexMode(), equalTo(IndexMode.LOOKUP)); + assertThat(indexResponse.getIndexName(), equalTo("hosts")); + } + } + + public void testRejectMoreThanOneShard() { + int numberOfShards = between(2, 5); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> { + client().admin() + .indices() + .prepareCreate("hosts") + .setSettings(Settings.builder().put("index.mode", "lookup").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)) + .setMapping("ip", "type=ip", "os", "type=keyword") + .get(); + }); + assertThat( + error.getMessage(), + equalTo("index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided " + numberOfShards) + ); + } + + public void testResizeLookupIndex() { + Settings.Builder createSettings = Settings.builder().put("index.mode", "lookup"); + if (randomBoolean()) { + createSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + } + CreateIndexRequest createIndexRequest = new CreateIndexRequest("lookup-1").settings(createSettings); + assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createIndexRequest)); + client().admin().indices().prepareAddBlock(IndexMetadata.APIBlock.WRITE, "lookup-1").get(); + + ResizeRequest clone = new ResizeRequest("lookup-2", "lookup-1"); + clone.setResizeType(ResizeType.CLONE); + assertAcked(client().admin().indices().execute(ResizeAction.INSTANCE, clone).actionGet()); + Settings settings = client().admin().indices().prepareGetSettings("lookup-2").get().getIndexToSettings().get("lookup-2"); + assertThat(settings.get("index.mode"), equalTo("lookup")); + assertThat(settings.get("index.number_of_shards"), equalTo("1")); + assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all")); + + ResizeRequest split = new ResizeRequest("lookup-3", "lookup-1"); + split.setResizeType(ResizeType.SPLIT); + split.getTargetIndexRequest().settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3)); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().execute(ResizeAction.INSTANCE, split).actionGet() + ); + assertThat( + error.getMessage(), + equalTo("index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided 3") + ); + } + + public void testResizeRegularIndexToLookup() { + String dataNode = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin() + .indices() + .prepareCreate("regular-1") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put("index.routing.allocation.require._name", dataNode) + ) + .setMapping("ip", "type=ip", "os", "type=keyword") + .get() + ); + client().admin().indices().prepareAddBlock(IndexMetadata.APIBlock.WRITE, "regular-1").get(); + client().admin() + .indices() + .prepareUpdateSettings("regular-1") + .setSettings(Settings.builder().put("index.number_of_replicas", 0)) + .get(); + + ResizeRequest clone = new ResizeRequest("lookup-3", "regular-1"); + clone.setResizeType(ResizeType.CLONE); + clone.getTargetIndexRequest().settings(Settings.builder().put("index.mode", "lookup")); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().execute(ResizeAction.INSTANCE, clone).actionGet() + ); + assertThat( + error.getMessage(), + equalTo("index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided 2") + ); + + ResizeRequest shrink = new ResizeRequest("lookup-4", "regular-1"); + shrink.setResizeType(ResizeType.SHRINK); + shrink.getTargetIndexRequest() + .settings(Settings.builder().put("index.mode", "lookup").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)); + + error = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().execute(ResizeAction.INSTANCE, shrink).actionGet() + ); + assertThat(error.getMessage(), equalTo("can't change index.mode of index [regular-1] from [standard] to [lookup]")); + } + + public void testDoNotOverrideAutoExpandReplicas() { + internalCluster().ensureAtLeastNumDataNodes(1); + Settings.Builder createSettings = Settings.builder().put("index.mode", "lookup"); + if (randomBoolean()) { + createSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + } + createSettings.put("index.auto_expand_replicas", "3-5"); + CreateIndexRequest createRequest = new CreateIndexRequest("hosts"); + createRequest.settings(createSettings); + createRequest.simpleMapping("ip", "type=ip", "os", "type=keyword"); + assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createRequest)); + Settings settings = client().admin().indices().prepareGetSettings("hosts").get().getIndexToSettings().get("hosts"); + assertThat(settings.get("index.mode"), equalTo("lookup")); + assertThat(settings.get("index.auto_expand_replicas"), equalTo("3-5")); + } +} diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 777ff083f33f8..25bb792d827a9 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -181,6 +181,7 @@ static TransportVersion def(int id) { public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ = def(8_776_00_0); public static final TransportVersion SIMULATE_MAPPING_ADDITION = def(8_777_00_0); public static final TransportVersion INTRODUCE_ALL_APPLICABLE_SELECTOR = def(8_778_00_0); + public static final TransportVersion INDEX_MODE_LOOKUP = def(8_779_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 69e3b7b70ff82..ed029db54bf06 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -308,7 +308,12 @@ private void onlyCreateIndex( final CreateIndexClusterStateUpdateRequest request, final ActionListener listener ) { - normalizeRequestSetting(request); + try { + normalizeRequestSetting(request); + } catch (Exception e) { + listener.onFailure(e); + return; + } var delegate = new AllocationActionListener<>(listener, threadPool.getThreadContext()); submitUnbatchedTask( @@ -1599,6 +1604,15 @@ static IndexMetadata validateResize( // of if the source shards are divisible by the number of target shards IndexMetadata.getRoutingFactor(sourceMetadata.getNumberOfShards(), INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } + if (targetIndexSettings.hasValue(IndexSettings.MODE.getKey())) { + IndexMode oldMode = Objects.requireNonNullElse(sourceMetadata.getIndexMode(), IndexMode.STANDARD); + IndexMode newMode = IndexSettings.MODE.get(targetIndexSettings); + if (newMode != oldMode) { + throw new IllegalArgumentException( + "can't change index.mode of index [" + sourceIndex + "] from [" + oldMode + "] to [" + newMode + "]" + ); + } + } return sourceMetadata; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 75ec67f26dd3a..e6339344b6e5f 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -9,7 +9,9 @@ package org.elasticsearch.index; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.compress.CompressedXContent; @@ -37,8 +39,10 @@ import org.elasticsearch.index.mapper.TsidExtractingIdFieldMapper; import java.io.IOException; +import java.time.Instant; import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.BooleanSupplier; @@ -308,6 +312,78 @@ public SourceFieldMapper.Mode defaultSourceMode() { public String getDefaultCodec() { return CodecService.BEST_COMPRESSION_CODEC; } + }, + LOOKUP("lookup") { + @Override + void validateWithOtherSettings(Map, Object> settings) { + final Integer providedNumberOfShards = (Integer) settings.get(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING); + if (providedNumberOfShards != null && providedNumberOfShards != 1) { + throw new IllegalArgumentException( + "index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided " + providedNumberOfShards + ); + } + } + + @Override + public void validateMapping(MappingLookup lookup) {}; + + @Override + public void validateAlias(@Nullable String indexRouting, @Nullable String searchRouting) {} + + @Override + public void validateTimestampFieldMapping(boolean isDataStream, MappingLookup mappingLookup) { + + } + + @Override + public CompressedXContent getDefaultMapping(final IndexSettings indexSettings) { + return null; + } + + @Override + public TimestampBounds getTimestampBound(IndexMetadata indexMetadata) { + return null; + } + + @Override + public MetadataFieldMapper timeSeriesIdFieldMapper() { + // non time-series indices must not have a TimeSeriesIdFieldMapper + return null; + } + + @Override + public MetadataFieldMapper timeSeriesRoutingHashFieldMapper() { + // non time-series indices must not have a TimeSeriesRoutingIdFieldMapper + return null; + } + + @Override + public IdFieldMapper idFieldMapperWithoutFieldData() { + return ProvidedIdFieldMapper.NO_FIELD_DATA; + } + + @Override + public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { + return new ProvidedIdFieldMapper(fieldDataEnabled); + } + + @Override + public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { + return DocumentDimensions.Noop.INSTANCE; + } + + @Override + public boolean shouldValidateTimestamp() { + return false; + } + + @Override + public void validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper) {} + + @Override + public SourceFieldMapper.Mode defaultSourceMode() { + return SourceFieldMapper.Mode.STORED; + } }; private static final String HOST_NAME = "host.name"; @@ -370,6 +446,7 @@ private static CompressedXContent createDefaultMapping(boolean includeHostName) static final List> VALIDATE_WITH_SETTINGS = List.copyOf( Stream.concat( Stream.of( + IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING, IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING, IndexMetadata.INDEX_ROUTING_PATH, IndexSettings.TIME_SERIES_START_TIME, @@ -476,11 +553,12 @@ public static IndexMode fromString(String value) { case "standard" -> IndexMode.STANDARD; case "time_series" -> IndexMode.TIME_SERIES; case "logsdb" -> IndexMode.LOGSDB; + case "lookup" -> IndexMode.LOOKUP; default -> throw new IllegalArgumentException( "[" + value + "] is an invalid index mode, valid modes are: [" - + Arrays.stream(IndexMode.values()).map(IndexMode::toString).collect(Collectors.joining()) + + Arrays.stream(IndexMode.values()).map(IndexMode::toString).collect(Collectors.joining(",")) + "]" ); }; @@ -492,6 +570,7 @@ public static IndexMode readFrom(StreamInput in) throws IOException { case 0 -> STANDARD; case 1 -> TIME_SERIES; case 2 -> LOGSDB; + case 3 -> LOOKUP; default -> throw new IllegalStateException("unexpected index mode [" + mode + "]"); }; } @@ -501,6 +580,7 @@ public static void writeTo(IndexMode indexMode, StreamOutput out) throws IOExcep case STANDARD -> 0; case TIME_SERIES -> 1; case LOGSDB -> 2; + case LOOKUP -> out.getTransportVersion().onOrAfter(TransportVersions.INDEX_MODE_LOOKUP) ? 3 : 0; }; out.writeByte((byte) code); } @@ -509,4 +589,37 @@ public static void writeTo(IndexMode indexMode, StreamOutput out) throws IOExcep public String toString() { return getName(); } + + /** + * A built-in index setting provider that supplies additional index settings based on the index mode. + * Currently, only the lookup index mode provides non-empty additional settings. + */ + public static final class IndexModeSettingsProvider implements IndexSettingProvider { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + IndexMode indexMode = templateIndexMode; + if (indexMode == null) { + String modeName = indexTemplateAndCreateRequestSettings.get(IndexSettings.MODE.getKey()); + if (modeName != null) { + indexMode = IndexMode.valueOf(modeName.toUpperCase(Locale.ROOT)); + } + } + if (indexMode == LOOKUP) { + return Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all") + .build(); + } else { + return Settings.EMPTY; + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java b/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java index 11df8710fad6c..ba67bc03e1441 100644 --- a/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java +++ b/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java @@ -55,7 +55,7 @@ public IndicesMetrics(MeterRegistry meterRegistry, IndicesService indicesService } private static List registerAsyncMetrics(MeterRegistry registry, IndicesStatsCache cache) { - final int TOTAL_METRICS = 36; + final int TOTAL_METRICS = 48; List metrics = new ArrayList<>(TOTAL_METRICS); for (IndexMode indexMode : IndexMode.values()) { String name = indexMode.getName(); diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 7e3991c1df1f4..784e02059823b 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -80,6 +80,7 @@ import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; @@ -108,6 +109,7 @@ import org.elasticsearch.health.node.tracker.RepositoriesHealthTracker; import org.elasticsearch.health.stats.HealthApiStats; import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.index.IndexingPressure; @@ -820,7 +822,10 @@ private void construct( final var parameters = new IndexSettingProvider.Parameters(indicesService::createIndexMapperServiceForValidation); IndexSettingProviders indexSettingProviders = new IndexSettingProviders( - pluginsService.flatMap(p -> p.getAdditionalIndexSettingProviders(parameters)).collect(Collectors.toSet()) + Sets.union( + builtinIndexSettingProviders(), + pluginsService.flatMap(p -> p.getAdditionalIndexSettingProviders(parameters)).collect(Collectors.toSet()) + ) ); final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService); @@ -1656,4 +1661,7 @@ private Module loadPersistentTasksService( }; } + private Set builtinIndexSettingProviders() { + return Set.of(new IndexMode.IndexModeSettingsProvider()); + } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java index 899486399af6b..900a352d42f30 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java @@ -21,5 +21,10 @@ public class CreateIndexCapabilities { */ private static final String LOGSDB_INDEX_MODE_CAPABILITY = "logsdb_index_mode"; - public static Set CAPABILITIES = Set.of(LOGSDB_INDEX_MODE_CAPABILITY); + /** + * Support lookup index mode + */ + private static final String LOOKUP_INDEX_MODE_CAPABILITY = "lookup_index_mode"; + + public static Set CAPABILITIES = Set.of(LOGSDB_INDEX_MODE_CAPABILITY, LOOKUP_INDEX_MODE_CAPABILITY); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index da04f30ff8023..3960aa5a91cc5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -142,7 +142,7 @@ protected static String randomIndexOptions() { protected final DocumentMapper createDocumentMapper(XContentBuilder mappings, IndexMode indexMode) throws IOException { return switch (indexMode) { - case STANDARD -> createDocumentMapper(mappings); + case STANDARD, LOOKUP -> createDocumentMapper(mappings); case TIME_SERIES -> createTimeSeriesModeDocumentMapper(mappings); case LOGSDB -> createLogsModeDocumentMapper(mappings); }; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index 478a0d08d6612..150eddf039cec 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -700,6 +700,9 @@ public void testProcessOnceOnPrimary() throws Exception { case LOGSDB: settingsBuilder.put("index.mode", IndexMode.LOGSDB.getName()); break; + case LOOKUP: + settingsBuilder.put("index.mode", IndexMode.LOOKUP.getName()); + break; default: throw new UnsupportedOperationException("Unknown index mode [" + indexMode + "]"); } From 057062bcae2b935294d3b9e91cdffdecd2a34208 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 08:09:46 +1100 Subject: [PATCH 087/324] Mute org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} #115600 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 827a604cd6a19..4af02859d88d4 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -273,6 +273,9 @@ tests: - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/esql/esql-across-clusters/line_197} issue: https://github.com/elastic/elasticsearch/issues/115575 +- class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT + method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} + issue: https://github.com/elastic/elasticsearch/issues/115600 # Examples: # From d5265bef572eaa87cc07b861ad00c74f8a955fbf Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 24 Oct 2024 23:17:06 +0200 Subject: [PATCH 088/324] Replace IndexNameExpressionResolver.ExpressionList with imperative logic (#115487) The approach taken by `ExpressionList` becomes very expensive for large numbers of indices/datastreams. It implies that large lists of concrete names (as they are passed down from the transport layer via e.g. security) are copied at least twice during iteration. Removing the intermediary list and inlining the logic brings down the latency of searches targetting many shards/indices at once and allows for subsequent optimizations. The removed tests appear redundant as they tested an implementation detail of the IndexNameExpressionResolver which itself is well covered by its own tests. --- .../metadata/IndexNameExpressionResolver.java | 186 +++++------ .../cluster/metadata/ExpressionListTests.java | 309 ------------------ 2 files changed, 85 insertions(+), 410 deletions(-) delete mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 2229166a2d779..39499253c8790 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -48,7 +48,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -253,7 +252,7 @@ protected static Collection resolveExpressions(Context context, String.. } else { return ExplicitResourceNameFilter.filterUnavailable( context, - DateMathExpressionResolver.resolve(context, List.of(expressions)) + DateMathExpressionResolver.resolve(context, Arrays.asList(expressions)) ); } } else { @@ -264,7 +263,10 @@ protected static Collection resolveExpressions(Context context, String.. } else { return WildcardExpressionResolver.resolve( context, - ExplicitResourceNameFilter.filterUnavailable(context, DateMathExpressionResolver.resolve(context, List.of(expressions))) + ExplicitResourceNameFilter.filterUnavailable( + context, + DateMathExpressionResolver.resolve(context, Arrays.asList(expressions)) + ) ); } } @@ -1294,34 +1296,51 @@ private static boolean shouldIncludeIfAlias(IndexAbstraction ia, IndexNameExpres * */ public static Collection resolve(Context context, List expressions) { - ExpressionList expressionList = new ExpressionList(context, expressions); // fast exit if there are no wildcards to evaluate - if (expressionList.hasWildcard() == false) { + if (context.getOptions().expandWildcardExpressions() == false) { + return expressions; + } + int firstWildcardIndex = 0; + for (; firstWildcardIndex < expressions.size(); firstWildcardIndex++) { + String expression = expressions.get(firstWildcardIndex); + if (isWildcard(expression)) { + break; + } + } + if (firstWildcardIndex == expressions.size()) { return expressions; } Set result = new HashSet<>(); - for (ExpressionList.Expression expression : expressionList) { - if (expression.isWildcard()) { - Stream matchingResources = matchResourcesToWildcard(context, expression.get()); + for (int i = 0; i < firstWildcardIndex; i++) { + result.add(expressions.get(i)); + } + AtomicBoolean emptyWildcardExpansion = context.getOptions().allowNoIndices() ? null : new AtomicBoolean(); + for (int i = firstWildcardIndex; i < expressions.size(); i++) { + String expression = expressions.get(i); + boolean isExclusion = i > firstWildcardIndex && expression.charAt(0) == '-'; + if (i == firstWildcardIndex || isWildcard(expression)) { + Stream matchingResources = matchResourcesToWildcard( + context, + isExclusion ? expression.substring(1) : expression + ); Stream matchingOpenClosedNames = expandToOpenClosed(context, matchingResources); - AtomicBoolean emptyWildcardExpansion = new AtomicBoolean(false); - if (context.getOptions().allowNoIndices() == false) { + if (emptyWildcardExpansion != null) { emptyWildcardExpansion.set(true); matchingOpenClosedNames = matchingOpenClosedNames.peek(x -> emptyWildcardExpansion.set(false)); } - if (expression.isExclusion()) { - matchingOpenClosedNames.forEachOrdered(result::remove); + if (isExclusion) { + matchingOpenClosedNames.forEach(result::remove); } else { - matchingOpenClosedNames.forEachOrdered(result::add); + matchingOpenClosedNames.forEach(result::add); } - if (emptyWildcardExpansion.get()) { - throw notFoundException(expression.get()); + if (emptyWildcardExpansion != null && emptyWildcardExpansion.get()) { + throw notFoundException(expression); } } else { - if (expression.isExclusion()) { - result.remove(expression.get()); + if (isExclusion) { + result.remove(expression.substring(1)); } else { - result.add(expression.get()); + result.add(expression); } } } @@ -1507,27 +1526,35 @@ private DateMathExpressionResolver() { // utility class } + /** + * Resolves date math expressions. If this is a noop the given {@code expressions} list is returned without copying. + * As a result callers of this method should not mutate the returned list. Mutating it may come with unexpected side effects. + */ public static List resolve(Context context, List expressions) { - List result = new ArrayList<>(expressions.size()); - for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { - result.add(resolveExpression(expression, context::getStartTime)); + boolean wildcardSeen = false; + final boolean expandWildcards = context.getOptions().expandWildcardExpressions(); + String[] result = null; + for (int i = 0, n = expressions.size(); i < n; i++) { + String expression = expressions.get(i); + // accepts date-math exclusions that are of the form "-<...{}>",f i.e. the "-" is outside the "<>" date-math template + boolean isExclusion = wildcardSeen && expression.startsWith("-"); + wildcardSeen = wildcardSeen || (expandWildcards && isWildcard(expression)); + String toResolve = isExclusion ? expression.substring(1) : expression; + String resolved = resolveExpression(toResolve, context::getStartTime); + if (toResolve != resolved) { + if (result == null) { + result = expressions.toArray(Strings.EMPTY_ARRAY); + } + result[i] = isExclusion ? "-" + resolved : resolved; + } } - return result; + return result == null ? expressions : Arrays.asList(result); } static String resolveExpression(String expression) { return resolveExpression(expression, System::currentTimeMillis); } - static String resolveExpression(ExpressionList.Expression expression, LongSupplier getTime) { - if (expression.isExclusion()) { - // accepts date-math exclusions that are of the form "-<...{}>", i.e. the "-" is outside the "<>" date-math template - return "-" + resolveExpression(expression.get(), getTime); - } else { - return resolveExpression(expression.get(), getTime); - } - } - static String resolveExpression(String expression, LongSupplier getTime) { if (expression.startsWith(EXPRESSION_LEFT_BOUND) == false || expression.endsWith(EXPRESSION_RIGHT_BOUND) == false) { return expression; @@ -1689,14 +1716,35 @@ private ExplicitResourceNameFilter() { */ public static List filterUnavailable(Context context, List expressions) { ensureRemoteIndicesRequireIgnoreUnavailable(context.getOptions(), expressions); - List result = new ArrayList<>(expressions.size()); - for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { - validateAliasOrIndex(expression); - if (expression.isWildcard() || expression.isExclusion() || ensureAliasOrIndexExists(context, expression.get())) { - result.add(expression.expression()); + final boolean expandWildcards = context.getOptions().expandWildcardExpressions(); + boolean wildcardSeen = false; + List result = null; + for (int i = 0; i < expressions.size(); i++) { + String expression = expressions.get(i); + if (Strings.isEmpty(expression)) { + throw notFoundException(expression); + } + // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API + // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, + // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown + // if the expression can't be found. + if (expression.charAt(0) == '_') { + throw new InvalidIndexNameException(expression, "must not start with '_'."); + } + final boolean isWildcard = expandWildcards && isWildcard(expression); + if (isWildcard || (wildcardSeen && expression.charAt(0) == '-') || ensureAliasOrIndexExists(context, expression)) { + if (result != null) { + result.add(expression); + } + } else { + if (result == null) { + result = new ArrayList<>(expressions.size() - 1); + result.addAll(expressions.subList(0, i)); + } } + wildcardSeen |= isWildcard; } - return result; + return result == null ? expressions : result; } /** @@ -1736,19 +1784,6 @@ private static boolean ensureAliasOrIndexExists(Context context, String name) { return true; } - private static void validateAliasOrIndex(ExpressionList.Expression expression) { - if (Strings.isEmpty(expression.expression())) { - throw notFoundException(expression.expression()); - } - // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API - // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, - // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown - // if the expression can't be found. - if (expression.expression().charAt(0) == '_') { - throw new InvalidIndexNameException(expression.expression(), "must not start with '_'."); - } - } - private static void ensureRemoteIndicesRequireIgnoreUnavailable(IndicesOptions options, List indexExpressions) { if (options.ignoreUnavailable()) { return; @@ -1773,57 +1808,6 @@ private static void failOnRemoteIndicesNotIgnoringUnavailable(List index } } - /** - * Used to iterate expression lists and work out which expression item is a wildcard or an exclusion. - */ - public static final class ExpressionList implements Iterable { - private final List expressionsList; - private final boolean hasWildcard; - - public record Expression(String expression, boolean isWildcard, boolean isExclusion) { - public String get() { - if (isExclusion()) { - // drop the leading "-" if exclusion because it is easier for callers to handle it like this - return expression().substring(1); - } else { - return expression(); - } - } - } - - /** - * Creates the expression iterable that can be used to easily check which expression item is a wildcard or an exclusion (or both). - * The {@param context} is used to check if wildcards ought to be considered or not. - */ - public ExpressionList(Context context, List expressionStrings) { - List expressionsList = new ArrayList<>(expressionStrings.size()); - boolean wildcardSeen = false; - for (String expressionString : expressionStrings) { - boolean isExclusion = expressionString.startsWith("-") && wildcardSeen; - if (context.getOptions().expandWildcardExpressions() && isWildcard(expressionString)) { - wildcardSeen = true; - expressionsList.add(new Expression(expressionString, true, isExclusion)); - } else { - expressionsList.add(new Expression(expressionString, false, isExclusion)); - } - } - this.expressionsList = expressionsList; - this.hasWildcard = wildcardSeen; - } - - /** - * Returns {@code true} if the expression contains any wildcard and the options allow wildcard expansion - */ - public boolean hasWildcard() { - return this.hasWildcard; - } - - @Override - public Iterator iterator() { - return expressionsList.iterator(); - } - } - /** * This is a context for the DateMathExpressionResolver which does not require {@code IndicesOptions} or {@code ClusterState} * since it uses only the start time to resolve expressions. diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java deleted file mode 100644 index 1ca59ff402bd8..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ExpressionList; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ExpressionList.Expression; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.test.ESTestCase; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.function.Supplier; - -import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class ExpressionListTests extends ESTestCase { - - public void testEmpty() { - ExpressionList expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), List.of()); - assertThat(expressionList.iterator().hasNext(), is(false)); - assertThat(expressionList.hasWildcard(), is(false)); - expressionList = new ExpressionList(getContextWithOptions(getNoExpandWildcardsIndicesOptions()), List.of()); - assertThat(expressionList.iterator().hasNext(), is(false)); - assertThat(expressionList.hasWildcard(), is(false)); - } - - public void testExplicitSingleNameExpression() { - for (IndicesOptions indicesOptions : List.of(getExpandWildcardsIndicesOptions(), getNoExpandWildcardsIndicesOptions())) { - for (String expressionString : List.of("non_wildcard", "-non_exclusion")) { - ExpressionList expressionList = new ExpressionList(getContextWithOptions(indicesOptions), List.of(expressionString)); - assertThat(expressionList.hasWildcard(), is(false)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(indicesOptions), List.of(expressionString)); - } - Iterator expressionIterator = expressionList.iterator(); - assertThat(expressionIterator.hasNext(), is(true)); - if (randomBoolean()) { - expressionIterator = expressionList.iterator(); - } - Expression expression = expressionIterator.next(); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.isWildcard(), is(false)); - assertThat(expression.get(), is(expressionString)); - assertThat(expressionIterator.hasNext(), is(false)); - } - } - } - - public void testWildcardSingleExpression() { - for (String wildcardTest : List.of("*", "a*", "*b", "a*b", "a-*b", "a*-b", "-*", "-a*", "-*b", "**", "*-*")) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - List.of(wildcardTest) - ); - assertThat(expressionList.hasWildcard(), is(true)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), List.of(wildcardTest)); - } - Iterator expressionIterator = expressionList.iterator(); - assertThat(expressionIterator.hasNext(), is(true)); - if (randomBoolean()) { - expressionIterator = expressionList.iterator(); - } - Expression expression = expressionIterator.next(); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.isWildcard(), is(true)); - assertThat(expression.get(), is(wildcardTest)); - assertThat(expressionIterator.hasNext(), is(false)); - } - } - - public void testWildcardLongerExpression() { - List onlyExplicits = randomList(7, () -> randomAlphaOfLengthBetween(0, 5)); - String wildcard = randomFrom("*", "*b", "-*", "*-", "c*", "a*b", "**"); - List expressionList = new ArrayList<>(onlyExplicits.size() + 1); - expressionList.addAll(randomSubsetOf(onlyExplicits)); - int wildcardPos = expressionList.size(); - expressionList.add(wildcard); - for (String item : onlyExplicits) { - if (expressionList.contains(item) == false) { - expressionList.add(item); - } - } - ExpressionList expressionIterable = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), expressionList); - assertThat(expressionIterable.hasWildcard(), is(true)); - if (randomBoolean()) { - expressionIterable = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), expressionList); - } - int i = 0; - for (Expression expression : expressionIterable) { - assertThat(expression.isExclusion(), is(false)); - if (i != wildcardPos) { - assertThat(expression.isWildcard(), is(false)); - } else { - assertThat(expression.isWildcard(), is(true)); - } - assertThat(expression.get(), is(expressionList.get(i++))); - } - } - - public void testWildcardsNoExclusionExpressions() { - for (List wildcardExpression : List.of( - List.of("*"), - List.of("a", "*"), - List.of("-b", "*c"), - List.of("-", "a", "c*"), - List.of("*", "a*", "*b"), - List.of("-*", "a", "b*") - )) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - wildcardExpression - ); - assertThat(expressionList.hasWildcard(), is(true)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), wildcardExpression); - } - int i = 0; - for (Expression expression : expressionList) { - assertThat(expression.isExclusion(), is(false)); - if (wildcardExpression.get(i).contains("*")) { - assertThat(expression.isWildcard(), is(true)); - } else { - assertThat(expression.isWildcard(), is(false)); - } - assertThat(expression.get(), is(wildcardExpression.get(i++))); - } - } - } - - public void testWildcardExpressionNoExpandOptions() { - for (List wildcardExpression : List.of( - List.of("*"), - List.of("a", "*"), - List.of("-b", "*c"), - List.of("*d", "-"), - List.of("*", "-*"), - List.of("-", "a", "c*"), - List.of("*", "a*", "*b") - )) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getNoExpandWildcardsIndicesOptions()), - wildcardExpression - ); - assertThat(expressionList.hasWildcard(), is(false)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(getNoExpandWildcardsIndicesOptions()), wildcardExpression); - } - int i = 0; - for (Expression expression : expressionList) { - assertThat(expression.isWildcard(), is(false)); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(wildcardExpression.get(i++))); - } - } - } - - public void testSingleExclusionExpression() { - String wildcard = randomFrom("*", "*b", "-*", "*-", "c*", "a*b", "**", "*-*"); - int wildcardPos = randomIntBetween(0, 3); - String exclusion = randomFrom("-*", "-", "-c*", "-ab", "--"); - int exclusionPos = randomIntBetween(wildcardPos + 1, 7); - List exclusionExpression = new ArrayList<>(); - for (int i = 0; i < wildcardPos; i++) { - exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); - } - exclusionExpression.add(wildcard); - for (int i = wildcardPos + 1; i < exclusionPos; i++) { - exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); - } - exclusionExpression.add(exclusion); - for (int i = 0; i < randomIntBetween(0, 3); i++) { - exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); - } - ExpressionList expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), exclusionExpression); - if (randomBoolean()) { - assertThat(expressionList.hasWildcard(), is(true)); - } - int i = 0; - for (Expression expression : expressionList) { - if (i == wildcardPos) { - assertThat(expression.isWildcard(), is(true)); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(exclusionExpression.get(i++))); - } else if (i == exclusionPos) { - assertThat(expression.isExclusion(), is(true)); - assertThat(expression.isWildcard(), is(exclusionExpression.get(i).contains("*"))); - assertThat(expression.get(), is(exclusionExpression.get(i++).substring(1))); - } else { - assertThat(expression.isWildcard(), is(false)); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(exclusionExpression.get(i++))); - } - } - } - - public void testExclusionsExpression() { - for (Tuple, List> exclusionExpression : List.of( - new Tuple<>(List.of("-a", "*", "-a"), List.of(false, false, true)), - new Tuple<>(List.of("-b*", "c", "-a"), List.of(false, false, true)), - new Tuple<>(List.of("*d", "-", "*b"), List.of(false, true, false)), - new Tuple<>(List.of("-", "--", "-*", "", "-*"), List.of(false, false, false, false, true)), - new Tuple<>(List.of("*-", "-*", "a", "-b"), List.of(false, true, false, true)), - new Tuple<>(List.of("a", "-b", "-*", "-b", "*", "-b"), List.of(false, false, false, true, false, true)), - new Tuple<>(List.of("-a", "*d", "-a", "-*b", "-b", "--"), List.of(false, false, true, true, true, true)) - )) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - exclusionExpression.v1() - ); - if (randomBoolean()) { - assertThat(expressionList.hasWildcard(), is(true)); - } - int i = 0; - for (Expression expression : expressionList) { - boolean isExclusion = exclusionExpression.v2().get(i); - assertThat(expression.isExclusion(), is(isExclusion)); - assertThat(expression.isWildcard(), is(exclusionExpression.v1().get(i).contains("*"))); - if (isExclusion) { - assertThat(expression.get(), is(exclusionExpression.v1().get(i++).substring(1))); - } else { - assertThat(expression.get(), is(exclusionExpression.v1().get(i++))); - } - } - } - } - - private IndicesOptions getExpandWildcardsToOpenOnlyIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - true, - false, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private IndicesOptions getExpandWildcardsToCloseOnlyIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - false, - true, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private IndicesOptions getExpandWildcardsToOpenCloseIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - true, - true, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private IndicesOptions getExpandWildcardsIndicesOptions() { - return ESTestCase.>randomFrom( - this::getExpandWildcardsToOpenOnlyIndicesOptions, - this::getExpandWildcardsToCloseOnlyIndicesOptions, - this::getExpandWildcardsToOpenCloseIndicesOptions - ).get(); - } - - private IndicesOptions getNoExpandWildcardsIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - false, - false, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private Context getContextWithOptions(IndicesOptions indicesOptions) { - Context context = mock(Context.class); - when(context.getOptions()).thenReturn(indicesOptions); - return context; - } -} From b2ab9df1a9ff71442ad8d695ec15fcf8b72e133d Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 24 Oct 2024 22:33:56 +0100 Subject: [PATCH 089/324] [ML] Fix timeout attaching to missing deployment (#115517) Fixes a timeout in the Inference API where if connecting to an existing deployment and that deployment does not exist the listener was not called. --- .../xpack/inference/CreateFromDeploymentIT.java | 8 ++++++++ .../ElasticsearchInternalService.java | 14 +++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java index 0bfb6e9e43b03..273b16d295a3d 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java @@ -109,6 +109,14 @@ public void testModelIdDoesNotMatch() throws IOException { ); } + public void testDeploymentDoesNotExist() { + var deploymentId = "missing_deployment"; + + var inferenceId = "inference_on_missing_deployment"; + var e = expectThrows(ResponseException.class, () -> putModel(inferenceId, endpointConfig(deploymentId), TaskType.SPARSE_EMBEDDING)); + assertThat(e.getMessage(), containsString("Cannot find deployment [missing_deployment]")); + } + public void testNumAllocationsIsUpdated() throws IOException { var modelId = "update_num_allocations"; var deploymentId = modelId; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index a0235f74ce511..fec690199d97d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -34,7 +34,6 @@ import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; import org.elasticsearch.xpack.core.ml.action.GetDeploymentStatsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; -import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction; import org.elasticsearch.xpack.core.ml.action.InferModelAction; import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentStats; @@ -913,7 +912,7 @@ private void validateAgainstDeployment( listener.onFailure( new ElasticsearchStatusException( "Deployment [{}] uses model [{}] which does not match the model [{}] in the request.", - RestStatus.BAD_REQUEST, // TODO better message + RestStatus.BAD_REQUEST, deploymentId, response.get().getModelId(), modelId @@ -933,21 +932,22 @@ private void validateAgainstDeployment( checkTaskTypeForMlNodeModel(response.get().getModelId(), taskType, l.delegateFailureAndWrap((l2, compatibleTaskType) -> { l2.onResponse(updatedSettings); })); + } else { + listener.onFailure(new ElasticsearchStatusException("Cannot find deployment [{}]", RestStatus.NOT_FOUND, deploymentId)); } })); } private void getDeployment(String deploymentId, ActionListener> listener) { client.execute( - GetTrainedModelsStatsAction.INSTANCE, - new GetTrainedModelsStatsAction.Request(deploymentId), + GetDeploymentStatsAction.INSTANCE, + new GetDeploymentStatsAction.Request(deploymentId), listener.delegateFailureAndWrap((l, response) -> { l.onResponse( - response.getResources() + response.getStats() .results() .stream() - .filter(s -> s.getDeploymentStats() != null && s.getDeploymentStats().getDeploymentId().equals(deploymentId)) - .map(GetTrainedModelsStatsAction.Response.TrainedModelStats::getDeploymentStats) + .filter(s -> s.getDeploymentId() != null && s.getDeploymentId().equals(deploymentId)) .findFirst() ); }) From c556a293c384b92a9ef71ec37bd49fb143300236 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 08:50:44 +1100 Subject: [PATCH 090/324] Mute org.elasticsearch.test.rest.ClientYamlTestSuiteIT test {yaml=indices.create/10_basic/Create lookup index} #115605 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 4af02859d88d4..084bf27d6a11b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -276,6 +276,9 @@ tests: - class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} issue: https://github.com/elastic/elasticsearch/issues/115600 +- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT + method: test {yaml=indices.create/10_basic/Create lookup index} + issue: https://github.com/elastic/elasticsearch/issues/115605 # Examples: # From 5714b989fabcf944fb719f31200661789e0824f2 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 24 Oct 2024 16:58:41 -0700 Subject: [PATCH 091/324] Do not run lookup index YAML with two shards (#115608) We can randomly inject a global template that defaults to 2 shards instead of 1. This causes the lookup index YAML tests to fail. To avoid this, the change requires specifying the default_shards setting for these tests --- .../resources/rest-api-spec/test/indices.create/10_basic.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml index 31d127b80c844..d0e1759073e1b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -153,7 +153,7 @@ --- "Create lookup index": - requires: - test_runner_features: [ capabilities ] + test_runner_features: [ capabilities, default_shards ] capabilities: - method: PUT path: /{index} @@ -176,7 +176,7 @@ --- "Create lookup index with one shard": - requires: - test_runner_features: [ capabilities ] + test_runner_features: [ capabilities, default_shards ] capabilities: - method: PUT path: /{index} From bbd887a66a1330188047825799dc8368dbd56ba8 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Fri, 25 Oct 2024 07:45:40 +0200 Subject: [PATCH 092/324] Identify system threads using a Thread subclass (#113562) --- .../common/util/concurrent/EsExecutors.java | 35 ++++++++++--- .../DefaultBuiltInExecutorBuilders.java | 12 +++-- .../threadpool/ExecutorBuilder.java | 7 ++- .../threadpool/FixedExecutorBuilder.java | 49 +++++++++++++++++-- .../threadpool/ScalingExecutorBuilder.java | 4 +- .../util/concurrent/EsExecutorsTests.java | 8 ++- 6 files changed, 98 insertions(+), 17 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index b10db7d4d1dd3..9120576815bac 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -326,16 +326,25 @@ public static String executorName(Thread thread) { } public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) { - return daemonThreadFactory(threadName(settings, namePrefix)); + return createDaemonThreadFactory(threadName(settings, namePrefix), false); } public static ThreadFactory daemonThreadFactory(String nodeName, String namePrefix) { + return daemonThreadFactory(nodeName, namePrefix, false); + } + + public static ThreadFactory daemonThreadFactory(String nodeName, String namePrefix, boolean isSystemThread) { assert nodeName != null && false == nodeName.isEmpty(); - return daemonThreadFactory(threadName(nodeName, namePrefix)); + return createDaemonThreadFactory(threadName(nodeName, namePrefix), isSystemThread); } - public static ThreadFactory daemonThreadFactory(String namePrefix) { - return new EsThreadFactory(namePrefix); + public static ThreadFactory daemonThreadFactory(String name) { + assert name != null && name.isEmpty() == false; + return createDaemonThreadFactory(name, false); + } + + private static ThreadFactory createDaemonThreadFactory(String namePrefix, boolean isSystemThread) { + return new EsThreadFactory(namePrefix, isSystemThread); } static class EsThreadFactory implements ThreadFactory { @@ -343,22 +352,36 @@ static class EsThreadFactory implements ThreadFactory { final ThreadGroup group; final AtomicInteger threadNumber = new AtomicInteger(1); final String namePrefix; + final boolean isSystem; - EsThreadFactory(String namePrefix) { + EsThreadFactory(String namePrefix, boolean isSystem) { this.namePrefix = namePrefix; SecurityManager s = System.getSecurityManager(); group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup(); + this.isSystem = isSystem; } @Override public Thread newThread(Runnable r) { return AccessController.doPrivileged((PrivilegedAction) () -> { - Thread t = new Thread(group, r, namePrefix + "[T#" + threadNumber.getAndIncrement() + "]", 0); + Thread t = new EsThread(group, r, namePrefix + "[T#" + threadNumber.getAndIncrement() + "]", 0, isSystem); t.setDaemon(true); return t; }); } + } + public static class EsThread extends Thread { + private final boolean isSystem; + + EsThread(ThreadGroup group, Runnable target, String name, long stackSize, boolean isSystem) { + super(group, target, name, stackSize); + this.isSystem = isSystem; + } + + public boolean isSystem() { + return isSystem; + } } /** diff --git a/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java b/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java index c3a24d012c013..a97d22a976631 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java +++ b/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java @@ -170,7 +170,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_READ, halfProcMaxAt5, 2000, - EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK, + true ) ); result.put( @@ -180,7 +181,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_WRITE, halfProcMaxAt5, 1000, - new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA) + new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA), + true ) ); result.put( @@ -190,7 +192,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_CRITICAL_READ, halfProcMaxAt5, 2000, - EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK, + true ) ); result.put( @@ -200,7 +203,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_CRITICAL_WRITE, halfProcMaxAt5, 1500, - new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA) + new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA), + true ) ); return unmodifiableMap(result); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java index 2337d51d07571..c259feb1c978e 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java @@ -24,9 +24,11 @@ public abstract class ExecutorBuilder { private final String name; + private final boolean isSystemThread; - public ExecutorBuilder(String name) { + public ExecutorBuilder(String name, boolean isSystemThread) { this.name = name; + this.isSystemThread = isSystemThread; } protected String name() { @@ -90,4 +92,7 @@ abstract static class ExecutorSettings { } + public boolean isSystemThread() { + return isSystemThread; + } } diff --git a/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java index 07db563da39a1..9c723f241f1d0 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java @@ -51,7 +51,28 @@ public final class FixedExecutorBuilder extends ExecutorBuilder( sizeKey, @@ -102,7 +145,7 @@ FixedExecutorSettings getSettings(Settings settings) { ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final ThreadContext threadContext) { int size = settings.size; int queueSize = settings.queueSize; - final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name())); + final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings.nodeName, name(), isSystemThread()); final ExecutorService executor = EsExecutors.newFixed( settings.nodeName + "/" + name(), size, diff --git a/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java index a31f940cdb2dc..1017d41a77444 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java @@ -104,7 +104,7 @@ public ScalingExecutorBuilder( final String prefix, final EsExecutors.TaskTrackingConfig trackingConfig ) { - super(name); + super(name, false); this.coreSetting = Setting.intSetting(settingsKey(prefix, "core"), core, Setting.Property.NodeScope); this.maxSetting = Setting.intSetting(settingsKey(prefix, "max"), max, Setting.Property.NodeScope); this.keepAliveSetting = Setting.timeSetting(settingsKey(prefix, "keep_alive"), keepAlive, Setting.Property.NodeScope); @@ -131,7 +131,7 @@ ThreadPool.ExecutorHolder build(final ScalingExecutorSettings settings, final Th int core = settings.core; int max = settings.max; final ThreadPool.Info info = new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.SCALING, core, max, keepAlive, null); - final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name())); + final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings.nodeName, name()); ExecutorService executor; executor = EsExecutors.newScaling( settings.nodeName + "/" + name(), diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index bdfec9dfaa630..2867c9e007937 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -635,15 +635,19 @@ public void testParseExecutorName() throws InterruptedException { final var executorName = randomAlphaOfLength(10); final String nodeName = rarely() ? null : randomIdentifier(); final ThreadFactory threadFactory; + final boolean isSystem; if (nodeName == null) { + isSystem = false; threadFactory = EsExecutors.daemonThreadFactory(Settings.EMPTY, executorName); } else if (randomBoolean()) { + isSystem = false; threadFactory = EsExecutors.daemonThreadFactory( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(), executorName ); } else { - threadFactory = EsExecutors.daemonThreadFactory(nodeName, executorName); + isSystem = randomBoolean(); + threadFactory = EsExecutors.daemonThreadFactory(nodeName, executorName, isSystem); } final var thread = threadFactory.newThread(() -> {}); @@ -652,6 +656,8 @@ public void testParseExecutorName() throws InterruptedException { assertThat(EsExecutors.executorName(thread), equalTo(executorName)); assertThat(EsExecutors.executorName("TEST-" + thread.getName()), is(nullValue())); assertThat(EsExecutors.executorName("LuceneTestCase" + thread.getName()), is(nullValue())); + assertThat(EsExecutors.executorName("LuceneTestCase" + thread.getName()), is(nullValue())); + assertThat(((EsExecutors.EsThread) thread).isSystem(), equalTo(isSystem)); } finally { thread.join(); } From 7f573c6c28fb42e89d8bb76d6764dc681c239e06 Mon Sep 17 00:00:00 2001 From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com> Date: Fri, 25 Oct 2024 08:50:05 +0200 Subject: [PATCH 093/324] Only aggregations require at least one shard request (#115314) * unskipping shards only when aggs * Update docs/changelog/115314.yaml * fixed more tests * null check for searchRequest.source() --- docs/changelog/115314.yaml | 5 +++ .../datastreams/TSDBIndexingIT.java | 2 +- .../org/elasticsearch/search/CCSDuelIT.java | 4 ++- .../test/multi_cluster/70_skip_shards.yml | 12 +++---- .../multi_cluster/90_index_name_query.yml | 4 +-- .../search/ccs/CrossClusterSearchIT.java | 4 +-- .../search/profile/query/QueryProfilerIT.java | 6 +++- .../search/stats/FieldUsageStatsIT.java | 12 ++++--- .../action/search/TransportSearchAction.java | 4 ++- .../search/CrossClusterAsyncSearchIT.java | 32 +++++++++++++------ .../mapper/SearchIdleTests.java | 10 ++---- .../rrf/RRFRankCoordinatorCanMatchIT.java | 5 +-- .../rank/rrf/RRFRankShardCanMatchIT.java | 5 +-- ...pshotsCanMatchOnCoordinatorIntegTests.java | 12 +++---- .../checkpoint/TransformCCSCanMatchIT.java | 6 ++-- .../oldrepos/OldRepositoryAccessIT.java | 3 +- 16 files changed, 70 insertions(+), 56 deletions(-) create mode 100644 docs/changelog/115314.yaml diff --git a/docs/changelog/115314.yaml b/docs/changelog/115314.yaml new file mode 100644 index 0000000000000..76ac12d58fcf3 --- /dev/null +++ b/docs/changelog/115314.yaml @@ -0,0 +1,5 @@ +pr: 115314 +summary: Only aggregations require at least one shard request +area: Search +type: enhancement +issues: [] diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index 29ec326548f2b..aad68660d2e4d 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -412,7 +412,7 @@ public void testSkippingShards() throws Exception { assertResponse(client().search(searchRequest), searchResponse -> { ElasticsearchAssertions.assertNoSearchHits(searchResponse); assertThat(searchResponse.getTotalShards(), equalTo(2)); - assertThat(searchResponse.getSkippedShards(), equalTo(1)); + assertThat(searchResponse.getSkippedShards(), equalTo(2)); assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); }); } diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 5dde1d664402f..79cdc1047aec9 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -43,6 +43,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -580,13 +581,14 @@ public void testSortByField() throws Exception { public void testSortByFieldOneClusterHasNoResults() throws Exception { assumeMultiClusterSetup(); - // set to a value greater than the number of shards to avoid differences due to the skipping of shards + // setting aggs to avoid differences due to the skipping of shards when matching none SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); boolean onlyRemote = randomBoolean(); sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? REMOTE_INDEX_NAME : INDEX_NAME)); sourceBuilder.sort("type.keyword", SortOrder.ASC); sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort("user.keyword", SortOrder.ASC); + sourceBuilder.aggregation(AggregationBuilders.max("max").field("creationDate")); CheckedConsumer responseChecker = response -> { assertHits(response); int size = response.evaluateArraySize("hits.hits"); diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml index 92ae11c712b25..f392ae6d09413 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml @@ -166,8 +166,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a terms query @@ -183,8 +182,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a prefix query @@ -200,8 +198,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a wildcard query @@ -217,7 +214,6 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml index a60a1b0d812ee..be2ce033b123c 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml @@ -81,7 +81,7 @@ teardown: - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } - do: @@ -98,5 +98,5 @@ teardown: - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 5984e1acc89af..63eece88a53fc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -214,7 +214,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // with DFS_QUERY_THEN_FETCH, the local shards are never skipped assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -224,7 +224,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (clusters.isCcsMinimizeRoundtrips()) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index e6cd89c09b979..0c1012c520dac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -68,7 +68,11 @@ public void testProfileQuery() throws Exception { prepareSearch().setQuery(q).setTrackTotalHits(true).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { assertNotNull("Profile response element should not be null", response.getProfileResults()); - assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); + if (response.getSkippedShards() == response.getSuccessfulShards()) { + assertEquals(0, response.getProfileResults().size()); + } else { + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); + } for (Map.Entry shard : response.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java index 140afd6b269b3..3d5120226ebed 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java @@ -158,11 +158,15 @@ public void testFieldUsageStats() throws ExecutionException, InterruptedExceptio assertTrue(stats.hasField("date_field")); assertEquals(Set.of(UsageContext.POINTS), stats.get("date_field").keySet()); - // can_match does not enter search stats - // there is a special case though where we have no hit but we need to get at least one search response in order - // to produce a valid search result with all the aggs etc., so we hit one of the two shards + + long expectedShards = 2L * numShards; + if (numShards == 1) { + // with 1 shard and setPreFilterShardSize(1) we don't perform can_match phase but instead directly query the shard + expectedShards += 1; + } + assertEquals( - (2 * numShards) + 1, + expectedShards, indicesAdmin().prepareStats("test") .clear() .setSearch(true) diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 302c3e243a1f6..8f718972c2eaa 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -1458,6 +1458,8 @@ public SearchPhase newSearchPhase( SearchResponse.Clusters clusters ) { if (preFilter) { + // only for aggs we need to contact shards even if there are no matches + boolean requireAtLeastOneMatch = searchRequest.source() != null && searchRequest.source().aggregations() != null; return new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -1469,7 +1471,7 @@ public SearchPhase newSearchPhase( shardIterators, timeProvider, task, - true, + requireAtLeastOneMatch, searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), listener.delegateFailureAndWrap( (l, iters) -> newSearchPhase( diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 9d83f88a043e2..3cd8778069d0c 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -274,6 +274,8 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except boolean dfs = randomBoolean(); if (dfs) { request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } else { + request.getSearchRequest().searchType(SearchType.QUERY_THEN_FETCH); } RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp").from(100).to(2000); request.getSearchRequest().source(new SearchSourceBuilder().query(rangeQueryBuilder).size(10)); @@ -288,20 +290,30 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertTrue(response.isRunning()); SearchResponse.Clusters clusters = response.getSearchResponse().getClusters(); assertThat(clusters.getTotal(), equalTo(2)); - assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults()); - + if (dfs) { + assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults()); + } else { + assertFalse( + "search cluster results should not be marked as partial as all shards are skipped", + clusters.hasPartialResults() + ); + } SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); + if (dfs) { + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); + } else { + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + } SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); assertNotNull(remoteClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); } finally { response.decRef(); } - - SearchListenerPlugin.waitSearchStarted(); + if (dfs) { + SearchListenerPlugin.waitSearchStarted(); + } SearchListenerPlugin.allowQueryPhase(); waitForSearchTasksToFinish(); @@ -331,7 +343,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // no skipped shards locally when DFS_QUERY_THEN_FETCH is used assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -341,7 +353,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (minimizeRoundtrips) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } @@ -377,7 +389,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // no skipped shards locally when DFS_QUERY_THEN_FETCH is used assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -387,7 +399,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (minimizeRoundtrips) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java index 2da4e2802bdbe..9eb792428537b 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java @@ -42,7 +42,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; public class SearchIdleTests extends ESSingleNodeTestCase { @@ -133,8 +132,7 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept // WHEN assertResponse(search("test*", "constant_keyword", randomAlphaOfLength(5), 5), searchResponse -> { assertEquals(RestStatus.OK, searchResponse.status()); - // NOTE: we need an empty result from at least one shard - assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, searchResponse.getSkippedShards()); + assertEquals(idleIndexShardsCount + activeIndexShardsCount, searchResponse.getSkippedShards()); assertEquals(0, searchResponse.getFailedShards()); assertEquals(0, searchResponse.getHits().getHits().length); }); @@ -144,12 +142,8 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept assertIdleShardsRefreshStats(beforeStatsResponse, afterStatsResponse); - // If no shards match the can match phase then at least one shard gets queries for an empty response. - // However, this affects the search idle stats. List active = Arrays.stream(afterStatsResponse.getShards()).filter(s -> s.isSearchIdle() == false).toList(); - assertThat(active, hasSize(1)); - assertThat(active.get(0).getShardRouting().getIndexName(), equalTo("test1")); - assertThat(active.get(0).getShardRouting().id(), equalTo(0)); + assertThat(active, hasSize(0)); } public void testSearchIdleConstantKeywordMatchOneIndex() throws InterruptedException { diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java index 445aeaa375e11..467668f008b04 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexSettings; @@ -206,10 +207,10 @@ public void testCanMatchCoordinator() throws Exception { ) .setSize(5), response -> { - assertNull(response.getHits().getTotalHits()); + assertEquals(new TotalHits(0, TotalHits.Relation.EQUAL_TO), response.getHits().getTotalHits()); assertEquals(0, response.getHits().getHits().length); assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + assertEquals(5, response.getSkippedShards()); } ); diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java index 084ccc88bee33..09fe8d1b7ad6e 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.rank.rrf; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.search.SearchType; @@ -199,10 +200,10 @@ public void testCanMatchShard() throws IOException { ) .setSize(5), response -> { - assertNull(response.getHits().getTotalHits()); + assertEquals(new TotalHits(0, TotalHits.Relation.EQUAL_TO), response.getHits().getTotalHits()); assertEquals(0, response.getHits().getHits().length); assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + assertEquals(5, response.getSkippedShards()); } ); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index ed42d86bc8c49..259d38b1fe8ee 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -384,11 +384,9 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } } else { assertResponse(client().search(request), newSearchResponse -> { - // When all shards are skipped, at least one of them should be queried in order to - // provide a proper search response. - assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); - assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); - assertThat(newSearchResponse.getFailedShards(), equalTo(1)); + assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(newSearchResponse.getFailedShards(), equalTo(0)); assertThat(newSearchResponse.getTotalShards(), equalTo(indexOutsideSearchRangeShardCount)); }); @@ -748,9 +746,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() // All the regular index searches succeeded assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards)); assertThat(newSearchResponse.getFailedShards(), equalTo(0)); - // We have to query at least one node to construct a valid response, and we pick - // a shard that's available in order to construct the search response - assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards - 1)); + assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards)); assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards)); assertThat(newSearchResponse.getHits().getTotalHits().value(), equalTo(0L)); }); diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java index a7f7b5bd3edda..208da4177fd4c 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java @@ -197,15 +197,13 @@ public void testSearchAction_RangeQueryThatMatchesNoShards() throws ExecutionExc QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents true, 0, - // All but 2 shards are skipped. TBH I don't know why this 2 shards are not skipped - oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards - 2 + oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards ); testSearchAction( QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents false, 0, - // All but 1 shards are skipped. TBH I don't know why this 1 shard is not skipped - oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards - 1 + oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards ); } diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java index f502683e42eb2..30ec6630b9618 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java @@ -484,8 +484,7 @@ private void assertDocs( logger.info(searchResponse); assertEquals(0, searchResponse.getHits().getTotalHits().value()); assertEquals(numberOfShards, searchResponse.getSuccessfulShards()); - // When all shards are skipped, at least one of them is queried in order to provide a proper search response. - assertEquals(numberOfShards - 1, searchResponse.getSkippedShards()); + assertEquals(numberOfShards, searchResponse.getSkippedShards()); } finally { searchResponse.decRef(); } From a0c1df0d0c4ecdb39d05186f96c4ae976fde4f3e Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 08:51:00 +0200 Subject: [PATCH 094/324] Speedup Query Phase Merging (#113355) Reducing contention and context switching in merging for the query phase by avoiding respining the merge task repeatedly, removing things that don't need synchronization from the synchronized blocks and merging repeated loops over the same query result arrays. --- .../search/QueryPhaseResultConsumer.java | 395 +++++++++--------- .../action/search/SearchPhaseController.java | 45 +- 2 files changed, 218 insertions(+), 222 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index 89411ac302b10..6c654d9235ec2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; @@ -121,26 +120,50 @@ public void consumeResult(SearchPhaseResult result, Runnable next) { public SearchPhaseController.ReducedQueryPhase reduce() throws Exception { if (pendingMerges.hasPendingMerges()) { throw new AssertionError("partial reduce in-flight"); - } else if (pendingMerges.hasFailure()) { - throw pendingMerges.getFailure(); + } + Exception failure = pendingMerges.failure.get(); + if (failure != null) { + throw failure; } // ensure consistent ordering pendingMerges.sortBuffer(); - final TopDocsStats topDocsStats = pendingMerges.consumeTopDocsStats(); - final List topDocsList = pendingMerges.consumeTopDocs(); + final TopDocsStats topDocsStats = pendingMerges.topDocsStats; + final int resultSize = pendingMerges.buffer.size() + (pendingMerges.mergeResult == null ? 0 : 1); + final List topDocsList = hasTopDocs ? new ArrayList<>(resultSize) : null; + final List> aggsList = hasAggs ? new ArrayList<>(resultSize) : null; + synchronized (pendingMerges) { + if (pendingMerges.mergeResult != null) { + if (topDocsList != null) { + topDocsList.add(pendingMerges.mergeResult.reducedTopDocs); + } + if (aggsList != null) { + aggsList.add(DelayableWriteable.referencing(pendingMerges.mergeResult.reducedAggs)); + } + } + for (QuerySearchResult result : pendingMerges.buffer) { + topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); + if (topDocsList != null) { + TopDocsAndMaxScore topDocs = result.consumeTopDocs(); + setShardIndex(topDocs.topDocs, result.getShardIndex()); + topDocsList.add(topDocs.topDocs); + } + if (aggsList != null) { + aggsList.add(result.getAggs()); + } + } + } SearchPhaseController.ReducedQueryPhase reducePhase; long breakerSize = pendingMerges.circuitBreakerBytes; try { - final List> aggsList = pendingMerges.getAggs(); - if (hasAggs) { + if (aggsList != null) { // Add an estimate of the final reduce size breakerSize = pendingMerges.addEstimateAndMaybeBreak(PendingMerges.estimateRamBytesUsedForReduce(breakerSize)); } reducePhase = SearchPhaseController.reducedQueryPhase( results.asList(), aggsList, - topDocsList, + topDocsList == null ? Collections.emptyList() : topDocsList, topDocsStats, pendingMerges.numReducePhases, false, @@ -183,65 +206,59 @@ private MergeResult partialReduce( // ensure consistent ordering Arrays.sort(toConsume, RESULT_COMPARATOR); - for (QuerySearchResult result : toConsume) { - topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); - } - + final List processedShards = new ArrayList<>(emptyResults); final TopDocs newTopDocs; + final InternalAggregations newAggs; + final List> aggsList; + final int resultSetSize = toConsume.length + (lastMerge != null ? 1 : 0); + if (hasAggs) { + aggsList = new ArrayList<>(resultSetSize); + if (lastMerge != null) { + aggsList.add(DelayableWriteable.referencing(lastMerge.reducedAggs)); + } + } else { + aggsList = null; + } + List topDocsList; if (hasTopDocs) { - List topDocsList = new ArrayList<>(); + topDocsList = new ArrayList<>(resultSetSize); if (lastMerge != null) { topDocsList.add(lastMerge.reducedTopDocs); } - for (QuerySearchResult result : toConsume) { - TopDocsAndMaxScore topDocs = result.consumeTopDocs(); - setShardIndex(topDocs.topDocs, result.getShardIndex()); - topDocsList.add(topDocs.topDocs); - } - newTopDocs = mergeTopDocs( - topDocsList, - // we have to merge here in the same way we collect on a shard - topNSize, - 0 - ); } else { - newTopDocs = null; + topDocsList = null; } - - final InternalAggregations newAggs; - if (hasAggs) { - try { - final List> aggsList = new ArrayList<>(); - if (lastMerge != null) { - aggsList.add(DelayableWriteable.referencing(lastMerge.reducedAggs)); - } - for (QuerySearchResult result : toConsume) { + try { + for (QuerySearchResult result : toConsume) { + topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); + SearchShardTarget target = result.getSearchShardTarget(); + processedShards.add(new SearchShard(target.getClusterAlias(), target.getShardId())); + if (aggsList != null) { aggsList.add(result.getAggs()); } - newAggs = InternalAggregations.topLevelReduceDelayable(aggsList, aggReduceContextBuilder.forPartialReduction()); - } finally { - for (QuerySearchResult result : toConsume) { - result.releaseAggs(); + if (topDocsList != null) { + TopDocsAndMaxScore topDocs = result.consumeTopDocs(); + setShardIndex(topDocs.topDocs, result.getShardIndex()); + topDocsList.add(topDocs.topDocs); } } - } else { - newAggs = null; + // we have to merge here in the same way we collect on a shard + newTopDocs = topDocsList == null ? null : mergeTopDocs(topDocsList, topNSize, 0); + newAggs = aggsList == null + ? null + : InternalAggregations.topLevelReduceDelayable(aggsList, aggReduceContextBuilder.forPartialReduction()); + } finally { + releaseAggs(toConsume); } - List processedShards = new ArrayList<>(emptyResults); if (lastMerge != null) { processedShards.addAll(lastMerge.processedShards); } - for (QuerySearchResult result : toConsume) { - SearchShardTarget target = result.getSearchShardTarget(); - processedShards.add(new SearchShard(target.getClusterAlias(), target.getShardId())); - } if (progressListener != SearchProgressListener.NOOP) { progressListener.notifyPartialReduce(processedShards, topDocsStats.getTotalHits(), newAggs, numReducePhases); } // we leave the results un-serialized because serializing is slow but we compute the serialized // size as an estimate of the memory used by the newly reduced aggregations. - long serializedSize = hasAggs ? DelayableWriteable.getSerializedSize(newAggs) : 0; - return new MergeResult(processedShards, newTopDocs, newAggs, hasAggs ? serializedSize : 0); + return new MergeResult(processedShards, newTopDocs, newAggs, newAggs != null ? DelayableWriteable.getSerializedSize(newAggs) : 0); } public int getNumReducePhases() { @@ -274,11 +291,7 @@ private class PendingMerges implements Releasable { @Override public synchronized void close() { - if (hasFailure()) { - assert circuitBreakerBytes == 0; - } else { - assert circuitBreakerBytes >= 0; - } + assert assertFailureAndBreakerConsistent(); releaseBuffer(); circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); @@ -290,8 +303,14 @@ public synchronized void close() { } } - synchronized Exception getFailure() { - return failure.get(); + private boolean assertFailureAndBreakerConsistent() { + boolean hasFailure = failure.get() != null; + if (hasFailure) { + assert circuitBreakerBytes == 0; + } else { + assert circuitBreakerBytes >= 0; + } + return true; } boolean hasFailure() { @@ -342,56 +361,71 @@ static long estimateRamBytesUsedForReduce(long size) { } public void consume(QuerySearchResult result, Runnable next) { - boolean executeNextImmediately = true; - synchronized (this) { - if (hasFailure() || result.isNull()) { - result.consumeAll(); - if (result.isNull()) { - SearchShardTarget target = result.getSearchShardTarget(); - emptyResults.add(new SearchShard(target.getClusterAlias(), target.getShardId())); - } - } else { - if (hasAggs) { - long aggsSize = ramBytesUsedQueryResult(result); - try { - addEstimateAndMaybeBreak(aggsSize); - } catch (Exception exc) { - result.releaseAggs(); - releaseBuffer(); - onMergeFailure(exc); - next.run(); - return; + if (hasFailure()) { + result.consumeAll(); + next.run(); + } else if (result.isNull()) { + result.consumeAll(); + SearchShardTarget target = result.getSearchShardTarget(); + SearchShard searchShard = new SearchShard(target.getClusterAlias(), target.getShardId()); + synchronized (this) { + emptyResults.add(searchShard); + } + next.run(); + } else { + final long aggsSize = ramBytesUsedQueryResult(result); + boolean executeNextImmediately = true; + boolean hasFailure = false; + synchronized (this) { + if (hasFailure()) { + hasFailure = true; + } else { + if (hasAggs) { + try { + addEstimateAndMaybeBreak(aggsSize); + } catch (Exception exc) { + releaseBuffer(); + onMergeFailure(exc); + hasFailure = true; + } + } + if (hasFailure == false) { + aggsCurrentBufferSize += aggsSize; + // add one if a partial merge is pending + int size = buffer.size() + (hasPartialReduce ? 1 : 0); + if (size >= batchReduceSize) { + hasPartialReduce = true; + executeNextImmediately = false; + QuerySearchResult[] clone = buffer.toArray(QuerySearchResult[]::new); + MergeTask task = new MergeTask(clone, aggsCurrentBufferSize, new ArrayList<>(emptyResults), next); + aggsCurrentBufferSize = 0; + buffer.clear(); + emptyResults.clear(); + queue.add(task); + tryExecuteNext(); + } + buffer.add(result); } - aggsCurrentBufferSize += aggsSize; - } - // add one if a partial merge is pending - int size = buffer.size() + (hasPartialReduce ? 1 : 0); - if (size >= batchReduceSize) { - hasPartialReduce = true; - executeNextImmediately = false; - QuerySearchResult[] clone = buffer.toArray(QuerySearchResult[]::new); - MergeTask task = new MergeTask(clone, aggsCurrentBufferSize, new ArrayList<>(emptyResults), next); - aggsCurrentBufferSize = 0; - buffer.clear(); - emptyResults.clear(); - queue.add(task); - tryExecuteNext(); } - buffer.add(result); } - } - if (executeNextImmediately) { - next.run(); + if (hasFailure) { + result.consumeAll(); + } + if (executeNextImmediately) { + next.run(); + } } } private void releaseBuffer() { - buffer.forEach(QuerySearchResult::releaseAggs); + for (QuerySearchResult querySearchResult : buffer) { + querySearchResult.releaseAggs(); + } buffer.clear(); } private synchronized void onMergeFailure(Exception exc) { - if (hasFailure()) { + if (failure.compareAndSet(null, exc) == false) { assert circuitBreakerBytes == 0; return; } @@ -401,79 +435,89 @@ private synchronized void onMergeFailure(Exception exc) { circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); circuitBreakerBytes = 0; } - failure.compareAndSet(null, exc); - final List toCancels = new ArrayList<>(); - toCancels.add(() -> onPartialMergeFailure.accept(exc)); + onPartialMergeFailure.accept(exc); final MergeTask task = runningTask.getAndSet(null); if (task != null) { - toCancels.add(task::cancel); + task.cancel(); } MergeTask mergeTask; while ((mergeTask = queue.pollFirst()) != null) { - toCancels.add(mergeTask::cancel); + mergeTask.cancel(); } mergeResult = null; - Releasables.close(toCancels); - } - - private void onAfterMerge(MergeTask task, MergeResult newResult, long estimatedSize) { - synchronized (this) { - if (hasFailure()) { - return; - } - runningTask.compareAndSet(task, null); - mergeResult = newResult; - if (hasAggs) { - // Update the circuit breaker to remove the size of the source aggregations - // and replace the estimation with the serialized size of the newly reduced result. - long newSize = mergeResult.estimatedSize - estimatedSize; - addWithoutBreaking(newSize); - logger.trace( - "aggs partial reduction [{}->{}] max [{}]", - estimatedSize, - mergeResult.estimatedSize, - maxAggsCurrentBufferSize - ); - } - task.consumeListener(); - } } private void tryExecuteNext() { final MergeTask task; synchronized (this) { - if (queue.isEmpty() || hasFailure() || runningTask.get() != null) { + if (hasFailure() || runningTask.get() != null) { return; } task = queue.poll(); - runningTask.compareAndSet(null, task); + runningTask.set(task); + } + if (task == null) { + return; } executor.execute(new AbstractRunnable() { @Override protected void doRun() { - final MergeResult thisMergeResult = mergeResult; - long estimatedTotalSize = (thisMergeResult != null ? thisMergeResult.estimatedSize : 0) + task.aggsBufferSize; - final MergeResult newMerge; - final QuerySearchResult[] toConsume = task.consumeBuffer(); - if (toConsume == null) { - return; - } - try { - long estimatedMergeSize = estimateRamBytesUsedForReduce(estimatedTotalSize); - addEstimateAndMaybeBreak(estimatedMergeSize); - estimatedTotalSize += estimatedMergeSize; - ++numReducePhases; - newMerge = partialReduce(toConsume, task.emptyResults, topDocsStats, thisMergeResult, numReducePhases); - } catch (Exception t) { - for (QuerySearchResult result : toConsume) { - result.releaseAggs(); + MergeTask mergeTask = task; + QuerySearchResult[] toConsume = mergeTask.consumeBuffer(); + while (mergeTask != null) { + final MergeResult thisMergeResult = mergeResult; + long estimatedTotalSize = (thisMergeResult != null ? thisMergeResult.estimatedSize : 0) + mergeTask.aggsBufferSize; + final MergeResult newMerge; + try { + long estimatedMergeSize = estimateRamBytesUsedForReduce(estimatedTotalSize); + addEstimateAndMaybeBreak(estimatedMergeSize); + estimatedTotalSize += estimatedMergeSize; + ++numReducePhases; + newMerge = partialReduce(toConsume, mergeTask.emptyResults, topDocsStats, thisMergeResult, numReducePhases); + } catch (Exception t) { + QueryPhaseResultConsumer.releaseAggs(toConsume); + onMergeFailure(t); + return; + } + synchronized (QueryPhaseResultConsumer.this) { + if (hasFailure()) { + return; + } + mergeResult = newMerge; + if (hasAggs) { + // Update the circuit breaker to remove the size of the source aggregations + // and replace the estimation with the serialized size of the newly reduced result. + long newSize = mergeResult.estimatedSize - estimatedTotalSize; + addWithoutBreaking(newSize); + if (logger.isTraceEnabled()) { + logger.trace( + "aggs partial reduction [{}->{}] max [{}]", + estimatedTotalSize, + mergeResult.estimatedSize, + maxAggsCurrentBufferSize + ); + } + } + } + Runnable r = mergeTask.consumeListener(); + synchronized (QueryPhaseResultConsumer.this) { + while (true) { + mergeTask = queue.poll(); + runningTask.set(mergeTask); + if (mergeTask == null) { + break; + } + toConsume = mergeTask.consumeBuffer(); + if (toConsume != null) { + break; + } + } + } + if (r != null) { + r.run(); } - onMergeFailure(t); - return; } - onAfterMerge(task, newMerge, estimatedTotalSize); - tryExecuteNext(); } @Override @@ -483,43 +527,6 @@ public void onFailure(Exception exc) { }); } - public synchronized TopDocsStats consumeTopDocsStats() { - for (QuerySearchResult result : buffer) { - topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); - } - return topDocsStats; - } - - public synchronized List consumeTopDocs() { - if (hasTopDocs == false) { - return Collections.emptyList(); - } - List topDocsList = new ArrayList<>(); - if (mergeResult != null) { - topDocsList.add(mergeResult.reducedTopDocs); - } - for (QuerySearchResult result : buffer) { - TopDocsAndMaxScore topDocs = result.consumeTopDocs(); - setShardIndex(topDocs.topDocs, result.getShardIndex()); - topDocsList.add(topDocs.topDocs); - } - return topDocsList; - } - - public synchronized List> getAggs() { - if (hasAggs == false) { - return Collections.emptyList(); - } - List> aggsList = new ArrayList<>(); - if (mergeResult != null) { - aggsList.add(DelayableWriteable.referencing(mergeResult.reducedAggs)); - } - for (QuerySearchResult result : buffer) { - aggsList.add(result.getAggs()); - } - return aggsList; - } - public synchronized void releaseAggs() { if (hasAggs) { for (QuerySearchResult result : buffer) { @@ -529,6 +536,12 @@ public synchronized void releaseAggs() { } } + private static void releaseAggs(QuerySearchResult... toConsume) { + for (QuerySearchResult result : toConsume) { + result.releaseAggs(); + } + } + private record MergeResult( List processedShards, TopDocs reducedTopDocs, @@ -555,21 +568,21 @@ public synchronized QuerySearchResult[] consumeBuffer() { return toRet; } - public void consumeListener() { - if (next != null) { - next.run(); - next = null; - } + public synchronized Runnable consumeListener() { + Runnable n = next; + next = null; + return n; } - public synchronized void cancel() { + public void cancel() { QuerySearchResult[] buffer = consumeBuffer(); if (buffer != null) { - for (QuerySearchResult result : buffer) { - result.releaseAggs(); - } + releaseAggs(buffer); + } + Runnable next = consumeListener(); + if (next != null) { + next.run(); } - consumeListener(); } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index ca9c4ab44c423..b118c2560925e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.DocValueFormat; @@ -190,7 +191,7 @@ public static List mergeKnnResults(SearchRequest request, List topDocs, + final List topDocs, int from, int size, List reducedCompletionSuggestions @@ -233,22 +234,22 @@ static SortedTopDocs sortDocs( return new SortedTopDocs(scoreDocs, isSortedByField, sortFields, groupField, groupValues, numSuggestDocs); } - static TopDocs mergeTopDocs(Collection results, int topN, int from) { + static TopDocs mergeTopDocs(List results, int topN, int from) { if (results.isEmpty()) { return null; } - final TopDocs topDocs = results.stream().findFirst().get(); + final TopDocs topDocs = results.getFirst(); final TopDocs mergedTopDocs; final int numShards = results.size(); if (numShards == 1 && from == 0) { // only one shard and no pagination we can just return the topDocs as we got them. return topDocs; } else if (topDocs instanceof TopFieldGroups firstTopDocs) { final Sort sort = new Sort(firstTopDocs.fields); - final TopFieldGroups[] shardTopDocs = results.toArray(new TopFieldGroups[numShards]); + final TopFieldGroups[] shardTopDocs = results.toArray(new TopFieldGroups[0]); mergedTopDocs = TopFieldGroups.merge(sort, from, topN, shardTopDocs, false); } else if (topDocs instanceof TopFieldDocs firstTopDocs) { final Sort sort = checkSameSortTypes(results, firstTopDocs.fields); - final TopFieldDocs[] shardTopDocs = results.toArray(new TopFieldDocs[numShards]); + final TopFieldDocs[] shardTopDocs = results.toArray(new TopFieldDocs[0]); mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs); } else { final TopDocs[] shardTopDocs = results.toArray(new TopDocs[numShards]); @@ -524,17 +525,7 @@ public AggregationReduceContext forFinalReduction() { topDocs.add(td.topDocs); } } - return reducedQueryPhase( - queryResults, - Collections.emptyList(), - topDocs, - topDocsStats, - 0, - true, - aggReduceContextBuilder, - null, - true - ); + return reducedQueryPhase(queryResults, null, topDocs, topDocsStats, 0, true, aggReduceContextBuilder, null, true); } /** @@ -548,7 +539,7 @@ public AggregationReduceContext forFinalReduction() { */ static ReducedQueryPhase reducedQueryPhase( Collection queryResults, - List> bufferedAggs, + @Nullable List> bufferedAggs, List bufferedTopDocs, TopDocsStats topDocsStats, int numReducePhases, @@ -642,7 +633,12 @@ static ReducedQueryPhase reducedQueryPhase( reducedSuggest = new Suggest(Suggest.reduce(groupedSuggestions)); reducedCompletionSuggestions = reducedSuggest.filter(CompletionSuggestion.class); } - final InternalAggregations aggregations = reduceAggs(aggReduceContextBuilder, performFinalReduce, bufferedAggs); + final InternalAggregations aggregations = bufferedAggs == null + ? null + : InternalAggregations.topLevelReduceDelayable( + bufferedAggs, + performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction() + ); final SearchProfileResultsBuilder profileBuilder = profileShardResults.isEmpty() ? null : new SearchProfileResultsBuilder(profileShardResults); @@ -681,19 +677,6 @@ static ReducedQueryPhase reducedQueryPhase( ); } - private static InternalAggregations reduceAggs( - AggregationReduceContext.Builder aggReduceContextBuilder, - boolean performFinalReduce, - List> toReduce - ) { - return toReduce.isEmpty() - ? null - : InternalAggregations.topLevelReduceDelayable( - toReduce, - performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction() - ); - } - /** * Checks that query results from all shards have consistent unsigned_long format. * Sort queries on a field that has long type in one index, and unsigned_long in another index From a02f68217a5bfb226fbcd3b26cfc2b125806be94 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 08:53:27 +0200 Subject: [PATCH 095/324] Lazy initialize HttpRouteStatsTracker in MethodHandlers (#114107) We use about 1M for the route stats trackers instances per ES instance. Making this lazy init should come at a trivial overhead and in fact makes the computation of the node stats cheaper by saving spurious sums on 0-valued long adders. --- .../elasticsearch/http/HttpRouteStats.java | 2 + .../elasticsearch/rest/MethodHandlers.java | 42 ++++++++++++++----- .../elasticsearch/rest/RestController.java | 25 +++++------ 3 files changed, 46 insertions(+), 23 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java b/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java index 5be1ae9312c46..a15b929fd3c1b 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java +++ b/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java @@ -49,6 +49,8 @@ public record HttpRouteStats( long[] responseTimeHistogram ) implements Writeable, ToXContentObject { + public static final HttpRouteStats EMPTY = new HttpRouteStats(0, 0, new long[0], 0, 0, new long[0], new long[0]); + public HttpRouteStats(StreamInput in) throws IOException { this(in.readVLong(), in.readVLong(), in.readVLongArray(), in.readVLong(), in.readVLong(), in.readVLongArray(), in.readVLongArray()); } diff --git a/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java b/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java index a947ddce2b9f3..2f53f48f9ae5b 100644 --- a/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java +++ b/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java @@ -13,6 +13,8 @@ import org.elasticsearch.http.HttpRouteStats; import org.elasticsearch.http.HttpRouteStatsTracker; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; import java.util.EnumMap; import java.util.Map; import java.util.Set; @@ -25,7 +27,18 @@ final class MethodHandlers { private final String path; private final Map> methodHandlers; - private final HttpRouteStatsTracker statsTracker = new HttpRouteStatsTracker(); + @SuppressWarnings("unused") // only accessed via #STATS_TRACKER_HANDLE, lazy initialized because instances consume non-trivial heap + private volatile HttpRouteStatsTracker statsTracker; + + private static final VarHandle STATS_TRACKER_HANDLE; + + static { + try { + STATS_TRACKER_HANDLE = MethodHandles.lookup().findVarHandle(MethodHandlers.class, "statsTracker", HttpRouteStatsTracker.class); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new ExceptionInInitializerError(e); + } + } MethodHandlers(String path) { this.path = path; @@ -73,19 +86,26 @@ Set getValidMethods() { return methodHandlers.keySet(); } - public void addRequestStats(int contentLength) { - statsTracker.addRequestStats(contentLength); - } - - public void addResponseStats(long contentLength) { - statsTracker.addResponseStats(contentLength); + public HttpRouteStats getStats() { + var tracker = existingStatsTracker(); + if (tracker == null) { + return HttpRouteStats.EMPTY; + } + return tracker.getStats(); } - public void addResponseTime(long timeMillis) { - statsTracker.addResponseTime(timeMillis); + public HttpRouteStatsTracker statsTracker() { + var tracker = existingStatsTracker(); + if (tracker == null) { + var newTracker = new HttpRouteStatsTracker(); + if ((tracker = (HttpRouteStatsTracker) STATS_TRACKER_HANDLE.compareAndExchange(this, null, newTracker)) == null) { + tracker = newTracker; + } + } + return tracker; } - public HttpRouteStats getStats() { - return statsTracker.getStats(); + private HttpRouteStatsTracker existingStatsTracker() { + return (HttpRouteStatsTracker) STATS_TRACKER_HANDLE.getAcquire(this); } } diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index c2064fdd931de..7446ec5bb6717 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -36,6 +36,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.http.HttpHeadersValidationException; import org.elasticsearch.http.HttpRouteStats; +import org.elasticsearch.http.HttpRouteStatsTracker; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; @@ -879,7 +880,7 @@ public void sendResponse(RestResponse response) { private static final class ResourceHandlingHttpChannel extends DelegatingRestChannel { private final CircuitBreakerService circuitBreakerService; private final int contentLength; - private final MethodHandlers methodHandlers; + private final HttpRouteStatsTracker statsTracker; private final long startTime; private final AtomicBoolean closed = new AtomicBoolean(); @@ -892,7 +893,7 @@ private static final class ResourceHandlingHttpChannel extends DelegatingRestCha super(delegate); this.circuitBreakerService = circuitBreakerService; this.contentLength = contentLength; - this.methodHandlers = methodHandlers; + this.statsTracker = methodHandlers.statsTracker(); this.startTime = rawRelativeTimeInMillis(); } @@ -901,12 +902,12 @@ public void sendResponse(RestResponse response) { boolean success = false; try { close(); - methodHandlers.addRequestStats(contentLength); - methodHandlers.addResponseTime(rawRelativeTimeInMillis() - startTime); + statsTracker.addRequestStats(contentLength); + statsTracker.addResponseTime(rawRelativeTimeInMillis() - startTime); if (response.isChunked() == false) { - methodHandlers.addResponseStats(response.content().length()); + statsTracker.addResponseStats(response.content().length()); } else { - final var responseLengthRecorder = new ResponseLengthRecorder(methodHandlers); + final var responseLengthRecorder = new ResponseLengthRecorder(statsTracker); final var headers = response.getHeaders(); response = RestResponse.chunked( response.status(), @@ -941,11 +942,11 @@ private void close() { } } - private static class ResponseLengthRecorder extends AtomicReference implements Releasable { + private static class ResponseLengthRecorder extends AtomicReference implements Releasable { private long responseLength; - private ResponseLengthRecorder(MethodHandlers methodHandlers) { - super(methodHandlers); + private ResponseLengthRecorder(HttpRouteStatsTracker routeStatsTracker) { + super(routeStatsTracker); } @Override @@ -953,11 +954,11 @@ public void close() { // closed just before sending the last chunk, and also when the whole RestResponse is closed since the client might abort the // connection before we send the last chunk, in which case we won't have recorded the response in the // stats yet; thus we need run-once semantics here: - final var methodHandlers = getAndSet(null); - if (methodHandlers != null) { + final var routeStatsTracker = getAndSet(null); + if (routeStatsTracker != null) { // if we started sending chunks then we're closed on the transport worker, no need for sync assert responseLength == 0L || Transports.assertTransportThread(); - methodHandlers.addResponseStats(responseLength); + routeStatsTracker.addResponseStats(responseLength); } } From ca4009e29823ae3eaaad26b75d8bb47ade5e218c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 25 Oct 2024 09:13:18 +0200 Subject: [PATCH 096/324] [DOCS] Adds stream inference API docs (#115333) Co-authored-by: Pat Whelan --- .../inference/inference-apis.asciidoc | 2 + .../inference/stream-inference.asciidoc | 122 ++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 docs/reference/inference/stream-inference.asciidoc diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index ddcff1abc7dce..1206cb02ba89a 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -19,6 +19,7 @@ the following APIs to manage {infer} models and perform {infer}: * <> * <> * <> +* <> * <> [[inference-landscape]] @@ -56,6 +57,7 @@ include::delete-inference.asciidoc[] include::get-inference.asciidoc[] include::post-inference.asciidoc[] include::put-inference.asciidoc[] +include::stream-inference.asciidoc[] include::update-inference.asciidoc[] include::service-alibabacloud-ai-search.asciidoc[] include::service-amazon-bedrock.asciidoc[] diff --git a/docs/reference/inference/stream-inference.asciidoc b/docs/reference/inference/stream-inference.asciidoc new file mode 100644 index 0000000000000..e66acd630cb3e --- /dev/null +++ b/docs/reference/inference/stream-inference.asciidoc @@ -0,0 +1,122 @@ +[role="xpack"] +[[stream-inference-api]] +=== Stream inference API + +Streams a chat completion response. + +IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. + + +[discrete] +[[stream-inference-api-request]] +==== {api-request-title} + +`POST /_inference//_stream` + +`POST /_inference///_stream` + + +[discrete] +[[stream-inference-api-prereqs]] +==== {api-prereq-title} + +* Requires the `monitor_inference` <> +(the built-in `inference_admin` and `inference_user` roles grant this privilege) +* You must use a client that supports streaming. + + +[discrete] +[[stream-inference-api-desc]] +==== {api-description-title} + +The stream {infer} API enables real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. +It only works with the `completion` task type. + + +[discrete] +[[stream-inference-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +The unique identifier of the {infer} endpoint. + + +``:: +(Optional, string) +The type of {infer} task that the model performs. + + +[discrete] +[[stream-inference-api-request-body]] +==== {api-request-body-title} + +`input`:: +(Required, string or array of strings) +The text on which you want to perform the {infer} task. +`input` can be a single string or an array. ++ +-- +[NOTE] +==== +Inference endpoints for the `completion` task type currently only support a +single string as input. +==== +-- + + +[discrete] +[[stream-inference-api-example]] +==== {api-examples-title} + +The following example performs a completion on the example question with streaming. + + +[source,console] +------------------------------------------------------------ +POST _inference/completion/openai-completion/_stream +{ + "input": "What is Elastic?" +} +------------------------------------------------------------ +// TEST[skip:TBD] + + +The API returns the following response: + + +[source,txt] +------------------------------------------------------------ +event: message +data: { + "completion":[{ + "delta":"Elastic" + }] +} + +event: message +data: { + "completion":[{ + "delta":" is" + }, + { + "delta":" a" + } + ] +} + +event: message +data: { + "completion":[{ + "delta":" software" + }, + { + "delta":" company" + }] +} + +(...) +------------------------------------------------------------ +// NOTCONSOLE From 6688fe225584cfa8d12ebb5e56662918a593f690 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:26:12 +0300 Subject: [PATCH 097/324] Remove excluded tests from rest compat (#115617) --- x-pack/plugin/downsample/qa/rest/build.gradle | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/x-pack/plugin/downsample/qa/rest/build.gradle b/x-pack/plugin/downsample/qa/rest/build.gradle index 5142632a36006..ba5ac7b0c7317 100644 --- a/x-pack/plugin/downsample/qa/rest/build.gradle +++ b/x-pack/plugin/downsample/qa/rest/build.gradle @@ -32,20 +32,6 @@ tasks.named('yamlRestTest') { tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } -tasks.named("yamlRestCompatTestTransform").configure ({ task -> - task.skipTest("downsample/10_basic/Downsample index with empty dimension on routing path", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample histogram as label", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample date timestamp field using strict_date_optional_time_nanos format", - "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample a downsampled index", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample date_nanos timestamp field using custom format", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample using coarse grained timestamp", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample label with ignore_above", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample object field", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample empty and missing labels", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample index", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample index with empty dimension", "Skip until pr/115358 gets backported") -}) if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } From e7897bdeff7f4ec76e8a0801c86f5dea11cacabd Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Fri, 25 Oct 2024 09:57:12 +0200 Subject: [PATCH 098/324] Return `_ignored_source` as a top level array field (#115328) This PR introduces a fix for the `fields` and `stored_fields` APIs and the way `_ignored_source` field is handled: 1. **Return `_ignored_source` as a top-level array metadata field**: - The `_ignored_source` field is now returned as a top-level array in the metadata as done with other metadata fields. 2. **Return `_ignored_source` as an array of values**: - Even when there is only a single ignored field, `_ignored_source` will now be returned as an array of values. This is done to be consistent with how the `_ignored` field is returned. Without this fix, we would return the `_ignored_source` field twice, as a top-level field and as part of the `fields` array. Also, without this fix, we would only return a single value instead of all ignored field values. --- .../mapper/IgnoredSourceFieldMapper.java | 3 + .../index/mapper/MapperFeatures.java | 1 + .../org/elasticsearch/search/SearchHit.java | 3 +- .../fetch/subphase/FetchFieldsPhase.java | 25 ++- .../index/get/DocumentFieldTests.java | 5 +- .../search/SearchResponseUtils.java | 3 +- .../rest-api-spec/test/20_ignored_source.yml | 158 +++++++++++++++++- 7 files changed, 182 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index 70d73fc2ffb9a..7e2bebfd403cb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -58,6 +58,9 @@ public class IgnoredSourceFieldMapper extends MetadataFieldMapper { static final NodeFeature TRACK_IGNORED_SOURCE = new NodeFeature("mapper.track_ignored_source"); static final NodeFeature DONT_EXPAND_DOTS_IN_IGNORED_SOURCE = new NodeFeature("mapper.ignored_source.dont_expand_dots"); + static final NodeFeature IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD = new NodeFeature( + "mapper.ignored_source_as_top_level_metadata_array_field" + ); static final NodeFeature ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS = new NodeFeature( "mapper.ignored_source.always_store_object_arrays_in_nested" ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 026c7c98d7aeb..a5f173afffba2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -63,6 +63,7 @@ public Set getTestFeatures() { RangeFieldMapper.DATE_RANGE_INDEXING_FIX, IgnoredSourceFieldMapper.DONT_EXPAND_DOTS_IN_IGNORED_SOURCE, SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION, + IgnoredSourceFieldMapper.IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD, IgnoredSourceFieldMapper.ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS ); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 1611c95d99df4..98f7c92d9997a 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.SimpleRefCounted; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -847,7 +848,7 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t } // _ignored is the only multi-valued meta field // TODO: can we avoid having an exception here? - if (field.getName().equals(IgnoredFieldMapper.NAME)) { + if (IgnoredFieldMapper.NAME.equals(field.getName()) || IgnoredSourceFieldMapper.NAME.equals(field.getName())) { builder.field(field.getName(), field.getValues()); } else { builder.field(field.getName(), field.getValue()); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java index 03bfbd40d97be..e0cb5a668b4ab 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java @@ -57,13 +57,28 @@ public FetchSubPhaseProcessor getProcessor(FetchContext fetchContext) { return null; } + // NOTE: FieldFetcher for non-metadata fields, as well as `_id` and `_source`. + // We need to retain `_id` and `_source` here to correctly populate the `StoredFieldSpecs` created by the + // `FieldFetcher` constructor. final SearchExecutionContext searchExecutionContext = fetchContext.getSearchExecutionContext(); - final FieldFetcher fieldFetcher = fetchFieldsContext == null ? null - : fetchFieldsContext.fields() == null ? null - : fetchFieldsContext.fields().isEmpty() ? null - : FieldFetcher.create(searchExecutionContext, fetchFieldsContext.fields()); + final FieldFetcher fieldFetcher = (fetchFieldsContext == null + || fetchFieldsContext.fields() == null + || fetchFieldsContext.fields().isEmpty()) + ? null + : FieldFetcher.create( + searchExecutionContext, + fetchFieldsContext.fields() + .stream() + .filter( + fieldAndFormat -> (searchExecutionContext.isMetadataField(fieldAndFormat.field) == false + || searchExecutionContext.getFieldType(fieldAndFormat.field).isStored() == false + || IdFieldMapper.NAME.equals(fieldAndFormat.field) + || SourceFieldMapper.NAME.equals(fieldAndFormat.field)) + ) + .toList() + ); - // NOTE: Collect stored metadata fields requested via `fields` (in FetchFieldsContext`) like for instance the _ignored source field + // NOTE: Collect stored metadata fields requested via `fields` (in FetchFieldsContext) like for instance the _ignored source field final Set fetchContextMetadataFields = new HashSet<>(); if (fetchFieldsContext != null && fetchFieldsContext.fields() != null && fetchFieldsContext.fields().isEmpty() == false) { for (final FieldAndFormat fieldAndFormat : fetchFieldsContext.fields()) { diff --git a/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java b/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java index 76426e9df83d8..8a27c3545a110 100644 --- a/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java +++ b/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; @@ -122,7 +123,7 @@ public static Tuple randomDocumentField( if (isMetafield) { String metaField = randomValueOtherThanMany(excludeMetaFieldFilter, () -> randomFrom(IndicesModule.getBuiltInMetadataFields())); DocumentField documentField; - if (metaField.equals(IgnoredFieldMapper.NAME)) { + if (IgnoredFieldMapper.NAME.equals(metaField) || IgnoredSourceFieldMapper.NAME.equals(metaField)) { int numValues = randomIntBetween(1, 3); List ignoredFields = new ArrayList<>(numValues); for (int i = 0; i < numValues; i++) { @@ -130,7 +131,7 @@ public static Tuple randomDocumentField( } documentField = new DocumentField(metaField, ignoredFields); } else { - // meta fields are single value only, besides _ignored + // meta fields are single value only, besides _ignored and _ignored_source documentField = new DocumentField(metaField, Collections.singletonList(randomAlphaOfLengthBetween(3, 10))); } return Tuple.tuple(documentField, documentField); diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index df1ea6b756405..b0edbb829df2a 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; @@ -83,7 +84,7 @@ public enum SearchResponseUtils { SearchHit.METADATA_FIELDS, v -> new HashMap() ); - if (fieldName.equals(IgnoredFieldMapper.NAME)) { + if (IgnoredFieldMapper.NAME.equals(fieldName) || IgnoredSourceFieldMapper.NAME.equals(fieldName)) { fieldMap.put(fieldName, new DocumentField(fieldName, (List) fieldValue)); } else { fieldMap.put(fieldName, new DocumentField(fieldName, Collections.singletonList(fieldValue))); diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml index c54edb0994860..2f111d579ebb1 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml @@ -27,6 +27,10 @@ setup: --- "fetch stored fields wildcard": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -40,6 +44,10 @@ setup: --- "fetch fields wildcard": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -53,6 +61,10 @@ setup: --- "fetch stored fields by name": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -62,10 +74,14 @@ setup: stored_fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "fetch fields by name": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -75,10 +91,14 @@ setup: fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "fields and stored fields combination": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -92,10 +112,14 @@ setup: - match: { hits.total.value: 1 } - match: { hits.hits.0.fields.object: null } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "wildcard fields and stored fields combination": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: search: index: test @@ -108,6 +132,10 @@ setup: --- "fields with ignored source in stored fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -118,11 +146,15 @@ setup: fields: [ object ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } - match: { hits.hits.0.fields: null } --- "fields with ignored source in fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -133,10 +165,14 @@ setup: fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "ignored source via fields and wildcard stored fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -147,10 +183,14 @@ setup: fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "wildcard fields and ignored source via stored fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -161,4 +201,108 @@ setup: fields: [ "*" ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + +--- +ignored source array via fields: + - requires: + cluster_features: [mapper.ignored_source_as_top_level_metadata_array_field] + reason: requires returning the _ignored_source field as a top level array metadata field + + - do: + indices.create: + index: test-dynamic-fields + body: + settings: + index: + mapping: + source: + mode: synthetic + total_fields: + ignore_dynamic_beyond_limit: true + limit: 1 # only `name` static mapping is allowed + mappings: + properties: + name: + type: keyword + + - do: + bulk: + index: test-dynamic-fields + refresh: true + body: + - '{ "index": { } }' + - '{ "name": "foo", "value": 1, "id": "f5t7-66gt" }' + - match: { errors: false } + + - do: + headers: + Content-Type: application/yaml + search: + index: test-dynamic-fields + body: + fields: [ "_ignored_source" ] + query: + match_all: {} + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.name: "foo" } + - match: { hits.hits.0._source.value: 1 } + - match: { hits.hits.0._source.id: "f5t7-66gt" } + - match: { hits.hits.0._ignored: [ "id", "value" ]} + - length: { hits.hits.0._ignored_source: 2 } + - match: { hits.hits.0._ignored_source.0: !!binary "AgAAAGlkU2Y1dDctNjZndA==" } # `id` field + - match: { hits.hits.0._ignored_source.1: !!binary "BQAAAHZhbHVlSQEAAAA=" } # `value` field + +--- +ignored source array via stored_fields: + - requires: + cluster_features: [mapper.ignored_source_as_top_level_metadata_array_field] + reason: requires returning the _ignored_source field as a top level array metadata field + + - do: + indices.create: + index: test-dynamic-stored-fields + body: + settings: + index: + mapping: + source: + mode: synthetic + total_fields: + ignore_dynamic_beyond_limit: true + limit: 1 # only `name` static mapping is allowed + mappings: + properties: + name: + type: keyword + + - do: + bulk: + index: test-dynamic-stored-fields + refresh: true + body: + - '{ "index": { } }' + - '{ "name": "foo", "value": 1, "id": "f5t7-66gt" }' + - match: { errors: false } + + - do: + headers: + Content-Type: application/yaml + search: + index: test-dynamic-stored-fields + body: + # NOTE: when using synthetic source `_source` field needs to be explicitly requested via `stored_fields`, + # a wildcard request would not include it. + stored_fields: [ "_ignored_source", "_source" ] + query: + match_all: {} + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.name: "foo" } + - match: { hits.hits.0._source.value: 1 } + - match: { hits.hits.0._source.id: "f5t7-66gt" } + - match: { hits.hits.0._ignored: [ "id", "value" ]} + - length: { hits.hits.0._ignored_source: 2 } + - match: { hits.hits.0._ignored_source.0: !!binary "AgAAAGlkU2Y1dDctNjZndA==" } # `id` field + - match: { hits.hits.0._ignored_source.1: !!binary "BQAAAHZhbHVlSQEAAAA=" } # `value` field From 3d307e0d7867116585dccfb335e0cab0c192bdb9 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Fri, 25 Oct 2024 10:09:53 +0200 Subject: [PATCH 099/324] Don't return TEXT type for functions that take TEXT (#114334) Always return `KEYWORD` for functions that previously returned `TEXT`, because any change to the value, no matter how small, is enough to render meaningless the original analyzer associated with the `TEXT` field value. In principle, if the attribute is no longer the original `FieldAttribute`, it can no longer claim to have the type `TEXT`. This has been done for all functions: conversion functions, aggregating functions, multi-value functions. There were several that already produced `KEYWORD` for `TEXT` input (eg. ToString, FromBase64 and ToBase64, MvZip, ToLower, ToUpper, DateFormat, Concat, Left, Repeat, Replace, Right, Split, Substring), but many others that incorrectly claimed to produce `TEXT`, while this was really a false claim. This PR makes that now strict, and includes changes to the functions' units tests to disallow the tests to expect any functions output to be `TEXT`. One side effect of this change is that methods that take multiple parameters that require all of them to have the same type, will now treat TEXT and KEYWORD the same. This was already the case for functions like `Concat`, but is now also the case for `Greatest`, `Least`, `Case`, `Coalesce` and `MvAppend`. An associated change is that the type casting operator `::text` has been entirely removed. It used to map onto the `ToString` function which returned type KEYWORD, and so `::text` really produced a `KEYWORD`, which is a lie, or at least a `bug`, which is now fixed. Should we ever wish to actually produce real `TEXT`, we might love the fact that this operator has been freed up for future use (although it seems likely that function will require parameters to specify the analyzer, so might never be an operator again). ### Backwards compatibility issues: This is a change that will fail BWC tests, since we have many tests that assert on TEXT output to functions. For this reason we needed to block two scenarios: * We used the capability `functions_never_emit_text` to prevent 7 csv-spec tests and 2 yaml tests from being run against older versions that still emit text. * We used `skipTest` to also block those two yaml tests from being run against the latest build, but using older yaml files downloaded (as far back as 8.14). In all cases the change observed in these tests was simply the results columns no longer having `text` type, and instead being `keyword`. --------- Co-authored-by: Luigi Dell'Aquila --- docs/changelog/114334.yaml | 7 +++ .../functions/kibana/definition/case.json | 52 +++++++++++++++- .../functions/kibana/definition/coalesce.json | 4 +- .../functions/kibana/definition/greatest.json | 4 +- .../functions/kibana/definition/least.json | 4 +- .../functions/kibana/definition/ltrim.json | 2 +- .../esql/functions/kibana/definition/max.json | 2 +- .../esql/functions/kibana/definition/min.json | 2 +- .../kibana/definition/mv_append.json | 2 +- .../kibana/definition/mv_dedupe.json | 2 +- .../functions/kibana/definition/mv_first.json | 2 +- .../functions/kibana/definition/mv_last.json | 2 +- .../functions/kibana/definition/mv_max.json | 2 +- .../functions/kibana/definition/mv_min.json | 2 +- .../functions/kibana/definition/mv_slice.json | 2 +- .../functions/kibana/definition/mv_sort.json | 2 +- .../functions/kibana/definition/reverse.json | 2 +- .../functions/kibana/definition/rtrim.json | 2 +- .../functions/kibana/definition/to_lower.json | 2 +- .../functions/kibana/definition/to_upper.json | 2 +- .../esql/functions/kibana/definition/top.json | 2 +- .../functions/kibana/definition/trim.json | 2 +- .../functions/kibana/definition/values.json | 2 +- .../esql/functions/kibana/inline_cast.json | 1 - .../esql/functions/types/case.asciidoc | 6 +- .../esql/functions/types/coalesce.asciidoc | 4 +- .../esql/functions/types/greatest.asciidoc | 4 +- .../esql/functions/types/least.asciidoc | 4 +- .../esql/functions/types/ltrim.asciidoc | 2 +- .../esql/functions/types/max.asciidoc | 2 +- .../esql/functions/types/min.asciidoc | 2 +- .../esql/functions/types/mv_append.asciidoc | 2 +- .../esql/functions/types/mv_dedupe.asciidoc | 2 +- .../esql/functions/types/mv_first.asciidoc | 2 +- .../esql/functions/types/mv_last.asciidoc | 2 +- .../esql/functions/types/mv_max.asciidoc | 2 +- .../esql/functions/types/mv_min.asciidoc | 2 +- .../esql/functions/types/mv_slice.asciidoc | 2 +- .../esql/functions/types/mv_sort.asciidoc | 2 +- .../esql/functions/types/reverse.asciidoc | 2 +- .../esql/functions/types/rtrim.asciidoc | 2 +- .../esql/functions/types/to_lower.asciidoc | 2 +- .../esql/functions/types/to_upper.asciidoc | 2 +- .../esql/functions/types/top.asciidoc | 2 +- .../esql/functions/types/trim.asciidoc | 2 +- .../esql/functions/types/values.asciidoc | 2 +- x-pack/plugin/build.gradle | 2 + .../xpack/esql/core/type/DataType.java | 4 ++ .../src/main/resources/convert.csv-spec | 6 +- .../src/main/resources/stats.csv-spec | 14 +++-- .../src/main/resources/stats_top.csv-spec | 6 +- .../src/main/resources/string.csv-spec | 3 +- .../xpack/esql/action/EsqlCapabilities.java | 5 ++ .../expression/function/aggregate/Max.java | 4 +- .../expression/function/aggregate/Min.java | 4 +- .../expression/function/aggregate/Top.java | 4 +- .../expression/function/aggregate/Values.java | 4 +- .../function/scalar/UnaryScalarFunction.java | 2 +- .../function/scalar/conditional/Case.java | 5 +- .../function/scalar/conditional/Greatest.java | 6 +- .../function/scalar/conditional/Least.java | 6 +- .../function/scalar/multivalue/MvAppend.java | 7 +-- .../function/scalar/multivalue/MvDedupe.java | 1 - .../function/scalar/multivalue/MvFirst.java | 1 - .../function/scalar/multivalue/MvLast.java | 1 - .../function/scalar/multivalue/MvMax.java | 2 +- .../function/scalar/multivalue/MvMin.java | 2 +- .../function/scalar/multivalue/MvSlice.java | 3 +- .../function/scalar/multivalue/MvSort.java | 4 +- .../function/scalar/nulls/Coalesce.java | 5 +- .../function/scalar/string/LTrim.java | 2 +- .../function/scalar/string/RTrim.java | 2 +- .../function/scalar/string/Reverse.java | 2 +- .../function/scalar/string/ToLower.java | 4 +- .../function/scalar/string/ToUpper.java | 4 +- .../function/scalar/string/Trim.java | 2 +- .../esql/type/EsqlDataTypeConverter.java | 1 - .../xpack/esql/analysis/ParsingTests.java | 3 - .../expression/function/TestCaseSupplier.java | 2 +- .../function/aggregate/MaxTests.java | 2 +- .../function/aggregate/MinTests.java | 2 +- .../scalar/conditional/CaseTests.java | 59 ++++++++++++++++++- .../function/scalar/string/ToLowerTests.java | 2 +- .../function/scalar/string/ToUpperTests.java | 2 +- .../rest-api-spec/test/esql/80_text.yml | 24 ++++++-- 85 files changed, 253 insertions(+), 123 deletions(-) create mode 100644 docs/changelog/114334.yaml diff --git a/docs/changelog/114334.yaml b/docs/changelog/114334.yaml new file mode 100644 index 0000000000000..d0fefe40c6970 --- /dev/null +++ b/docs/changelog/114334.yaml @@ -0,0 +1,7 @@ +pr: 114334 +summary: Don't return TEXT type for functions that take TEXT +area: ES|QL +type: bug +issues: + - 111537 + - 114333 diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json index 1cf2c6ce7a579..bf498f690551c 100644 --- a/docs/reference/esql/functions/kibana/definition/case.json +++ b/docs/reference/esql/functions/kibana/definition/case.json @@ -424,6 +424,30 @@ "variadic" : true, "returnType" : "keyword" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "keyword", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "text", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "keyword" + }, { "params" : [ { @@ -482,7 +506,31 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "text", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "keyword", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "keyword" }, { "params" : [ @@ -506,7 +554,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/coalesce.json b/docs/reference/esql/functions/kibana/definition/coalesce.json index 9ebc5a97229cd..7f49195190951 100644 --- a/docs/reference/esql/functions/kibana/definition/coalesce.json +++ b/docs/reference/esql/functions/kibana/definition/coalesce.json @@ -242,7 +242,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ @@ -260,7 +260,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/greatest.json b/docs/reference/esql/functions/kibana/definition/greatest.json index 2818a5ac56339..eebb4fad1eb1d 100644 --- a/docs/reference/esql/functions/kibana/definition/greatest.json +++ b/docs/reference/esql/functions/kibana/definition/greatest.json @@ -189,7 +189,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ @@ -207,7 +207,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/least.json b/docs/reference/esql/functions/kibana/definition/least.json index 7b545896f4ddc..02fa58f92eaef 100644 --- a/docs/reference/esql/functions/kibana/definition/least.json +++ b/docs/reference/esql/functions/kibana/definition/least.json @@ -188,7 +188,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ @@ -206,7 +206,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/ltrim.json b/docs/reference/esql/functions/kibana/definition/ltrim.json index e85c2d42dedee..6d992b9db7b2c 100644 --- a/docs/reference/esql/functions/kibana/definition/ltrim.json +++ b/docs/reference/esql/functions/kibana/definition/ltrim.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json index 09ca95a0afeff..45fd26571b091 100644 --- a/docs/reference/esql/functions/kibana/definition/max.json +++ b/docs/reference/esql/functions/kibana/definition/max.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json index 3e87b3e9038e1..ae71fba049dbe 100644 --- a/docs/reference/esql/functions/kibana/definition/min.json +++ b/docs/reference/esql/functions/kibana/definition/min.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_append.json b/docs/reference/esql/functions/kibana/definition/mv_append.json index c14a3337a25a7..81c1b777be498 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_append.json +++ b/docs/reference/esql/functions/kibana/definition/mv_append.json @@ -218,7 +218,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json index 9bb0935c6a5de..bfca58bc3e140 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json +++ b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json @@ -147,7 +147,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_first.json b/docs/reference/esql/functions/kibana/definition/mv_first.json index 80e761faafab9..a2b6358023e4b 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_first.json +++ b/docs/reference/esql/functions/kibana/definition/mv_first.json @@ -146,7 +146,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_last.json b/docs/reference/esql/functions/kibana/definition/mv_last.json index fb16400f86e62..b6dc268af5305 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_last.json +++ b/docs/reference/esql/functions/kibana/definition/mv_last.json @@ -146,7 +146,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_max.json b/docs/reference/esql/functions/kibana/definition/mv_max.json index 17cdae8a3d39c..27d2b010dc02c 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_max.json +++ b/docs/reference/esql/functions/kibana/definition/mv_max.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_min.json b/docs/reference/esql/functions/kibana/definition/mv_min.json index 3718a0f6e1de5..410e97335687f 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_min.json +++ b/docs/reference/esql/functions/kibana/definition/mv_min.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_slice.json b/docs/reference/esql/functions/kibana/definition/mv_slice.json index 399a6145b040e..dbbfe0ffb5a78 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_slice.json +++ b/docs/reference/esql/functions/kibana/definition/mv_slice.json @@ -290,7 +290,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_sort.json b/docs/reference/esql/functions/kibana/definition/mv_sort.json index c78ade7c8a94f..4cb255fb0afcb 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_sort.json +++ b/docs/reference/esql/functions/kibana/definition/mv_sort.json @@ -146,7 +146,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/reverse.json b/docs/reference/esql/functions/kibana/definition/reverse.json index 1b222691530f2..0652d9cfa6b15 100644 --- a/docs/reference/esql/functions/kibana/definition/reverse.json +++ b/docs/reference/esql/functions/kibana/definition/reverse.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/rtrim.json b/docs/reference/esql/functions/kibana/definition/rtrim.json index 028f442de9632..9c8a7578ed789 100644 --- a/docs/reference/esql/functions/kibana/definition/rtrim.json +++ b/docs/reference/esql/functions/kibana/definition/rtrim.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_lower.json b/docs/reference/esql/functions/kibana/definition/to_lower.json index f9b49a29a8c7d..07bb057fe080d 100644 --- a/docs/reference/esql/functions/kibana/definition/to_lower.json +++ b/docs/reference/esql/functions/kibana/definition/to_lower.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_upper.json b/docs/reference/esql/functions/kibana/definition/to_upper.json index edf36a982f56b..caa9d563b08b1 100644 --- a/docs/reference/esql/functions/kibana/definition/to_upper.json +++ b/docs/reference/esql/functions/kibana/definition/to_upper.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/top.json b/docs/reference/esql/functions/kibana/definition/top.json index 7fa4ff123eec7..82bd80636152c 100644 --- a/docs/reference/esql/functions/kibana/definition/top.json +++ b/docs/reference/esql/functions/kibana/definition/top.json @@ -194,7 +194,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/trim.json b/docs/reference/esql/functions/kibana/definition/trim.json index 6edf13e588e62..45805b3bfb054 100644 --- a/docs/reference/esql/functions/kibana/definition/trim.json +++ b/docs/reference/esql/functions/kibana/definition/trim.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/values.json b/docs/reference/esql/functions/kibana/definition/values.json index e289173d9d989..ae69febd4f755 100644 --- a/docs/reference/esql/functions/kibana/definition/values.json +++ b/docs/reference/esql/functions/kibana/definition/values.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/inline_cast.json b/docs/reference/esql/functions/kibana/inline_cast.json index 81a1966773238..9f663c8d0d6a3 100644 --- a/docs/reference/esql/functions/kibana/inline_cast.json +++ b/docs/reference/esql/functions/kibana/inline_cast.json @@ -15,7 +15,6 @@ "keyword" : "to_string", "long" : "to_long", "string" : "to_string", - "text" : "to_string", "time_duration" : "to_timeduration", "unsigned_long" : "to_unsigned_long", "version" : "to_version" diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc index e8aa3eaf5daae..c6fb6a091e9d0 100644 --- a/docs/reference/esql/functions/types/case.asciidoc +++ b/docs/reference/esql/functions/types/case.asciidoc @@ -24,11 +24,13 @@ boolean | integer | | integer boolean | ip | ip | ip boolean | ip | | ip boolean | keyword | keyword | keyword +boolean | keyword | text | keyword boolean | keyword | | keyword boolean | long | long | long boolean | long | | long -boolean | text | text | text -boolean | text | | text +boolean | text | keyword | keyword +boolean | text | text | keyword +boolean | text | | keyword boolean | unsigned_long | unsigned_long | unsigned_long boolean | unsigned_long | | unsigned_long boolean | version | version | version diff --git a/docs/reference/esql/functions/types/coalesce.asciidoc b/docs/reference/esql/functions/types/coalesce.asciidoc index 368a12db0dca4..23a249494e0a2 100644 --- a/docs/reference/esql/functions/types/coalesce.asciidoc +++ b/docs/reference/esql/functions/types/coalesce.asciidoc @@ -19,7 +19,7 @@ keyword | keyword | keyword keyword | | keyword long | long | long long | | long -text | text | text -text | | text +text | text | keyword +text | | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/greatest.asciidoc b/docs/reference/esql/functions/types/greatest.asciidoc index 1454bbb6f81c1..7df77a6991315 100644 --- a/docs/reference/esql/functions/types/greatest.asciidoc +++ b/docs/reference/esql/functions/types/greatest.asciidoc @@ -16,7 +16,7 @@ keyword | keyword | keyword keyword | | keyword long | long | long long | | long -text | text | text -text | | text +text | text | keyword +text | | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/least.asciidoc b/docs/reference/esql/functions/types/least.asciidoc index 1454bbb6f81c1..7df77a6991315 100644 --- a/docs/reference/esql/functions/types/least.asciidoc +++ b/docs/reference/esql/functions/types/least.asciidoc @@ -16,7 +16,7 @@ keyword | keyword | keyword keyword | | keyword long | long | long long | | long -text | text | text -text | | text +text | text | keyword +text | | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/ltrim.asciidoc b/docs/reference/esql/functions/types/ltrim.asciidoc index 41d60049d59b8..1ba0e98ec8f09 100644 --- a/docs/reference/esql/functions/types/ltrim.asciidoc +++ b/docs/reference/esql/functions/types/ltrim.asciidoc @@ -6,5 +6,5 @@ |=== string | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/max.asciidoc b/docs/reference/esql/functions/types/max.asciidoc index 35ce5811e0cd0..564fb8dc3bfb0 100644 --- a/docs/reference/esql/functions/types/max.asciidoc +++ b/docs/reference/esql/functions/types/max.asciidoc @@ -12,6 +12,6 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword version | version |=== diff --git a/docs/reference/esql/functions/types/min.asciidoc b/docs/reference/esql/functions/types/min.asciidoc index 35ce5811e0cd0..564fb8dc3bfb0 100644 --- a/docs/reference/esql/functions/types/min.asciidoc +++ b/docs/reference/esql/functions/types/min.asciidoc @@ -12,6 +12,6 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword version | version |=== diff --git a/docs/reference/esql/functions/types/mv_append.asciidoc b/docs/reference/esql/functions/types/mv_append.asciidoc index a1894e429ae82..05f9ff6b19f9e 100644 --- a/docs/reference/esql/functions/types/mv_append.asciidoc +++ b/docs/reference/esql/functions/types/mv_append.asciidoc @@ -16,6 +16,6 @@ integer | integer | integer ip | ip | ip keyword | keyword | keyword long | long | long -text | text | text +text | text | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/mv_dedupe.asciidoc b/docs/reference/esql/functions/types/mv_dedupe.asciidoc index 68e546451c8cb..976de79bb0910 100644 --- a/docs/reference/esql/functions/types/mv_dedupe.asciidoc +++ b/docs/reference/esql/functions/types/mv_dedupe.asciidoc @@ -16,6 +16,6 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword version | version |=== diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc index 35633544d99a0..47736e76d1db4 100644 --- a/docs/reference/esql/functions/types/mv_first.asciidoc +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -16,7 +16,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc index 35633544d99a0..47736e76d1db4 100644 --- a/docs/reference/esql/functions/types/mv_last.asciidoc +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -16,7 +16,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_max.asciidoc b/docs/reference/esql/functions/types/mv_max.asciidoc index 8ea36aebbad37..d4e014554c86c 100644 --- a/docs/reference/esql/functions/types/mv_max.asciidoc +++ b/docs/reference/esql/functions/types/mv_max.asciidoc @@ -12,7 +12,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_min.asciidoc b/docs/reference/esql/functions/types/mv_min.asciidoc index 8ea36aebbad37..d4e014554c86c 100644 --- a/docs/reference/esql/functions/types/mv_min.asciidoc +++ b/docs/reference/esql/functions/types/mv_min.asciidoc @@ -12,7 +12,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_slice.asciidoc b/docs/reference/esql/functions/types/mv_slice.asciidoc index 0a9dc073370c7..60c1f6315a599 100644 --- a/docs/reference/esql/functions/types/mv_slice.asciidoc +++ b/docs/reference/esql/functions/types/mv_slice.asciidoc @@ -16,6 +16,6 @@ integer | integer | integer | integer ip | integer | integer | ip keyword | integer | integer | keyword long | integer | integer | long -text | integer | integer | text +text | integer | integer | keyword version | integer | integer | version |=== diff --git a/docs/reference/esql/functions/types/mv_sort.asciidoc b/docs/reference/esql/functions/types/mv_sort.asciidoc index 93965187482ac..c21ea5983945e 100644 --- a/docs/reference/esql/functions/types/mv_sort.asciidoc +++ b/docs/reference/esql/functions/types/mv_sort.asciidoc @@ -12,6 +12,6 @@ integer | keyword | integer ip | keyword | ip keyword | keyword | keyword long | keyword | long -text | keyword | text +text | keyword | keyword version | keyword | version |=== diff --git a/docs/reference/esql/functions/types/reverse.asciidoc b/docs/reference/esql/functions/types/reverse.asciidoc index 974066d225bca..9e5dc1c477316 100644 --- a/docs/reference/esql/functions/types/reverse.asciidoc +++ b/docs/reference/esql/functions/types/reverse.asciidoc @@ -6,5 +6,5 @@ |=== str | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/rtrim.asciidoc b/docs/reference/esql/functions/types/rtrim.asciidoc index 41d60049d59b8..1ba0e98ec8f09 100644 --- a/docs/reference/esql/functions/types/rtrim.asciidoc +++ b/docs/reference/esql/functions/types/rtrim.asciidoc @@ -6,5 +6,5 @@ |=== string | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/to_lower.asciidoc b/docs/reference/esql/functions/types/to_lower.asciidoc index 974066d225bca..9e5dc1c477316 100644 --- a/docs/reference/esql/functions/types/to_lower.asciidoc +++ b/docs/reference/esql/functions/types/to_lower.asciidoc @@ -6,5 +6,5 @@ |=== str | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/to_upper.asciidoc b/docs/reference/esql/functions/types/to_upper.asciidoc index 974066d225bca..9e5dc1c477316 100644 --- a/docs/reference/esql/functions/types/to_upper.asciidoc +++ b/docs/reference/esql/functions/types/to_upper.asciidoc @@ -6,5 +6,5 @@ |=== str | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/top.asciidoc b/docs/reference/esql/functions/types/top.asciidoc index 25d7962a27252..699bc7b10ce84 100644 --- a/docs/reference/esql/functions/types/top.asciidoc +++ b/docs/reference/esql/functions/types/top.asciidoc @@ -12,5 +12,5 @@ integer | integer | keyword | integer ip | integer | keyword | ip keyword | integer | keyword | keyword long | integer | keyword | long -text | integer | keyword | text +text | integer | keyword | keyword |=== diff --git a/docs/reference/esql/functions/types/trim.asciidoc b/docs/reference/esql/functions/types/trim.asciidoc index 41d60049d59b8..1ba0e98ec8f09 100644 --- a/docs/reference/esql/functions/types/trim.asciidoc +++ b/docs/reference/esql/functions/types/trim.asciidoc @@ -6,5 +6,5 @@ |=== string | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/values.asciidoc b/docs/reference/esql/functions/types/values.asciidoc index 35ce5811e0cd0..564fb8dc3bfb0 100644 --- a/docs/reference/esql/functions/types/values.asciidoc +++ b/docs/reference/esql/functions/types/values.asciidoc @@ -12,6 +12,6 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword version | version |=== diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 8297ef5161fb0..cf6a8f51d1b81 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -84,5 +84,7 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("security/10_forbidden/Test bulk response with invalid credentials", "warning does not exist for compatibility") task.skipTest("inference/inference_crud/Test get all", "Assertions on number of inference models break due to default configs") task.skipTest("esql/60_usage/Basic ESQL usage output (telemetry)", "The telemetry output changed. We dropped a column. That's safe.") + task.skipTest("esql/80_text/reverse text", "The output type changed from TEXT to KEYWORD.") + task.skipTest("esql/80_text/values function", "The output type changed from TEXT to KEYWORD.") }) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 5041c96128a1e..1b1eff8a07b1d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -584,6 +584,10 @@ static Builder builder() { return new Builder(); } + public DataType noText() { + return this == TEXT ? KEYWORD : this; + } + /** * Named parameters with default values. It's just easier to do this with * a builder in java.... diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec index 1397965145a1a..49960d1b5b0f3 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec @@ -58,11 +58,11 @@ ROW zero="0"::double convertToString required_capability: casting_operator -ROW one=1::keyword, two=2::text, three=3::string +ROW one=1::keyword, two=2::double, three=3::string ; - one:keyword | two:keyword | three:keyword -1 |2 |3 +one:keyword | two:double | three:keyword +1 | 2.0 | 3 ; convertToDatetime diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 2dc21a86e6394..80ba18b85a004 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -131,17 +131,19 @@ OPQS | OPQS | OPQS | ___ | small maxOfText required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 | stats max(name), a = max(name), b = max(x); -max(name):text | a:text | b:text -Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l +max(name):keyword | a:keyword | b:keyword +Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l ; maxOfTextGrouping required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 @@ -149,7 +151,7 @@ from airports | sort type asc | limit 4; -max(name):text | a:text | b:text | type:keyword +max(name):keyword| a:keyword | b:keyword | type:keyword Cheongju Int'l | Cheongju Int'l | Cheongju Int'l | major Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l | mid Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l | military @@ -211,17 +213,19 @@ LUH | LUH | LUH | ___ | small minOfText required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 | stats min(name), a = min(name), b = min(x); -min(name):text | a:text | b:text +min(name):keyword | a:keyword | b:keyword Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh ; minOfTextGrouping required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 @@ -229,7 +233,7 @@ from airports | sort type asc | limit 4; -min(name):text | a:text | b:text | type:keyword +min(name):keyword | a:keyword | b:keyword | type:keyword Chandigarh Int'l | Chandigarh Int'l | Chandigarh Int'l | major Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh | mid Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh | military diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec index 80d11425c5bb6..6eebb2f4d19da 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec @@ -263,6 +263,7 @@ FROM employees topText required_capability: agg_top required_capability: agg_top_string_support +required_capability: functions_never_emit_text # we don't need MATCH, but the loader for books.csv is busted in CsvTests required_capability: match_operator @@ -273,13 +274,14 @@ FROM books calc = TOP(calc, 3, "asc"), evil = TOP(CASE(year < 1980, title, author), 3, "desc"); -title:text | calc:keyword | evil:text +title:keyword | calc:keyword | evil:keyword [Worlds of Exile and Illusion: Three Complete Novels of the Hainish Series in One Volume--Rocannon's World, Planet of Exile, City of Illusions, Woman-The Full Story: A Dynamic Celebration of Freedoms, Winter notes on summer impressions] | ["'Bria", "Gent", "HE UN"] | [William Faulkner, William Faulkner, William Faulkner] ; topTextGrouping required_capability: agg_top required_capability: agg_top_string_support +required_capability: functions_never_emit_text # we don't need MATCH, but the loader for books.csv is busted in CsvTests required_capability: match_operator @@ -293,7 +295,7 @@ FROM books | SORT author | LIMIT 3; - title:text | calc:keyword | evil:text | author:text + title:keyword | calc:keyword | evil:keyword | author:text A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | Tolk | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | Agnes Perkins The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | he Lo | [J. R. R. Tolkien, Alan Lee] | Alan Lee A Gentle Creature and Other Stories: White Nights, A Gentle Creature, and The Dream of a Ridiculous Man (The World's Classics) | Gent | [W. J. Leatherbarrow, Fyodor Dostoevsky, Alan Myers] | Alan Myers diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 00fa2fddb2106..305b8f3d8011e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -1289,6 +1289,7 @@ x:integer | y:string reverseWithTextFields required_capability: fn_reverse +required_capability: functions_never_emit_text FROM books | EVAL title_reversed = REVERSE(title), author_reversed_twice = REVERSE(REVERSE(author)), eq = author_reversed_twice == author | KEEP title, title_reversed, author, author_reversed_twice, eq, book_no @@ -1296,7 +1297,7 @@ FROM books | WHERE book_no IN ("1211", "1463") | LIMIT 2; -title:text | title_reversed:text | author:text | author_reversed_twice:text | eq:boolean | book_no:keyword +title:text | title_reversed:keyword | author:text | author_reversed_twice:keyword | eq:boolean | book_no:keyword The brothers Karamazov | vozamaraK srehtorb ehT | Fyodor Dostoevsky | Fyodor Dostoevsky | true | 1211 Realms of Tolkien: Images of Middle-earth | htrae-elddiM fo segamI :neikloT fo smlaeR | J. R. R. Tolkien | J. R. R. Tolkien | true | 1463 ; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 55236af648236..196a864db2c15 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -74,6 +74,11 @@ public enum Cap { */ FN_SUBSTRING_EMPTY_NULL, + /** + * All functions that take TEXT should never emit TEXT, only KEYWORD. #114334 + */ + FUNCTIONS_NEVER_EMIT_TEXT, + /** * Support for the {@code INLINESTATS} syntax. */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index ee16193efdccc..ac2d4ff3cbc43 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -55,7 +55,7 @@ public class Max extends AggregateFunction implements ToAggregator, SurrogateExp ); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "long", "version" }, description = "The maximum value of a field.", isAggregation = true, examples = { @@ -119,7 +119,7 @@ protected TypeResolution resolveType() { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index 7aaa41ea6ab11..a5fc8196847b7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -55,7 +55,7 @@ public class Min extends AggregateFunction implements ToAggregator, SurrogateExp ); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "long", "version" }, description = "The minimum value of a field.", isAggregation = true, examples = { @@ -119,7 +119,7 @@ protected TypeResolution resolveType() { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java index 4f81e0a897f9c..e0a7da806b3ac 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java @@ -51,7 +51,7 @@ public class Top extends AggregateFunction implements ToAggregator, SurrogateExp private static final String ORDER_DESC = "DESC"; @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword" }, description = "Collects the top values for a field. Includes repeated values.", isAggregation = true, examples = @Example(file = "stats_top", tag = "top") @@ -175,7 +175,7 @@ protected TypeResolution resolveType() { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java index 8d576839c3c5c..111eab051719b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java @@ -52,7 +52,7 @@ public class Values extends AggregateFunction implements ToAggregator { ); @FunctionInfo( - returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "version" }, preview = true, description = "Returns all values in a group as a multivalued field. The order of the returned values isn't guaranteed. " + "If you need the values returned in order use <>.", @@ -105,7 +105,7 @@ public Values withFilter(Expression filter) { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java index 4d34033286f52..53b51f16d4183 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java @@ -164,6 +164,6 @@ public final Expression field() { @Override public DataType dataType() { - return field.dataType(); + return field.dataType().noText(); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java index d833a796cbecc..824f02ca7ccbb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java @@ -73,7 +73,6 @@ ConditionEvaluatorSupplier toEvaluator(ToEvaluator toEvaluator) { "ip", "keyword", "long", - "text", "unsigned_long", "version" }, description = """ @@ -195,12 +194,12 @@ protected TypeResolution resolveType() { private TypeResolution resolveValueType(Expression value, int position) { if (dataType == null || dataType == NULL) { - dataType = value.dataType(); + dataType = value.dataType().noText(); return TypeResolution.TYPE_RESOLVED; } return TypeResolutions.isType( value, - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java index aad2d37d414b8..abc2ea85198fa 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java @@ -43,7 +43,7 @@ public class Greatest extends EsqlScalarFunction implements OptionalArgument { private DataType dataType; @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" }, description = "Returns the maximum value from multiple columns. This is similar to <>\n" + "except it is intended to run on multiple columns at once.", note = "When run on `keyword` or `text` fields, this returns the last string in alphabetical order. " @@ -104,12 +104,12 @@ protected TypeResolution resolveType() { for (int position = 0; position < children().size(); position++) { Expression child = children().get(position); if (dataType == null || dataType == NULL) { - dataType = child.dataType(); + dataType = child.dataType().noText(); continue; } TypeResolution resolution = TypeResolutions.isType( child, - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java index 70ba9319385f3..a49fff0aa888b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java @@ -43,7 +43,7 @@ public class Least extends EsqlScalarFunction implements OptionalArgument { private DataType dataType; @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" }, description = "Returns the minimum value from multiple columns. " + "This is similar to <> except it is intended to run on multiple columns at once.", examples = @Example(file = "math", tag = "least") @@ -102,12 +102,12 @@ protected TypeResolution resolveType() { for (int position = 0; position < children().size(); position++) { Expression child = children().get(position); if (dataType == null || dataType == NULL) { - dataType = child.dataType(); + dataType = child.dataType().noText(); continue; } TypeResolution resolution = TypeResolutions.isType( child, - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java index 72d96a86d31eb..bcd6f4c30bf8a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java @@ -62,7 +62,6 @@ public class MvAppend extends EsqlScalarFunction implements EvaluatorMapper { "ip", "keyword", "long", - "text", "version" }, description = "Concatenates values of two multi-value fields." ) @@ -134,12 +133,12 @@ protected TypeResolution resolveType() { if (resolution.unresolved()) { return resolution; } - dataType = field1.dataType(); + dataType = field1.dataType().noText(); if (dataType == DataType.NULL) { - dataType = field2.dataType(); + dataType = field2.dataType().noText(); return isType(field2, DataType::isRepresentable, sourceText(), SECOND, "representable"); } - return isType(field2, t -> t == dataType, sourceText(), SECOND, dataType.typeName()); + return isType(field2, t -> t.noText() == dataType, sourceText(), SECOND, dataType.typeName()); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java index 34b89b4f78997..9a2b041fafeb6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java @@ -46,7 +46,6 @@ public class MvDedupe extends AbstractMultivalueFunction { "ip", "keyword", "long", - "text", "version" }, description = "Remove duplicate values from a multivalued field.", note = "`MV_DEDUPE` may, but won't always, sort the values in the column.", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java index d5d203e7bb3d1..957c74883ffdf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java @@ -53,7 +53,6 @@ public class MvFirst extends AbstractMultivalueFunction { "ip", "keyword", "long", - "text", "unsigned_long", "version" }, description = """ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java index 21487f14817cd..fedbc1934d1be 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java @@ -53,7 +53,6 @@ public class MvLast extends AbstractMultivalueFunction { "ip", "keyword", "long", - "text", "unsigned_long", "version" }, description = """ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java index 6a53c652d3420..5386a9e3ef763 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java @@ -36,7 +36,7 @@ public class MvMax extends AbstractMultivalueFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvMax", MvMax::new); @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "unsigned_long", "version" }, description = "Converts a multivalued expression into a single valued column containing the maximum value.", examples = { @Example(file = "math", tag = "mv_max"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java index 4cc83c99b2c08..a2b3c53f322ba 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java @@ -36,7 +36,7 @@ public class MvMin extends AbstractMultivalueFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvMin", MvMin::new); @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "unsigned_long", "version" }, description = "Converts a multivalued expression into a single valued column containing the minimum value.", examples = { @Example(file = "math", tag = "mv_min"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java index ef562c339dfd9..f4f9679dc3704 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java @@ -67,7 +67,6 @@ public class MvSlice extends EsqlScalarFunction implements OptionalArgument, Eva "ip", "keyword", "long", - "text", "version" }, description = """ Returns a subset of the multivalued field using the start and end index values. @@ -240,7 +239,7 @@ protected NodeInfo info() { @Override public DataType dataType() { - return field.dataType(); + return field.dataType().noText(); } static int adjustIndex(int oldOffset, int fieldValueCount, int first) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java index 5ca5618bf2a54..2286a1357ced8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java @@ -69,7 +69,7 @@ public class MvSort extends EsqlScalarFunction implements OptionalArgument, Vali private static final String INVALID_ORDER_ERROR = "Invalid order value in [{}], expected one of [{}, {}] but got [{}]"; @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" }, description = "Sorts a multivalued field in lexicographical order.", examples = @Example(file = "ints", tag = "mv_sort") ) @@ -226,7 +226,7 @@ protected NodeInfo info() { @Override public DataType dataType() { - return field.dataType(); + return field.dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java index 6b9c8d0da025b..52686430ca5b5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java @@ -61,7 +61,6 @@ public class Coalesce extends EsqlScalarFunction implements OptionalArgument { "ip", "keyword", "long", - "text", "version" }, description = "Returns the first of its arguments that is not null. If all arguments are null, it returns `null`.", examples = { @Example(file = "null", tag = "coalesce") } @@ -145,12 +144,12 @@ protected TypeResolution resolveType() { for (int position = 0; position < children().size(); position++) { if (dataType == null || dataType == NULL) { - dataType = children().get(position).dataType(); + dataType = children().get(position).dataType().noText(); continue; } TypeResolution resolution = TypeResolutions.isType( children().get(position), - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java index 8a4a5f4d841a5..0b7233f10b454 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java @@ -34,7 +34,7 @@ public class LTrim extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "LTrim", LTrim::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Removes leading whitespaces from a string.", examples = @Example(file = "string", tag = "ltrim") ) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java index b79e1adf99a20..80809a444f5e8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java @@ -34,7 +34,7 @@ public class RTrim extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "RTrim", RTrim::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Removes trailing whitespaces from a string.", examples = @Example(file = "string", tag = "rtrim") ) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java index e161566838cd9..02787999f24f7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java @@ -37,7 +37,7 @@ public class Reverse extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Reverse", Reverse::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Returns a new string representing the input string in reverse order.", examples = { @Example(file = "string", tag = "reverse"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java index c475469488d7b..5f2bbcde52166 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java @@ -39,7 +39,7 @@ public class ToLower extends EsqlConfigurationFunction { private final Expression field; @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Returns a new string representing the input string converted to lower case.", examples = @Example(file = "string", tag = "to_lower") ) @@ -72,7 +72,7 @@ public String getWriteableName() { @Override public DataType dataType() { - return field.dataType(); + return DataType.KEYWORD; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java index 1b5084a7916ef..7fdd5e39f96f3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java @@ -39,7 +39,7 @@ public class ToUpper extends EsqlConfigurationFunction { private final Expression field; @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Returns a new string representing the input string converted to upper case.", examples = @Example(file = "string", tag = "to_upper") ) @@ -72,7 +72,7 @@ public String getWriteableName() { @Override public DataType dataType() { - return field.dataType(); + return DataType.KEYWORD; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java index 1fe7529caa2da..ef0afc3a4e6cb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java @@ -34,7 +34,7 @@ public final class Trim extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Trim", Trim::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Removes leading and trailing whitespaces from a string.", examples = @Example(file = "string", tag = "trim") ) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index edc3081a33681..05a658ec411f3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -117,7 +117,6 @@ public class EsqlDataTypeConverter { entry(LONG, ToLong::new), // ToRadians, typeless entry(KEYWORD, ToString::new), - entry(TEXT, ToString::new), entry(UNSIGNED_LONG, ToUnsignedLong::new), entry(VERSION, ToVersion::new), entry(DATE_PERIOD, ToDatePeriod::new), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java index 8867e7425a92e..3cafd42b731f6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java @@ -88,9 +88,6 @@ public void testInlineCast() throws IOException { Collections.sort(namesAndAliases); for (String nameOrAlias : namesAndAliases) { DataType expectedType = DataType.fromNameOrAlias(nameOrAlias); - if (expectedType == DataType.TEXT) { - expectedType = DataType.KEYWORD; - } if (EsqlDataTypeConverter.converterFunctionFactory(expectedType) == null) { continue; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index 2ba175657b6c2..c12e0a8684ba9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -1435,7 +1435,7 @@ public static TestCase typeError(List data, String expectedTypeError) this.source = Source.EMPTY; this.data = data; this.evaluatorToString = evaluatorToString; - this.expectedType = expectedType; + this.expectedType = expectedType == null ? null : expectedType.noText(); @SuppressWarnings("unchecked") Matcher downcast = (Matcher) matcher; this.matcher = downcast; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java index ce2bf7e262ae9..9756804a1ec0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java @@ -128,7 +128,7 @@ public static Iterable parameters() { return new TestCaseSupplier.TestCase( List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.TEXT, "field")), "Max[field=Attribute[channel=0]]", - DataType.TEXT, + DataType.KEYWORD, equalTo(value) ); }), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java index 7250072cd2003..171181496c889 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java @@ -128,7 +128,7 @@ public static Iterable parameters() { return new TestCaseSupplier.TestCase( List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.TEXT, "field")), "Min[field=Attribute[channel=0]]", - DataType.TEXT, + DataType.KEYWORD, equalTo(value) ); }), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index fbb7c691b1d94..51b1c72c6e287 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -151,6 +151,33 @@ private static void twoAndThreeArgs( return testCase(type, typedData, lhsOrRhs ? lhs : rhs, toStringMatcher(1, false), false, null, addWarnings(warnings)); }) ); + if (type.noText() == DataType.KEYWORD) { + DataType otherType = type == DataType.KEYWORD ? DataType.TEXT : DataType.KEYWORD; + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(cond, type, otherType)), + List.of(DataType.BOOLEAN, type, otherType), + () -> { + Object lhs = randomLiteral(type).value(); + Object rhs = randomLiteral(otherType).value(); + List typedData = List.of( + cond(cond, "cond"), + new TestCaseSupplier.TypedData(lhs, type, "lhs"), + new TestCaseSupplier.TypedData(rhs, otherType, "rhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? lhs : rhs, + toStringMatcher(1, false), + false, + null, + addWarnings(warnings) + ); + } + ) + ); + } if (lhsOrRhs) { suppliers.add( new TestCaseSupplier( @@ -222,7 +249,6 @@ private static void twoAndThreeArgs( ) ); } - suppliers.add( new TestCaseSupplier( "partial foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond, type, type)), @@ -292,6 +318,33 @@ private static void twoAndThreeArgs( } ) ); + if (type.noText() == DataType.KEYWORD) { + DataType otherType = type == DataType.KEYWORD ? DataType.TEXT : DataType.KEYWORD; + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(DataType.NULL, type, otherType)), + List.of(DataType.NULL, type, otherType), + () -> { + Object lhs = randomLiteral(type).value(); + Object rhs = randomLiteral(otherType).value(); + List typedData = List.of( + new TestCaseSupplier.TypedData(null, DataType.NULL, "cond"), + new TestCaseSupplier.TypedData(lhs, type, "lhs"), + new TestCaseSupplier.TypedData(rhs, otherType, "rhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? lhs : rhs, + startsWith("CaseEagerEvaluator[conditions=[ConditionEvaluator[condition="), + false, + null, + addWarnings(warnings) + ); + } + ) + ); + } } suppliers.add( new TestCaseSupplier( @@ -804,7 +857,7 @@ private static String typeErrorMessage(boolean includeOrdinal, List ty if (types.get(0) != DataType.BOOLEAN && types.get(0) != DataType.NULL) { return typeErrorMessage(includeOrdinal, types, 0, "boolean"); } - DataType mainType = types.get(1); + DataType mainType = types.get(1).noText(); for (int i = 2; i < types.size(); i++) { if (i % 2 == 0 && i != types.size() - 1) { // condition @@ -813,7 +866,7 @@ private static String typeErrorMessage(boolean includeOrdinal, List ty } } else { // value - if (types.get(i) != mainType) { + if (types.get(i).noText() != mainType) { return typeErrorMessage(includeOrdinal, types, i, mainType.typeName()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java index 7af1c180fd7b9..1f564ecb87f1e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -47,7 +47,7 @@ public static Iterable parameters() { suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter - return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers, (v, p) -> "string"); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "string"); } public void testRandomLocale() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java index c8bbe03bde411..7c136c3bb83c2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -47,7 +47,7 @@ public static Iterable parameters() { suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter - return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers, (v, p) -> "string"); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "string"); } public void testRandomLocale() { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml index 88ef03a22d70c..55bd39bdd73cc 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml @@ -392,7 +392,7 @@ setup: - method: POST path: /_query parameters: [method, path, parameters, capabilities] - capabilities: [fn_reverse] + capabilities: [fn_reverse, functions_never_emit_text] reason: "reverse not yet added" - do: allowed_warnings_regex: @@ -402,10 +402,10 @@ setup: query: 'FROM test | SORT name | EVAL job_reversed = REVERSE(job), tag_reversed = REVERSE(tag) | KEEP job_reversed, tag_reversed' - match: { columns.0.name: "job_reversed" } - - match: { columns.0.type: "text" } + - match: { columns.0.type: "keyword" } - match: { columns.1.name: "tag_reversed" } - - match: { columns.1.type: "text" } + - match: { columns.1.type: "keyword" } - length: { values: 2 } - match: { values.0: [ "rotceriD TI", "rab oof" ] } @@ -573,7 +573,6 @@ setup: body: query: 'FROM test | STATS job = VALUES(job) | EVAL job = MV_SORT(job) | LIMIT 1' - match: { columns.0.name: "job" } - - match: { columns.0.type: "text" } - length: { values: 1 } - match: { values.0: [ [ "IT Director", "Payroll Specialist" ] ] } @@ -592,7 +591,22 @@ setup: - match: { columns.0.name: "tag" } - match: { columns.0.type: "text" } - match: { columns.1.name: "job" } - - match: { columns.1.type: "text" } - length: { values: 2 } - match: { values.0: [ "baz", [ "Other", "Payroll Specialist" ] ] } - match: { values.1: [ "foo bar", "IT Director" ] } + +--- +"remove text typecast": + - requires: + capabilities: + - method: POST + path: /_query + parameters: [ method, path, parameters, capabilities ] + capabilities: [ functions_never_emit_text ] + reason: "Disabling ::text was done in 8.17 as part of removing all possibilities to emit text" + + - do: + catch: /Unsupported conversion to type \[TEXT\]/ + esql.query: + body: + query: 'FROM test | EVAL tag = name::text | KEEP name' From 16f61b460033baed6e7ae725fad96860d7a7f5e5 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Fri, 25 Oct 2024 10:15:56 +0200 Subject: [PATCH 100/324] Increase assert timeout for DeprecationHttpIT to reduce risk of failing when test cluster is slow to warm up (fixes #115179) (#115621) --- .../xpack/deprecation/DeprecationHttpIT.java | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java index 3fb9573dd7b62..4a17c2abbd797 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java +++ b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java @@ -121,7 +121,7 @@ public void testDeprecatedSettingsReturnWarnings() throws Exception { List> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId()); logger.warn(documents); assertThat(documents, hasSize(2)); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } finally { Response response = cleanupSettings(); List warningHeaders = getWarningHeaders(response.getHeaders()); @@ -245,7 +245,7 @@ private void doTestDeprecationWarningsAppearInHeaders(String xOpaqueId) throws E var documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId); logger.warn(documents); assertThat(documents, hasSize(headerMatchers.size())); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } public void testDeprecationRouteThrottling() throws Exception { @@ -275,7 +275,7 @@ public void testDeprecationRouteThrottling() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -303,7 +303,7 @@ public void testDisableDeprecationLogIndexing() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } finally { configureWriteDeprecationLogsToIndex(null); } @@ -369,7 +369,7 @@ public void testDeprecationMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -414,7 +414,7 @@ public void testDeprecationCriticalWarnMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -473,7 +473,7 @@ public void testDeprecationWarnMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -504,7 +504,7 @@ public void testDeprecateAndKeep() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } public void testReplacesInCurrentVersion() throws Exception { @@ -534,7 +534,7 @@ public void testReplacesInCurrentVersion() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } public void testReplacesInCompatibleVersion() throws Exception { @@ -579,7 +579,7 @@ public void testReplacesInCompatibleVersion() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } /** @@ -649,7 +649,7 @@ public void testCompatibleMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -690,7 +690,7 @@ public void testDeprecationIndexingCacheReset() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } From 9394e88c0f00e58e6b49e7607fb70bde119e4e1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 25 Oct 2024 10:18:01 +0200 Subject: [PATCH 101/324] [DOCS] Updates inference processor docs. (#115566) --- docs/reference/ingest/processors/inference.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/ingest/processors/inference.asciidoc b/docs/reference/ingest/processors/inference.asciidoc index 4699f634afe37..9c6f0592a1d91 100644 --- a/docs/reference/ingest/processors/inference.asciidoc +++ b/docs/reference/ingest/processors/inference.asciidoc @@ -16,7 +16,7 @@ ingested in the pipeline. [options="header"] |====== | Name | Required | Default | Description -| `model_id` . | yes | - | (String) The ID or alias for the trained model, or the ID of the deployment. +| `model_id` . | yes | - | (String) An inference ID, a model deployment ID, a trained model ID or an alias. | `input_output` | no | - | (List) Input fields for {infer} and output (destination) fields for the {infer} results. This option is incompatible with the `target_field` and `field_map` options. | `target_field` | no | `ml.inference.` | (String) Field added to incoming documents to contain results objects. | `field_map` | no | If defined the model's default field map | (Object) Maps the document field names to the known field names of the model. This mapping takes precedence over any default mappings provided in the model configuration. From 11401a35d41c723e98c0dcc09f4874c9c842d349 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 19:45:39 +1100 Subject: [PATCH 102/324] Mute org.elasticsearch.oldrepos.OldRepositoryAccessIT testOldRepoAccess #115631 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 084bf27d6a11b..5c94c0aff60b6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -279,6 +279,9 @@ tests: - class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT method: test {yaml=indices.create/10_basic/Create lookup index} issue: https://github.com/elastic/elasticsearch/issues/115605 +- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT + method: testOldRepoAccess + issue: https://github.com/elastic/elasticsearch/issues/115631 # Examples: # From 452ca351d3d0887db96c124dd83bb755e6e5894f Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:19:31 +0200 Subject: [PATCH 103/324] [DOCS] Test trivial commit (#115579) (#115628) (cherry picked from commit e642dd84815ea476d1e7b99f26f65cb5099d4e39) --- .../search-your-data/search-application-overview.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/search-your-data/search-application-overview.asciidoc b/docs/reference/search/search-your-data/search-application-overview.asciidoc index e12b55911740b..13cc97bb8aeab 100644 --- a/docs/reference/search/search-your-data/search-application-overview.asciidoc +++ b/docs/reference/search/search-your-data/search-application-overview.asciidoc @@ -74,7 +74,7 @@ To create a new search application in {kib}: . Name your search application. . Select *Create*. -Your search application should now be available in the list of search applications. +Your search application should now be available in the list. //[.screenshot] // image::../../images/search-applications/search-applications-create.png[Create search application screen] From b83042aa432776e4e1bcfe5c3c2f17ff2467a5e5 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 20:30:37 +1100 Subject: [PATCH 104/324] Mute org.elasticsearch.xpack.esql.analysis.AnalyzerTests testMvAppendValidation #115636 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 5c94c0aff60b6..4869b669f6220 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 +- class: org.elasticsearch.xpack.esql.analysis.AnalyzerTests + method: testMvAppendValidation + issue: https://github.com/elastic/elasticsearch/issues/115636 # Examples: # From f1de84b51cf753e2bd1e381c0a6858797229b233 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:39:52 +0200 Subject: [PATCH 105/324] [DOCS] Fix casing in servicenow docs config (#115634) --- .../connector/docs/connectors-servicenow.asciidoc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/reference/connector/docs/connectors-servicenow.asciidoc b/docs/reference/connector/docs/connectors-servicenow.asciidoc index 089a3b405d8a5..a02c418f11d74 100644 --- a/docs/reference/connector/docs/connectors-servicenow.asciidoc +++ b/docs/reference/connector/docs/connectors-servicenow.asciidoc @@ -81,7 +81,7 @@ Comma-separated list of services to fetch data from ServiceNow. If the value is - link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/incident-management/concept/c_IncidentManagement.html[Incident] - link:https://docs.servicenow.com/bundle/tokyo-servicenow-platform/page/use/service-catalog-requests/task/t_AddNewRequestItems.html[Requested Item] - link:https://docs.servicenow.com/bundle/tokyo-customer-service-management/page/product/customer-service-management/task/t_SearchTheKnowledgeBase.html[Knowledge] -- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change Request] +- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change request] + [NOTE] ==== @@ -89,7 +89,7 @@ If you have configured a custom service, the `*` value will not fetch data from ==== Default value is `*`. Examples: + - - `User, Incident, Requested Item, Knowledge, Change Request` + - `User, Incident, Requested Item, Knowledge, Change request` - `*` Enable document level security:: @@ -139,7 +139,7 @@ For default services, connectors use the following roles to find users who have | Knowledge | `admin`, `knowledge`, `knowledge_manager`, `knowledge_admin` -| Change Request | `admin`, `sn_change_read`, `itil` +| Change request | `admin`, `sn_change_read`, `itil` |=== For services other than these defaults, the connector iterates over access controls with `read` operations and finds the respective roles for those services. @@ -305,7 +305,7 @@ Comma-separated list of services to fetch data from ServiceNow. If the value is - link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/incident-management/concept/c_IncidentManagement.html[Incident] - link:https://docs.servicenow.com/bundle/tokyo-servicenow-platform/page/use/service-catalog-requests/task/t_AddNewRequestItems.html[Requested Item] - link:https://docs.servicenow.com/bundle/tokyo-customer-service-management/page/product/customer-service-management/task/t_SearchTheKnowledgeBase.html[Knowledge] -- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change Request] +- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change request] + [NOTE] ==== @@ -313,7 +313,7 @@ If you have configured a custom service, the `*` value will not fetch data from ==== Default value is `*`. Examples: + - - `User, Incident, Requested Item, Knowledge, Change Request` + - `User, Incident, Requested Item, Knowledge, Change request` - `*` `retry_count`:: @@ -374,7 +374,7 @@ For default services, connectors use the following roles to find users who have | Knowledge | `admin`, `knowledge`, `knowledge_manager`, `knowledge_admin` -| Change Request | `admin`, `sn_change_read`, `itil` +| Change request | `admin`, `sn_change_read`, `itil` |=== For services other than these defaults, the connector iterates over access controls with `read` operations and finds the respective roles for those services. From 2d854768bc98b34bd4ea8217aced2e1d95140aef Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 12:37:38 +0200 Subject: [PATCH 106/324] Optimize threading in AbstractSearchAsyncAction (#113230) Forking when an action completes on the current thread is needlessly heavy handed in preventing stack-overflows. Also, we don't need locking/synchronization to deal with a worker-count + queue length problem. Both of these allow for non-trivial optimization even in the current execution model, also this change helps with moving to a more efficient execution model by saving needless forking to the search pool in particular. -> refactored the code to never fork but instead avoid stack-depth issues through use of a `SubscribableListener` -> replaced our home brew queue and semaphore combination by JDK primitives which saves blocking synchronization on task start and completion. --- .../search/AbstractSearchAsyncAction.java | 220 ++++++++---------- 1 file changed, 94 insertions(+), 126 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 0c585c705dcd0..cf25c5730d341 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -20,14 +20,13 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.shard.ShardId; @@ -43,7 +42,6 @@ import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.transport.Transport; -import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -51,9 +49,12 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; +import java.util.concurrent.LinkedTransferQueue; +import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; +import java.util.function.Consumer; import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; @@ -238,7 +239,12 @@ public final void run() { assert shardRoutings.skip() == false; assert shardIndexMap.containsKey(shardRoutings); int shardIndex = shardIndexMap.get(shardRoutings); - performPhaseOnShard(shardIndex, shardRoutings, shardRoutings.nextOrNull()); + final SearchShardTarget routing = shardRoutings.nextOrNull(); + if (routing == null) { + failOnUnavailable(shardIndex, shardRoutings); + } else { + performPhaseOnShard(shardIndex, shardRoutings, routing); + } } } } @@ -258,7 +264,7 @@ private static boolean assertExecuteOnStartThread() { int index = 0; assert stackTraceElements[index++].getMethodName().equals("getStackTrace"); assert stackTraceElements[index++].getMethodName().equals("assertExecuteOnStartThread"); - assert stackTraceElements[index++].getMethodName().equals("performPhaseOnShard"); + assert stackTraceElements[index++].getMethodName().equals("failOnUnavailable"); if (stackTraceElements[index].getMethodName().equals("performPhaseOnShard")) { assert stackTraceElements[index].getClassName().endsWith("CanMatchPreFilterSearchPhase"); index++; @@ -277,65 +283,53 @@ private static boolean assertExecuteOnStartThread() { } protected void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final SearchShardTarget shard) { - /* - * We capture the thread that this phase is starting on. When we are called back after executing the phase, we are either on the - * same thread (because we never went async, or the same thread was selected from the thread pool) or a different thread. If we - * continue on the same thread in the case that we never went async and this happens repeatedly we will end up recursing deeply and - * could stack overflow. To prevent this, we fork if we are called back on the same thread that execution started on and otherwise - * we can continue (cf. InitialSearchPhase#maybeFork). - */ - if (shard == null) { - assert assertExecuteOnStartThread(); - SearchShardTarget unassignedShard = new SearchShardTarget(null, shardIt.shardId(), shardIt.getClusterAlias()); - onShardFailure(shardIndex, unassignedShard, shardIt, new NoShardAvailableActionException(shardIt.shardId())); + if (throttleConcurrentRequests) { + var pendingExecutions = pendingExecutionsPerNode.computeIfAbsent( + shard.getNodeId(), + n -> new PendingExecutions(maxConcurrentRequestsPerNode) + ); + pendingExecutions.submit(l -> doPerformPhaseOnShard(shardIndex, shardIt, shard, l)); } else { - final PendingExecutions pendingExecutions = throttleConcurrentRequests - ? pendingExecutionsPerNode.computeIfAbsent(shard.getNodeId(), n -> new PendingExecutions(maxConcurrentRequestsPerNode)) - : null; - Runnable r = () -> { - final Thread thread = Thread.currentThread(); - try { - executePhaseOnShard(shardIt, shard, new SearchActionListener<>(shard, shardIndex) { - @Override - public void innerOnResponse(Result result) { - try { - onShardResult(result, shardIt); - } catch (Exception exc) { - onShardFailure(shardIndex, shard, shardIt, exc); - } finally { - executeNext(pendingExecutions, thread); - } - } + doPerformPhaseOnShard(shardIndex, shardIt, shard, () -> {}); + } + } - @Override - public void onFailure(Exception t) { - try { - onShardFailure(shardIndex, shard, shardIt, t); - } finally { - executeNext(pendingExecutions, thread); - } - } - }); - } catch (final Exception e) { - try { - /* - * It is possible to run into connection exceptions here because we are getting the connection early and might - * run into nodes that are not connected. In this case, on shard failure will move us to the next shard copy. - */ - fork(() -> onShardFailure(shardIndex, shard, shardIt, e)); - } finally { - executeNext(pendingExecutions, thread); + private void doPerformPhaseOnShard(int shardIndex, SearchShardIterator shardIt, SearchShardTarget shard, Releasable releasable) { + try { + executePhaseOnShard(shardIt, shard, new SearchActionListener<>(shard, shardIndex) { + @Override + public void innerOnResponse(Result result) { + try (releasable) { + onShardResult(result, shardIt); + } catch (Exception exc) { + onShardFailure(shardIndex, shard, shardIt, exc); } } - }; - if (throttleConcurrentRequests) { - pendingExecutions.tryRun(r); - } else { - r.run(); + + @Override + public void onFailure(Exception e) { + try (releasable) { + onShardFailure(shardIndex, shard, shardIt, e); + } + } + }); + } catch (final Exception e) { + /* + * It is possible to run into connection exceptions here because we are getting the connection early and might + * run into nodes that are not connected. In this case, on shard failure will move us to the next shard copy. + */ + try (releasable) { + onShardFailure(shardIndex, shard, shardIt, e); } } } + private void failOnUnavailable(int shardIndex, SearchShardIterator shardIt) { + assert assertExecuteOnStartThread(); + SearchShardTarget unassignedShard = new SearchShardTarget(null, shardIt.shardId(), shardIt.getClusterAlias()); + onShardFailure(shardIndex, unassignedShard, shardIt, new NoShardAvailableActionException(shardIt.shardId())); + } + /** * Sends the request to the actual shard. * @param shardIt the shards iterator @@ -348,34 +342,6 @@ protected abstract void executePhaseOnShard( SearchActionListener listener ); - protected void fork(final Runnable runnable) { - executor.execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - logger.error(() -> "unexpected error during [" + task + "]", e); - assert false : e; - } - - @Override - public void onRejection(Exception e) { - // avoid leaks during node shutdown by executing on the current thread if the executor shuts down - assert e instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown() : e; - doRun(); - } - - @Override - protected void doRun() { - runnable.run(); - } - - @Override - public boolean isForceExecution() { - // we can not allow a stuffed queue to reject execution here - return true; - } - }); - } - @Override public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase) { /* This is the main search phase transition where we move to the next phase. If all shards @@ -794,61 +760,63 @@ protected final ShardSearchRequest buildShardSearchRequest(SearchShardIterator s */ protected abstract SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context); - private void executeNext(PendingExecutions pendingExecutions, Thread originalThread) { - executeNext(pendingExecutions == null ? null : pendingExecutions.finishAndGetNext(), originalThread); - } - - void executeNext(Runnable runnable, Thread originalThread) { - if (runnable != null) { - assert throttleConcurrentRequests; - if (originalThread == Thread.currentThread()) { - fork(runnable); - } else { - runnable.run(); - } - } - } - private static final class PendingExecutions { - private final int permits; - private int permitsTaken = 0; - private final ArrayDeque queue = new ArrayDeque<>(); + private final Semaphore semaphore; + private final LinkedTransferQueue> queue = new LinkedTransferQueue<>(); PendingExecutions(int permits) { assert permits > 0 : "not enough permits: " + permits; - this.permits = permits; + semaphore = new Semaphore(permits); } - Runnable finishAndGetNext() { - synchronized (this) { - permitsTaken--; - assert permitsTaken >= 0 : "illegal taken permits: " + permitsTaken; + void submit(Consumer task) { + if (semaphore.tryAcquire()) { + executeAndRelease(task); + } else { + queue.add(task); + if (semaphore.tryAcquire()) { + task = pollNextTaskOrReleasePermit(); + if (task != null) { + executeAndRelease(task); + } + } } - return tryQueue(null); + } - void tryRun(Runnable runnable) { - Runnable r = tryQueue(runnable); - if (r != null) { - r.run(); + private void executeAndRelease(Consumer task) { + while (task != null) { + final SubscribableListener onDone = new SubscribableListener<>(); + task.accept(() -> onDone.onResponse(null)); + if (onDone.isDone()) { + // keep going on the current thread, no need to fork + task = pollNextTaskOrReleasePermit(); + } else { + onDone.addListener(new ActionListener<>() { + @Override + public void onResponse(Void unused) { + final Consumer nextTask = pollNextTaskOrReleasePermit(); + if (nextTask != null) { + executeAndRelease(nextTask); + } + } + + @Override + public void onFailure(Exception e) { + assert false : e; + } + }); + return; + } } } - private synchronized Runnable tryQueue(Runnable runnable) { - Runnable toExecute = null; - if (permitsTaken < permits) { - permitsTaken++; - toExecute = runnable; - if (toExecute == null) { // only poll if we don't have anything to execute - toExecute = queue.poll(); - } - if (toExecute == null) { - permitsTaken--; - } - } else if (runnable != null) { - queue.add(runnable); + private Consumer pollNextTaskOrReleasePermit() { + var task = queue.poll(); + if (task == null) { + semaphore.release(); } - return toExecute; + return task; } } } From 13e67bdd0803914ac75ec13853828fec1b42d4a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Fri, 25 Oct 2024 12:43:13 +0200 Subject: [PATCH 107/324] Refactoring of the KQL grammar. (#115632) --- x-pack/plugin/kql/src/main/antlr/KqlBase.g4 | 95 +- .../plugin/kql/src/main/antlr/KqlBase.tokens | 31 +- .../kql/src/main/antlr/KqlBaseLexer.tokens | 31 +- .../xpack/kql/parser/KqlBase.interp | 28 +- .../xpack/kql/parser/KqlBaseBaseListener.java | 56 +- .../xpack/kql/parser/KqlBaseBaseVisitor.java | 30 +- .../xpack/kql/parser/KqlBaseLexer.interp | 22 +- .../xpack/kql/parser/KqlBaseLexer.java | 255 ++--- .../xpack/kql/parser/KqlBaseListener.java | 84 +- .../xpack/kql/parser/KqlBaseParser.java | 1010 ++++++++++------- .../xpack/kql/parser/KqlBaseVisitor.java | 44 +- .../kql/src/test/resources/supported-queries | 9 + .../src/test/resources/unsupported-queries | 8 - 13 files changed, 900 insertions(+), 803 deletions(-) diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 index cffa2db9f959a..dbf7c1979796a 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 +++ b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 @@ -26,70 +26,68 @@ topLevelQuery ; query - : query (AND | OR) query #booleanQuery - | NOT subQuery=simpleQuery #notQuery - | simpleQuery #defaultQuery + : query operator=(AND | OR) query #booleanQuery + | NOT subQuery=simpleQuery #notQuery + | simpleQuery #defaultQuery ; simpleQuery : nestedQuery - | expression | parenthesizedQuery - ; - -expression - : fieldTermQuery - | fieldRangeQuery + | matchAllQuery + | existsQuery + | rangeQuery + | fieldQuery + | fieldLessQuery ; nestedQuery : fieldName COLON LEFT_CURLY_BRACKET query RIGHT_CURLY_BRACKET ; -parenthesizedQuery: - LEFT_PARENTHESIS query RIGHT_PARENTHESIS; - -fieldRangeQuery - : fieldName operator=OP_COMPARE rangeQueryValue +matchAllQuery + : (WILDCARD COLON)? WILDCARD ; -fieldTermQuery - : (fieldName COLON)? termQueryValue +parenthesizedQuery + : LEFT_PARENTHESIS query RIGHT_PARENTHESIS ; -fieldName - : wildcardExpression - | unquotedLiteralExpression - | quotedStringExpression +rangeQuery + : fieldName operator=(OP_LESS|OP_LESS_EQ|OP_MORE|OP_MORE_EQ) rangeQueryValue ; rangeQueryValue - : unquotedLiteralExpression - | quotedStringExpression - ; - -termQueryValue - : wildcardExpression - | quotedStringExpression - | termValue=unquotedLiteralExpression - | groupingTermExpression; + : (UNQUOTED_LITERAL|WILDCARD)+ + | QUOTED_STRING + ; -groupingTermExpression - : LEFT_PARENTHESIS unquotedLiteralExpression RIGHT_PARENTHESIS +existsQuery + :fieldName COLON WILDCARD ; -unquotedLiteralExpression - : UNQUOTED_LITERAL+ +fieldQuery + : fieldName COLON fieldQueryValue + | fieldName COLON LEFT_PARENTHESIS fieldQueryValue RIGHT_PARENTHESIS ; -quotedStringExpression - : QUOTED_STRING +fieldLessQuery + : fieldQueryValue + | LEFT_PARENTHESIS fieldQueryValue RIGHT_PARENTHESIS ; -wildcardExpression - : WILDCARD -; +fieldQueryValue + : (AND|OR)? (UNQUOTED_LITERAL | WILDCARD )+ + | (UNQUOTED_LITERAL | WILDCARD )+ (AND|OR)? + | (NOT|AND|OR) + | QUOTED_STRING + ; +fieldName + : value=UNQUOTED_LITERAL+ + | value=QUOTED_STRING + | value=WILDCARD + ; DEFAULT_SKIP: WHITESPACE -> skip; @@ -98,31 +96,34 @@ OR: 'or'; NOT: 'not'; COLON: ':'; -OP_COMPARE: OP_LESS | OP_MORE | OP_LESS_EQ | OP_MORE_EQ; +OP_LESS: '<'; +OP_LESS_EQ: '<='; +OP_MORE: '>'; +OP_MORE_EQ: '>='; LEFT_PARENTHESIS: '('; RIGHT_PARENTHESIS: ')'; LEFT_CURLY_BRACKET: '{'; RIGHT_CURLY_BRACKET: '}'; -UNQUOTED_LITERAL: WILDCARD* UNQUOTED_LITERAL_CHAR+ WILDCARD*; +UNQUOTED_LITERAL: UNQUOTED_LITERAL_CHAR+; QUOTED_STRING: '"'QUOTED_CHAR*'"'; -WILDCARD: WILDCARD_CHAR+; +WILDCARD: WILDCARD_CHAR; fragment WILDCARD_CHAR: '*'; -fragment OP_LESS: '<'; -fragment OP_LESS_EQ: '<='; -fragment OP_MORE: '>'; -fragment OP_MORE_EQ: '>='; fragment UNQUOTED_LITERAL_CHAR + : WILDCARD_CHAR* UNQUOTED_LITERAL_BASE_CHAR WILDCARD_CHAR* + | WILDCARD_CHAR WILDCARD_CHAR+ + ; + +fragment UNQUOTED_LITERAL_BASE_CHAR : ESCAPED_WHITESPACE | ESCAPED_SPECIAL_CHAR | ESCAPE_UNICODE_SEQUENCE | '\\' (AND | OR | NOT) - | WILDCARD_CHAR UNQUOTED_LITERAL_CHAR | NON_SPECIAL_CHAR ; @@ -135,7 +136,7 @@ fragment QUOTED_CHAR fragment WHITESPACE: [ \t\n\r\u3000]; fragment ESCAPED_WHITESPACE: '\\r' | '\\t' | '\\n'; -fragment NON_SPECIAL_CHAR: ~[ \\():<>"*{}]; +fragment NON_SPECIAL_CHAR: ~[ \n\r\t\u3000\\():<>"*{}]; fragment ESCAPED_SPECIAL_CHAR: '\\'[ \\():<>"*{}]; fragment ESCAPED_QUOTE: '\\"'; diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens b/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens index 268ae0613b9f0..f26b6b9c3da55 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens +++ b/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens @@ -3,19 +3,26 @@ AND=2 OR=3 NOT=4 COLON=5 -OP_COMPARE=6 -LEFT_PARENTHESIS=7 -RIGHT_PARENTHESIS=8 -LEFT_CURLY_BRACKET=9 -RIGHT_CURLY_BRACKET=10 -UNQUOTED_LITERAL=11 -QUOTED_STRING=12 -WILDCARD=13 +OP_LESS=6 +OP_LESS_EQ=7 +OP_MORE=8 +OP_MORE_EQ=9 +LEFT_PARENTHESIS=10 +RIGHT_PARENTHESIS=11 +LEFT_CURLY_BRACKET=12 +RIGHT_CURLY_BRACKET=13 +UNQUOTED_LITERAL=14 +QUOTED_STRING=15 +WILDCARD=16 'and'=2 'or'=3 'not'=4 ':'=5 -'('=7 -')'=8 -'{'=9 -'}'=10 +'<'=6 +'<='=7 +'>'=8 +'>='=9 +'('=10 +')'=11 +'{'=12 +'}'=13 diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens b/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens index 268ae0613b9f0..f26b6b9c3da55 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens +++ b/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens @@ -3,19 +3,26 @@ AND=2 OR=3 NOT=4 COLON=5 -OP_COMPARE=6 -LEFT_PARENTHESIS=7 -RIGHT_PARENTHESIS=8 -LEFT_CURLY_BRACKET=9 -RIGHT_CURLY_BRACKET=10 -UNQUOTED_LITERAL=11 -QUOTED_STRING=12 -WILDCARD=13 +OP_LESS=6 +OP_LESS_EQ=7 +OP_MORE=8 +OP_MORE_EQ=9 +LEFT_PARENTHESIS=10 +RIGHT_PARENTHESIS=11 +LEFT_CURLY_BRACKET=12 +RIGHT_CURLY_BRACKET=13 +UNQUOTED_LITERAL=14 +QUOTED_STRING=15 +WILDCARD=16 'and'=2 'or'=3 'not'=4 ':'=5 -'('=7 -')'=8 -'{'=9 -'}'=10 +'<'=6 +'<='=7 +'>'=8 +'>='=9 +'('=10 +')'=11 +'{'=12 +'}'=13 diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp index 1954195b52363..111cac6d641b9 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp @@ -5,7 +5,10 @@ null 'or' 'not' ':' -null +'<' +'<=' +'>' +'>=' '(' ')' '{' @@ -21,7 +24,10 @@ AND OR NOT COLON -OP_COMPARE +OP_LESS +OP_LESS_EQ +OP_MORE +OP_MORE_EQ LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_CURLY_BRACKET @@ -34,19 +40,17 @@ rule names: topLevelQuery query simpleQuery -expression nestedQuery +matchAllQuery parenthesizedQuery -fieldRangeQuery -fieldTermQuery -fieldName +rangeQuery rangeQueryValue -termQueryValue -groupingTermExpression -unquotedLiteralExpression -quotedStringExpression -wildcardExpression +existsQuery +fieldQuery +fieldLessQuery +fieldQueryValue +fieldName atn: -[4, 1, 13, 108, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 1, 0, 3, 0, 32, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 40, 8, 1, 1, 1, 1, 1, 1, 1, 5, 1, 45, 8, 1, 10, 1, 12, 1, 48, 9, 1, 1, 2, 1, 2, 1, 2, 3, 2, 53, 8, 2, 1, 3, 1, 3, 3, 3, 57, 8, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 3, 7, 76, 8, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 3, 8, 83, 8, 8, 1, 9, 1, 9, 3, 9, 87, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 93, 8, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 4, 12, 100, 8, 12, 11, 12, 12, 12, 101, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 0, 1, 2, 15, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 0, 1, 1, 0, 2, 3, 106, 0, 31, 1, 0, 0, 0, 2, 39, 1, 0, 0, 0, 4, 52, 1, 0, 0, 0, 6, 56, 1, 0, 0, 0, 8, 58, 1, 0, 0, 0, 10, 64, 1, 0, 0, 0, 12, 68, 1, 0, 0, 0, 14, 75, 1, 0, 0, 0, 16, 82, 1, 0, 0, 0, 18, 86, 1, 0, 0, 0, 20, 92, 1, 0, 0, 0, 22, 94, 1, 0, 0, 0, 24, 99, 1, 0, 0, 0, 26, 103, 1, 0, 0, 0, 28, 105, 1, 0, 0, 0, 30, 32, 3, 2, 1, 0, 31, 30, 1, 0, 0, 0, 31, 32, 1, 0, 0, 0, 32, 33, 1, 0, 0, 0, 33, 34, 5, 0, 0, 1, 34, 1, 1, 0, 0, 0, 35, 36, 6, 1, -1, 0, 36, 37, 5, 4, 0, 0, 37, 40, 3, 4, 2, 0, 38, 40, 3, 4, 2, 0, 39, 35, 1, 0, 0, 0, 39, 38, 1, 0, 0, 0, 40, 46, 1, 0, 0, 0, 41, 42, 10, 3, 0, 0, 42, 43, 7, 0, 0, 0, 43, 45, 3, 2, 1, 4, 44, 41, 1, 0, 0, 0, 45, 48, 1, 0, 0, 0, 46, 44, 1, 0, 0, 0, 46, 47, 1, 0, 0, 0, 47, 3, 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 49, 53, 3, 8, 4, 0, 50, 53, 3, 6, 3, 0, 51, 53, 3, 10, 5, 0, 52, 49, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 52, 51, 1, 0, 0, 0, 53, 5, 1, 0, 0, 0, 54, 57, 3, 14, 7, 0, 55, 57, 3, 12, 6, 0, 56, 54, 1, 0, 0, 0, 56, 55, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 59, 3, 16, 8, 0, 59, 60, 5, 5, 0, 0, 60, 61, 5, 9, 0, 0, 61, 62, 3, 2, 1, 0, 62, 63, 5, 10, 0, 0, 63, 9, 1, 0, 0, 0, 64, 65, 5, 7, 0, 0, 65, 66, 3, 2, 1, 0, 66, 67, 5, 8, 0, 0, 67, 11, 1, 0, 0, 0, 68, 69, 3, 16, 8, 0, 69, 70, 5, 6, 0, 0, 70, 71, 3, 18, 9, 0, 71, 13, 1, 0, 0, 0, 72, 73, 3, 16, 8, 0, 73, 74, 5, 5, 0, 0, 74, 76, 1, 0, 0, 0, 75, 72, 1, 0, 0, 0, 75, 76, 1, 0, 0, 0, 76, 77, 1, 0, 0, 0, 77, 78, 3, 20, 10, 0, 78, 15, 1, 0, 0, 0, 79, 83, 3, 28, 14, 0, 80, 83, 3, 24, 12, 0, 81, 83, 3, 26, 13, 0, 82, 79, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 81, 1, 0, 0, 0, 83, 17, 1, 0, 0, 0, 84, 87, 3, 24, 12, 0, 85, 87, 3, 26, 13, 0, 86, 84, 1, 0, 0, 0, 86, 85, 1, 0, 0, 0, 87, 19, 1, 0, 0, 0, 88, 93, 3, 28, 14, 0, 89, 93, 3, 26, 13, 0, 90, 93, 3, 24, 12, 0, 91, 93, 3, 22, 11, 0, 92, 88, 1, 0, 0, 0, 92, 89, 1, 0, 0, 0, 92, 90, 1, 0, 0, 0, 92, 91, 1, 0, 0, 0, 93, 21, 1, 0, 0, 0, 94, 95, 5, 7, 0, 0, 95, 96, 3, 24, 12, 0, 96, 97, 5, 8, 0, 0, 97, 23, 1, 0, 0, 0, 98, 100, 5, 11, 0, 0, 99, 98, 1, 0, 0, 0, 100, 101, 1, 0, 0, 0, 101, 99, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 25, 1, 0, 0, 0, 103, 104, 5, 12, 0, 0, 104, 27, 1, 0, 0, 0, 105, 106, 5, 13, 0, 0, 106, 29, 1, 0, 0, 0, 10, 31, 39, 46, 52, 56, 75, 82, 86, 92, 101] \ No newline at end of file +[4, 1, 16, 135, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 3, 0, 28, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 36, 8, 1, 1, 1, 1, 1, 1, 1, 5, 1, 41, 8, 1, 10, 1, 12, 1, 44, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 53, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 3, 4, 63, 8, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 4, 7, 76, 8, 7, 11, 7, 12, 7, 77, 1, 7, 3, 7, 81, 8, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 3, 9, 97, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 104, 8, 10, 1, 11, 3, 11, 107, 8, 11, 1, 11, 4, 11, 110, 8, 11, 11, 11, 12, 11, 111, 1, 11, 4, 11, 115, 8, 11, 11, 11, 12, 11, 116, 1, 11, 3, 11, 120, 8, 11, 1, 11, 1, 11, 3, 11, 124, 8, 11, 1, 12, 4, 12, 127, 8, 12, 11, 12, 12, 12, 128, 1, 12, 1, 12, 3, 12, 133, 8, 12, 1, 12, 0, 1, 2, 13, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 0, 4, 1, 0, 2, 3, 1, 0, 6, 9, 2, 0, 14, 14, 16, 16, 1, 0, 2, 4, 145, 0, 27, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 52, 1, 0, 0, 0, 6, 54, 1, 0, 0, 0, 8, 62, 1, 0, 0, 0, 10, 66, 1, 0, 0, 0, 12, 70, 1, 0, 0, 0, 14, 80, 1, 0, 0, 0, 16, 82, 1, 0, 0, 0, 18, 96, 1, 0, 0, 0, 20, 103, 1, 0, 0, 0, 22, 123, 1, 0, 0, 0, 24, 132, 1, 0, 0, 0, 26, 28, 3, 2, 1, 0, 27, 26, 1, 0, 0, 0, 27, 28, 1, 0, 0, 0, 28, 29, 1, 0, 0, 0, 29, 30, 5, 0, 0, 1, 30, 1, 1, 0, 0, 0, 31, 32, 6, 1, -1, 0, 32, 33, 5, 4, 0, 0, 33, 36, 3, 4, 2, 0, 34, 36, 3, 4, 2, 0, 35, 31, 1, 0, 0, 0, 35, 34, 1, 0, 0, 0, 36, 42, 1, 0, 0, 0, 37, 38, 10, 3, 0, 0, 38, 39, 7, 0, 0, 0, 39, 41, 3, 2, 1, 3, 40, 37, 1, 0, 0, 0, 41, 44, 1, 0, 0, 0, 42, 40, 1, 0, 0, 0, 42, 43, 1, 0, 0, 0, 43, 3, 1, 0, 0, 0, 44, 42, 1, 0, 0, 0, 45, 53, 3, 6, 3, 0, 46, 53, 3, 10, 5, 0, 47, 53, 3, 8, 4, 0, 48, 53, 3, 16, 8, 0, 49, 53, 3, 12, 6, 0, 50, 53, 3, 18, 9, 0, 51, 53, 3, 20, 10, 0, 52, 45, 1, 0, 0, 0, 52, 46, 1, 0, 0, 0, 52, 47, 1, 0, 0, 0, 52, 48, 1, 0, 0, 0, 52, 49, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 52, 51, 1, 0, 0, 0, 53, 5, 1, 0, 0, 0, 54, 55, 3, 24, 12, 0, 55, 56, 5, 5, 0, 0, 56, 57, 5, 12, 0, 0, 57, 58, 3, 2, 1, 0, 58, 59, 5, 13, 0, 0, 59, 7, 1, 0, 0, 0, 60, 61, 5, 16, 0, 0, 61, 63, 5, 5, 0, 0, 62, 60, 1, 0, 0, 0, 62, 63, 1, 0, 0, 0, 63, 64, 1, 0, 0, 0, 64, 65, 5, 16, 0, 0, 65, 9, 1, 0, 0, 0, 66, 67, 5, 10, 0, 0, 67, 68, 3, 2, 1, 0, 68, 69, 5, 11, 0, 0, 69, 11, 1, 0, 0, 0, 70, 71, 3, 24, 12, 0, 71, 72, 7, 1, 0, 0, 72, 73, 3, 14, 7, 0, 73, 13, 1, 0, 0, 0, 74, 76, 7, 2, 0, 0, 75, 74, 1, 0, 0, 0, 76, 77, 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 77, 78, 1, 0, 0, 0, 78, 81, 1, 0, 0, 0, 79, 81, 5, 15, 0, 0, 80, 75, 1, 0, 0, 0, 80, 79, 1, 0, 0, 0, 81, 15, 1, 0, 0, 0, 82, 83, 3, 24, 12, 0, 83, 84, 5, 5, 0, 0, 84, 85, 5, 16, 0, 0, 85, 17, 1, 0, 0, 0, 86, 87, 3, 24, 12, 0, 87, 88, 5, 5, 0, 0, 88, 89, 3, 22, 11, 0, 89, 97, 1, 0, 0, 0, 90, 91, 3, 24, 12, 0, 91, 92, 5, 5, 0, 0, 92, 93, 5, 10, 0, 0, 93, 94, 3, 22, 11, 0, 94, 95, 5, 11, 0, 0, 95, 97, 1, 0, 0, 0, 96, 86, 1, 0, 0, 0, 96, 90, 1, 0, 0, 0, 97, 19, 1, 0, 0, 0, 98, 104, 3, 22, 11, 0, 99, 100, 5, 10, 0, 0, 100, 101, 3, 22, 11, 0, 101, 102, 5, 11, 0, 0, 102, 104, 1, 0, 0, 0, 103, 98, 1, 0, 0, 0, 103, 99, 1, 0, 0, 0, 104, 21, 1, 0, 0, 0, 105, 107, 7, 0, 0, 0, 106, 105, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 109, 1, 0, 0, 0, 108, 110, 7, 2, 0, 0, 109, 108, 1, 0, 0, 0, 110, 111, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 111, 112, 1, 0, 0, 0, 112, 124, 1, 0, 0, 0, 113, 115, 7, 2, 0, 0, 114, 113, 1, 0, 0, 0, 115, 116, 1, 0, 0, 0, 116, 114, 1, 0, 0, 0, 116, 117, 1, 0, 0, 0, 117, 119, 1, 0, 0, 0, 118, 120, 7, 0, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 124, 1, 0, 0, 0, 121, 124, 7, 3, 0, 0, 122, 124, 5, 15, 0, 0, 123, 106, 1, 0, 0, 0, 123, 114, 1, 0, 0, 0, 123, 121, 1, 0, 0, 0, 123, 122, 1, 0, 0, 0, 124, 23, 1, 0, 0, 0, 125, 127, 5, 14, 0, 0, 126, 125, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 128, 129, 1, 0, 0, 0, 129, 133, 1, 0, 0, 0, 130, 133, 5, 15, 0, 0, 131, 133, 5, 16, 0, 0, 132, 126, 1, 0, 0, 0, 132, 130, 1, 0, 0, 0, 132, 131, 1, 0, 0, 0, 133, 25, 1, 0, 0, 0, 16, 27, 35, 42, 52, 62, 77, 80, 96, 103, 106, 111, 116, 119, 123, 128, 132] \ No newline at end of file diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java index 1b4282b5dbbea..426af7f7115b9 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java @@ -80,18 +80,6 @@ class KqlBaseBaseListener implements KqlBaseListener { *

The default implementation does nothing.

*/ @Override public void exitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { } - /** - * {@inheritDoc} - * - *

The default implementation does nothing.

- */ - @Override public void enterExpression(KqlBaseParser.ExpressionContext ctx) { } - /** - * {@inheritDoc} - * - *

The default implementation does nothing.

- */ - @Override public void exitExpression(KqlBaseParser.ExpressionContext ctx) { } /** * {@inheritDoc} * @@ -109,49 +97,37 @@ class KqlBaseBaseListener implements KqlBaseListener { * *

The default implementation does nothing.

*/ - @Override public void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } - /** - * {@inheritDoc} - * - *

The default implementation does nothing.

- */ - @Override public void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } - /** - * {@inheritDoc} - * - *

The default implementation does nothing.

- */ - @Override public void enterFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx) { } + @Override public void enterMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void exitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx) { } + @Override public void exitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void enterFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx) { } + @Override public void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void exitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx) { } + @Override public void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void enterFieldName(KqlBaseParser.FieldNameContext ctx) { } + @Override public void enterRangeQuery(KqlBaseParser.RangeQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void exitFieldName(KqlBaseParser.FieldNameContext ctx) { } + @Override public void exitRangeQuery(KqlBaseParser.RangeQueryContext ctx) { } /** * {@inheritDoc} * @@ -169,61 +145,61 @@ class KqlBaseBaseListener implements KqlBaseListener { * *

The default implementation does nothing.

*/ - @Override public void enterTermQueryValue(KqlBaseParser.TermQueryValueContext ctx) { } + @Override public void enterExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void exitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx) { } + @Override public void exitExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void enterGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx) { } + @Override public void enterFieldQuery(KqlBaseParser.FieldQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void exitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx) { } + @Override public void exitFieldQuery(KqlBaseParser.FieldQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void enterUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx) { } + @Override public void enterFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void exitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx) { } + @Override public void exitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void enterQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx) { } + @Override public void enterFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void exitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx) { } + @Override public void exitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void enterWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx) { } + @Override public void enterFieldName(KqlBaseParser.FieldNameContext ctx) { } /** * {@inheritDoc} * *

The default implementation does nothing.

*/ - @Override public void exitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx) { } + @Override public void exitFieldName(KqlBaseParser.FieldNameContext ctx) { } /** * {@inheritDoc} diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java index 09cd668804154..cf1f2b3972823 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java @@ -55,13 +55,6 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa * {@link #visitChildren} on {@code ctx}.

*/ @Override public T visitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { return visitChildren(ctx); } - /** - * {@inheritDoc} - * - *

The default implementation returns the result of calling - * {@link #visitChildren} on {@code ctx}.

- */ - @Override public T visitExpression(KqlBaseParser.ExpressionContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * @@ -75,28 +68,21 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { return visitChildren(ctx); } - /** - * {@inheritDoc} - * - *

The default implementation returns the result of calling - * {@link #visitChildren} on {@code ctx}.

- */ - @Override public T visitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx) { return visitChildren(ctx); } + @Override public T visitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx) { return visitChildren(ctx); } + @Override public T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitFieldName(KqlBaseParser.FieldNameContext ctx) { return visitChildren(ctx); } + @Override public T visitRangeQuery(KqlBaseParser.RangeQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * @@ -110,33 +96,33 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx) { return visitChildren(ctx); } + @Override public T visitExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldQuery(KqlBaseParser.FieldQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldName(KqlBaseParser.FieldNameContext ctx) { return visitChildren(ctx); } } diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp index d178df5fcbc88..f9afe07af3b40 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp @@ -5,7 +5,10 @@ null 'or' 'not' ':' -null +'<' +'<=' +'>' +'>=' '(' ')' '{' @@ -21,7 +24,10 @@ AND OR NOT COLON -OP_COMPARE +OP_LESS +OP_LESS_EQ +OP_MORE +OP_MORE_EQ LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_CURLY_BRACKET @@ -36,7 +42,10 @@ AND OR NOT COLON -OP_COMPARE +OP_LESS +OP_LESS_EQ +OP_MORE +OP_MORE_EQ LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_CURLY_BRACKET @@ -45,11 +54,8 @@ UNQUOTED_LITERAL QUOTED_STRING WILDCARD WILDCARD_CHAR -OP_LESS -OP_LESS_EQ -OP_MORE -OP_MORE_EQ UNQUOTED_LITERAL_CHAR +UNQUOTED_LITERAL_BASE_CHAR QUOTED_CHAR WHITESPACE ESCAPED_WHITESPACE @@ -68,4 +74,4 @@ mode names: DEFAULT_MODE atn: -[4, 0, 13, 181, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 79, 8, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 5, 10, 90, 8, 10, 10, 10, 12, 10, 93, 9, 10, 1, 10, 4, 10, 96, 8, 10, 11, 10, 12, 10, 97, 1, 10, 5, 10, 101, 8, 10, 10, 10, 12, 10, 104, 9, 10, 1, 11, 1, 11, 5, 11, 108, 8, 11, 10, 11, 12, 11, 111, 9, 11, 1, 11, 1, 11, 1, 12, 4, 12, 116, 8, 12, 11, 12, 12, 12, 117, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 139, 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 145, 8, 18, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 151, 8, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 161, 8, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 0, 0, 28, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 0, 29, 0, 31, 0, 33, 0, 35, 0, 37, 0, 39, 0, 41, 0, 43, 0, 45, 0, 47, 0, 49, 0, 51, 0, 53, 0, 55, 0, 1, 0, 11, 2, 0, 65, 65, 97, 97, 2, 0, 78, 78, 110, 110, 2, 0, 68, 68, 100, 100, 2, 0, 79, 79, 111, 111, 2, 0, 82, 82, 114, 114, 2, 0, 84, 84, 116, 116, 1, 0, 34, 34, 4, 0, 9, 10, 13, 13, 32, 32, 12288, 12288, 9, 0, 32, 32, 34, 34, 40, 42, 58, 58, 60, 60, 62, 62, 92, 92, 123, 123, 125, 125, 2, 0, 85, 85, 117, 117, 3, 0, 48, 57, 65, 70, 97, 102, 185, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 1, 57, 1, 0, 0, 0, 3, 61, 1, 0, 0, 0, 5, 65, 1, 0, 0, 0, 7, 68, 1, 0, 0, 0, 9, 72, 1, 0, 0, 0, 11, 78, 1, 0, 0, 0, 13, 80, 1, 0, 0, 0, 15, 82, 1, 0, 0, 0, 17, 84, 1, 0, 0, 0, 19, 86, 1, 0, 0, 0, 21, 91, 1, 0, 0, 0, 23, 105, 1, 0, 0, 0, 25, 115, 1, 0, 0, 0, 27, 119, 1, 0, 0, 0, 29, 121, 1, 0, 0, 0, 31, 123, 1, 0, 0, 0, 33, 126, 1, 0, 0, 0, 35, 128, 1, 0, 0, 0, 37, 144, 1, 0, 0, 0, 39, 150, 1, 0, 0, 0, 41, 152, 1, 0, 0, 0, 43, 160, 1, 0, 0, 0, 45, 162, 1, 0, 0, 0, 47, 164, 1, 0, 0, 0, 49, 167, 1, 0, 0, 0, 51, 170, 1, 0, 0, 0, 53, 173, 1, 0, 0, 0, 55, 179, 1, 0, 0, 0, 57, 58, 3, 41, 20, 0, 58, 59, 1, 0, 0, 0, 59, 60, 6, 0, 0, 0, 60, 2, 1, 0, 0, 0, 61, 62, 7, 0, 0, 0, 62, 63, 7, 1, 0, 0, 63, 64, 7, 2, 0, 0, 64, 4, 1, 0, 0, 0, 65, 66, 7, 3, 0, 0, 66, 67, 7, 4, 0, 0, 67, 6, 1, 0, 0, 0, 68, 69, 7, 1, 0, 0, 69, 70, 7, 3, 0, 0, 70, 71, 7, 5, 0, 0, 71, 8, 1, 0, 0, 0, 72, 73, 5, 58, 0, 0, 73, 10, 1, 0, 0, 0, 74, 79, 3, 29, 14, 0, 75, 79, 3, 33, 16, 0, 76, 79, 3, 31, 15, 0, 77, 79, 3, 35, 17, 0, 78, 74, 1, 0, 0, 0, 78, 75, 1, 0, 0, 0, 78, 76, 1, 0, 0, 0, 78, 77, 1, 0, 0, 0, 79, 12, 1, 0, 0, 0, 80, 81, 5, 40, 0, 0, 81, 14, 1, 0, 0, 0, 82, 83, 5, 41, 0, 0, 83, 16, 1, 0, 0, 0, 84, 85, 5, 123, 0, 0, 85, 18, 1, 0, 0, 0, 86, 87, 5, 125, 0, 0, 87, 20, 1, 0, 0, 0, 88, 90, 3, 25, 12, 0, 89, 88, 1, 0, 0, 0, 90, 93, 1, 0, 0, 0, 91, 89, 1, 0, 0, 0, 91, 92, 1, 0, 0, 0, 92, 95, 1, 0, 0, 0, 93, 91, 1, 0, 0, 0, 94, 96, 3, 37, 18, 0, 95, 94, 1, 0, 0, 0, 96, 97, 1, 0, 0, 0, 97, 95, 1, 0, 0, 0, 97, 98, 1, 0, 0, 0, 98, 102, 1, 0, 0, 0, 99, 101, 3, 25, 12, 0, 100, 99, 1, 0, 0, 0, 101, 104, 1, 0, 0, 0, 102, 100, 1, 0, 0, 0, 102, 103, 1, 0, 0, 0, 103, 22, 1, 0, 0, 0, 104, 102, 1, 0, 0, 0, 105, 109, 5, 34, 0, 0, 106, 108, 3, 39, 19, 0, 107, 106, 1, 0, 0, 0, 108, 111, 1, 0, 0, 0, 109, 107, 1, 0, 0, 0, 109, 110, 1, 0, 0, 0, 110, 112, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 112, 113, 5, 34, 0, 0, 113, 24, 1, 0, 0, 0, 114, 116, 3, 27, 13, 0, 115, 114, 1, 0, 0, 0, 116, 117, 1, 0, 0, 0, 117, 115, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 26, 1, 0, 0, 0, 119, 120, 5, 42, 0, 0, 120, 28, 1, 0, 0, 0, 121, 122, 5, 60, 0, 0, 122, 30, 1, 0, 0, 0, 123, 124, 5, 60, 0, 0, 124, 125, 5, 61, 0, 0, 125, 32, 1, 0, 0, 0, 126, 127, 5, 62, 0, 0, 127, 34, 1, 0, 0, 0, 128, 129, 5, 62, 0, 0, 129, 130, 5, 61, 0, 0, 130, 36, 1, 0, 0, 0, 131, 145, 3, 43, 21, 0, 132, 145, 3, 47, 23, 0, 133, 145, 3, 51, 25, 0, 134, 138, 5, 92, 0, 0, 135, 139, 3, 3, 1, 0, 136, 139, 3, 5, 2, 0, 137, 139, 3, 7, 3, 0, 138, 135, 1, 0, 0, 0, 138, 136, 1, 0, 0, 0, 138, 137, 1, 0, 0, 0, 139, 145, 1, 0, 0, 0, 140, 141, 3, 27, 13, 0, 141, 142, 3, 37, 18, 0, 142, 145, 1, 0, 0, 0, 143, 145, 3, 45, 22, 0, 144, 131, 1, 0, 0, 0, 144, 132, 1, 0, 0, 0, 144, 133, 1, 0, 0, 0, 144, 134, 1, 0, 0, 0, 144, 140, 1, 0, 0, 0, 144, 143, 1, 0, 0, 0, 145, 38, 1, 0, 0, 0, 146, 151, 3, 43, 21, 0, 147, 151, 3, 51, 25, 0, 148, 151, 3, 49, 24, 0, 149, 151, 8, 6, 0, 0, 150, 146, 1, 0, 0, 0, 150, 147, 1, 0, 0, 0, 150, 148, 1, 0, 0, 0, 150, 149, 1, 0, 0, 0, 151, 40, 1, 0, 0, 0, 152, 153, 7, 7, 0, 0, 153, 42, 1, 0, 0, 0, 154, 155, 5, 92, 0, 0, 155, 161, 7, 4, 0, 0, 156, 157, 5, 92, 0, 0, 157, 161, 7, 5, 0, 0, 158, 159, 5, 92, 0, 0, 159, 161, 7, 1, 0, 0, 160, 154, 1, 0, 0, 0, 160, 156, 1, 0, 0, 0, 160, 158, 1, 0, 0, 0, 161, 44, 1, 0, 0, 0, 162, 163, 8, 8, 0, 0, 163, 46, 1, 0, 0, 0, 164, 165, 5, 92, 0, 0, 165, 166, 7, 8, 0, 0, 166, 48, 1, 0, 0, 0, 167, 168, 5, 92, 0, 0, 168, 169, 5, 34, 0, 0, 169, 50, 1, 0, 0, 0, 170, 171, 5, 92, 0, 0, 171, 172, 3, 53, 26, 0, 172, 52, 1, 0, 0, 0, 173, 174, 7, 9, 0, 0, 174, 175, 3, 55, 27, 0, 175, 176, 3, 55, 27, 0, 176, 177, 3, 55, 27, 0, 177, 178, 3, 55, 27, 0, 178, 54, 1, 0, 0, 0, 179, 180, 7, 10, 0, 0, 180, 56, 1, 0, 0, 0, 11, 0, 78, 91, 97, 102, 109, 117, 138, 144, 150, 160, 1, 6, 0, 0] \ No newline at end of file +[4, 0, 16, 178, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 4, 13, 94, 8, 13, 11, 13, 12, 13, 95, 1, 14, 1, 14, 5, 14, 100, 8, 14, 10, 14, 12, 14, 103, 9, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 5, 17, 112, 8, 17, 10, 17, 12, 17, 115, 9, 17, 1, 17, 1, 17, 5, 17, 119, 8, 17, 10, 17, 12, 17, 122, 9, 17, 1, 17, 1, 17, 4, 17, 126, 8, 17, 11, 17, 12, 17, 127, 3, 17, 130, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 139, 8, 18, 1, 18, 3, 18, 142, 8, 18, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 148, 8, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 158, 8, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 0, 0, 28, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 0, 35, 0, 37, 0, 39, 0, 41, 0, 43, 0, 45, 0, 47, 0, 49, 0, 51, 0, 53, 0, 55, 0, 1, 0, 12, 2, 0, 65, 65, 97, 97, 2, 0, 78, 78, 110, 110, 2, 0, 68, 68, 100, 100, 2, 0, 79, 79, 111, 111, 2, 0, 82, 82, 114, 114, 2, 0, 84, 84, 116, 116, 1, 0, 34, 34, 4, 0, 9, 10, 13, 13, 32, 32, 12288, 12288, 12, 0, 9, 10, 13, 13, 32, 32, 34, 34, 40, 42, 58, 58, 60, 60, 62, 62, 92, 92, 123, 123, 125, 125, 12288, 12288, 9, 0, 32, 32, 34, 34, 40, 42, 58, 58, 60, 60, 62, 62, 92, 92, 123, 123, 125, 125, 2, 0, 85, 85, 117, 117, 3, 0, 48, 57, 65, 70, 97, 102, 182, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 1, 57, 1, 0, 0, 0, 3, 61, 1, 0, 0, 0, 5, 65, 1, 0, 0, 0, 7, 68, 1, 0, 0, 0, 9, 72, 1, 0, 0, 0, 11, 74, 1, 0, 0, 0, 13, 76, 1, 0, 0, 0, 15, 79, 1, 0, 0, 0, 17, 81, 1, 0, 0, 0, 19, 84, 1, 0, 0, 0, 21, 86, 1, 0, 0, 0, 23, 88, 1, 0, 0, 0, 25, 90, 1, 0, 0, 0, 27, 93, 1, 0, 0, 0, 29, 97, 1, 0, 0, 0, 31, 106, 1, 0, 0, 0, 33, 108, 1, 0, 0, 0, 35, 129, 1, 0, 0, 0, 37, 141, 1, 0, 0, 0, 39, 147, 1, 0, 0, 0, 41, 149, 1, 0, 0, 0, 43, 157, 1, 0, 0, 0, 45, 159, 1, 0, 0, 0, 47, 161, 1, 0, 0, 0, 49, 164, 1, 0, 0, 0, 51, 167, 1, 0, 0, 0, 53, 170, 1, 0, 0, 0, 55, 176, 1, 0, 0, 0, 57, 58, 3, 41, 20, 0, 58, 59, 1, 0, 0, 0, 59, 60, 6, 0, 0, 0, 60, 2, 1, 0, 0, 0, 61, 62, 7, 0, 0, 0, 62, 63, 7, 1, 0, 0, 63, 64, 7, 2, 0, 0, 64, 4, 1, 0, 0, 0, 65, 66, 7, 3, 0, 0, 66, 67, 7, 4, 0, 0, 67, 6, 1, 0, 0, 0, 68, 69, 7, 1, 0, 0, 69, 70, 7, 3, 0, 0, 70, 71, 7, 5, 0, 0, 71, 8, 1, 0, 0, 0, 72, 73, 5, 58, 0, 0, 73, 10, 1, 0, 0, 0, 74, 75, 5, 60, 0, 0, 75, 12, 1, 0, 0, 0, 76, 77, 5, 60, 0, 0, 77, 78, 5, 61, 0, 0, 78, 14, 1, 0, 0, 0, 79, 80, 5, 62, 0, 0, 80, 16, 1, 0, 0, 0, 81, 82, 5, 62, 0, 0, 82, 83, 5, 61, 0, 0, 83, 18, 1, 0, 0, 0, 84, 85, 5, 40, 0, 0, 85, 20, 1, 0, 0, 0, 86, 87, 5, 41, 0, 0, 87, 22, 1, 0, 0, 0, 88, 89, 5, 123, 0, 0, 89, 24, 1, 0, 0, 0, 90, 91, 5, 125, 0, 0, 91, 26, 1, 0, 0, 0, 92, 94, 3, 35, 17, 0, 93, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 93, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 28, 1, 0, 0, 0, 97, 101, 5, 34, 0, 0, 98, 100, 3, 39, 19, 0, 99, 98, 1, 0, 0, 0, 100, 103, 1, 0, 0, 0, 101, 99, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 104, 1, 0, 0, 0, 103, 101, 1, 0, 0, 0, 104, 105, 5, 34, 0, 0, 105, 30, 1, 0, 0, 0, 106, 107, 3, 33, 16, 0, 107, 32, 1, 0, 0, 0, 108, 109, 5, 42, 0, 0, 109, 34, 1, 0, 0, 0, 110, 112, 3, 33, 16, 0, 111, 110, 1, 0, 0, 0, 112, 115, 1, 0, 0, 0, 113, 111, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 116, 1, 0, 0, 0, 115, 113, 1, 0, 0, 0, 116, 120, 3, 37, 18, 0, 117, 119, 3, 33, 16, 0, 118, 117, 1, 0, 0, 0, 119, 122, 1, 0, 0, 0, 120, 118, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 130, 1, 0, 0, 0, 122, 120, 1, 0, 0, 0, 123, 125, 3, 33, 16, 0, 124, 126, 3, 33, 16, 0, 125, 124, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 125, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 130, 1, 0, 0, 0, 129, 113, 1, 0, 0, 0, 129, 123, 1, 0, 0, 0, 130, 36, 1, 0, 0, 0, 131, 142, 3, 43, 21, 0, 132, 142, 3, 47, 23, 0, 133, 142, 3, 51, 25, 0, 134, 138, 5, 92, 0, 0, 135, 139, 3, 3, 1, 0, 136, 139, 3, 5, 2, 0, 137, 139, 3, 7, 3, 0, 138, 135, 1, 0, 0, 0, 138, 136, 1, 0, 0, 0, 138, 137, 1, 0, 0, 0, 139, 142, 1, 0, 0, 0, 140, 142, 3, 45, 22, 0, 141, 131, 1, 0, 0, 0, 141, 132, 1, 0, 0, 0, 141, 133, 1, 0, 0, 0, 141, 134, 1, 0, 0, 0, 141, 140, 1, 0, 0, 0, 142, 38, 1, 0, 0, 0, 143, 148, 3, 43, 21, 0, 144, 148, 3, 51, 25, 0, 145, 148, 3, 49, 24, 0, 146, 148, 8, 6, 0, 0, 147, 143, 1, 0, 0, 0, 147, 144, 1, 0, 0, 0, 147, 145, 1, 0, 0, 0, 147, 146, 1, 0, 0, 0, 148, 40, 1, 0, 0, 0, 149, 150, 7, 7, 0, 0, 150, 42, 1, 0, 0, 0, 151, 152, 5, 92, 0, 0, 152, 158, 7, 4, 0, 0, 153, 154, 5, 92, 0, 0, 154, 158, 7, 5, 0, 0, 155, 156, 5, 92, 0, 0, 156, 158, 7, 1, 0, 0, 157, 151, 1, 0, 0, 0, 157, 153, 1, 0, 0, 0, 157, 155, 1, 0, 0, 0, 158, 44, 1, 0, 0, 0, 159, 160, 8, 8, 0, 0, 160, 46, 1, 0, 0, 0, 161, 162, 5, 92, 0, 0, 162, 163, 7, 9, 0, 0, 163, 48, 1, 0, 0, 0, 164, 165, 5, 92, 0, 0, 165, 166, 5, 34, 0, 0, 166, 50, 1, 0, 0, 0, 167, 168, 5, 92, 0, 0, 168, 169, 3, 53, 26, 0, 169, 52, 1, 0, 0, 0, 170, 171, 7, 10, 0, 0, 171, 172, 3, 55, 27, 0, 172, 173, 3, 55, 27, 0, 173, 174, 3, 55, 27, 0, 174, 175, 3, 55, 27, 0, 175, 54, 1, 0, 0, 0, 176, 177, 7, 11, 0, 0, 177, 56, 1, 0, 0, 0, 11, 0, 95, 101, 113, 120, 127, 129, 138, 141, 147, 157, 1, 6, 0, 0] \ No newline at end of file diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java index b397a412d5e8e..f9353afd6e114 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java @@ -25,9 +25,9 @@ class KqlBaseLexer extends Lexer { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_COMPARE=6, LEFT_PARENTHESIS=7, - RIGHT_PARENTHESIS=8, LEFT_CURLY_BRACKET=9, RIGHT_CURLY_BRACKET=10, UNQUOTED_LITERAL=11, - QUOTED_STRING=12, WILDCARD=13; + DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_LESS=6, OP_LESS_EQ=7, + OP_MORE=8, OP_MORE_EQ=9, LEFT_PARENTHESIS=10, RIGHT_PARENTHESIS=11, LEFT_CURLY_BRACKET=12, + RIGHT_CURLY_BRACKET=13, UNQUOTED_LITERAL=14, QUOTED_STRING=15, WILDCARD=16; public static String[] channelNames = { "DEFAULT_TOKEN_CHANNEL", "HIDDEN" }; @@ -38,28 +38,29 @@ class KqlBaseLexer extends Lexer { private static String[] makeRuleNames() { return new String[] { - "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_COMPARE", "LEFT_PARENTHESIS", - "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", - "QUOTED_STRING", "WILDCARD", "WILDCARD_CHAR", "OP_LESS", "OP_LESS_EQ", - "OP_MORE", "OP_MORE_EQ", "UNQUOTED_LITERAL_CHAR", "QUOTED_CHAR", "WHITESPACE", - "ESCAPED_WHITESPACE", "NON_SPECIAL_CHAR", "ESCAPED_SPECIAL_CHAR", "ESCAPED_QUOTE", - "ESCAPE_UNICODE_SEQUENCE", "UNICODE_SEQUENCE", "HEX_DIGIT" + "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_LESS", "OP_LESS_EQ", + "OP_MORE", "OP_MORE_EQ", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", + "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", "QUOTED_STRING", "WILDCARD", + "WILDCARD_CHAR", "UNQUOTED_LITERAL_CHAR", "UNQUOTED_LITERAL_BASE_CHAR", + "QUOTED_CHAR", "WHITESPACE", "ESCAPED_WHITESPACE", "NON_SPECIAL_CHAR", + "ESCAPED_SPECIAL_CHAR", "ESCAPED_QUOTE", "ESCAPE_UNICODE_SEQUENCE", "UNICODE_SEQUENCE", + "HEX_DIGIT" }; } public static final String[] ruleNames = makeRuleNames(); private static String[] makeLiteralNames() { return new String[] { - null, null, "'and'", "'or'", "'not'", "':'", null, "'('", "')'", "'{'", - "'}'" + null, null, "'and'", "'or'", "'not'", "':'", "'<'", "'<='", "'>'", "'>='", + "'('", "')'", "'{'", "'}'" }; } private static final String[] _LITERAL_NAMES = makeLiteralNames(); private static String[] makeSymbolicNames() { return new String[] { - null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_COMPARE", "LEFT_PARENTHESIS", - "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", - "QUOTED_STRING", "WILDCARD" + null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_LESS", "OP_LESS_EQ", + "OP_MORE", "OP_MORE_EQ", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", + "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", "QUOTED_STRING", "WILDCARD" }; } private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames(); @@ -121,119 +122,119 @@ public KqlBaseLexer(CharStream input) { public ATN getATN() { return _ATN; } public static final String _serializedATN = - "\u0004\u0000\r\u00b5\u0006\uffff\uffff\u0002\u0000\u0007\u0000\u0002\u0001"+ - "\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004"+ - "\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007"+ - "\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b"+ - "\u0007\u000b\u0002\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0002"+ - "\u000f\u0007\u000f\u0002\u0010\u0007\u0010\u0002\u0011\u0007\u0011\u0002"+ - "\u0012\u0007\u0012\u0002\u0013\u0007\u0013\u0002\u0014\u0007\u0014\u0002"+ - "\u0015\u0007\u0015\u0002\u0016\u0007\u0016\u0002\u0017\u0007\u0017\u0002"+ - "\u0018\u0007\u0018\u0002\u0019\u0007\u0019\u0002\u001a\u0007\u001a\u0002"+ - "\u001b\u0007\u001b\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001"+ - "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002\u0001"+ - "\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004\u0001"+ - "\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0003\u0005O\b"+ - "\u0005\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001\b\u0001\b"+ - "\u0001\t\u0001\t\u0001\n\u0005\nZ\b\n\n\n\f\n]\t\n\u0001\n\u0004\n`\b"+ - "\n\u000b\n\f\na\u0001\n\u0005\ne\b\n\n\n\f\nh\t\n\u0001\u000b\u0001\u000b"+ - "\u0005\u000bl\b\u000b\n\u000b\f\u000bo\t\u000b\u0001\u000b\u0001\u000b"+ - "\u0001\f\u0004\ft\b\f\u000b\f\f\fu\u0001\r\u0001\r\u0001\u000e\u0001\u000e"+ - "\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0011"+ - "\u0001\u0011\u0001\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012"+ - "\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u008b\b\u0012\u0001\u0012"+ - "\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u0091\b\u0012\u0001\u0013"+ - "\u0001\u0013\u0001\u0013\u0001\u0013\u0003\u0013\u0097\b\u0013\u0001\u0014"+ - "\u0001\u0014\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015"+ - "\u0001\u0015\u0003\u0015\u00a1\b\u0015\u0001\u0016\u0001\u0016\u0001\u0017"+ - "\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0019"+ - "\u0001\u0019\u0001\u0019\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a"+ - "\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0000\u0000\u001c\u0001"+ - "\u0001\u0003\u0002\u0005\u0003\u0007\u0004\t\u0005\u000b\u0006\r\u0007"+ - "\u000f\b\u0011\t\u0013\n\u0015\u000b\u0017\f\u0019\r\u001b\u0000\u001d"+ - "\u0000\u001f\u0000!\u0000#\u0000%\u0000\'\u0000)\u0000+\u0000-\u0000/"+ - "\u00001\u00003\u00005\u00007\u0000\u0001\u0000\u000b\u0002\u0000AAaa\u0002"+ - "\u0000NNnn\u0002\u0000DDdd\u0002\u0000OOoo\u0002\u0000RRrr\u0002\u0000"+ - "TTtt\u0001\u0000\"\"\u0004\u0000\t\n\r\r \u3000\u3000\t\u0000 \"\"("+ - "*::<<>>\\\\{{}}\u0002\u0000UUuu\u0003\u000009AFaf\u00b9\u0000\u0001\u0001"+ - "\u0000\u0000\u0000\u0000\u0003\u0001\u0000\u0000\u0000\u0000\u0005\u0001"+ - "\u0000\u0000\u0000\u0000\u0007\u0001\u0000\u0000\u0000\u0000\t\u0001\u0000"+ - "\u0000\u0000\u0000\u000b\u0001\u0000\u0000\u0000\u0000\r\u0001\u0000\u0000"+ - "\u0000\u0000\u000f\u0001\u0000\u0000\u0000\u0000\u0011\u0001\u0000\u0000"+ - "\u0000\u0000\u0013\u0001\u0000\u0000\u0000\u0000\u0015\u0001\u0000\u0000"+ - "\u0000\u0000\u0017\u0001\u0000\u0000\u0000\u0000\u0019\u0001\u0000\u0000"+ - "\u0000\u00019\u0001\u0000\u0000\u0000\u0003=\u0001\u0000\u0000\u0000\u0005"+ - "A\u0001\u0000\u0000\u0000\u0007D\u0001\u0000\u0000\u0000\tH\u0001\u0000"+ - "\u0000\u0000\u000bN\u0001\u0000\u0000\u0000\rP\u0001\u0000\u0000\u0000"+ - "\u000fR\u0001\u0000\u0000\u0000\u0011T\u0001\u0000\u0000\u0000\u0013V"+ - "\u0001\u0000\u0000\u0000\u0015[\u0001\u0000\u0000\u0000\u0017i\u0001\u0000"+ - "\u0000\u0000\u0019s\u0001\u0000\u0000\u0000\u001bw\u0001\u0000\u0000\u0000"+ - "\u001dy\u0001\u0000\u0000\u0000\u001f{\u0001\u0000\u0000\u0000!~\u0001"+ - "\u0000\u0000\u0000#\u0080\u0001\u0000\u0000\u0000%\u0090\u0001\u0000\u0000"+ - "\u0000\'\u0096\u0001\u0000\u0000\u0000)\u0098\u0001\u0000\u0000\u0000"+ - "+\u00a0\u0001\u0000\u0000\u0000-\u00a2\u0001\u0000\u0000\u0000/\u00a4"+ - "\u0001\u0000\u0000\u00001\u00a7\u0001\u0000\u0000\u00003\u00aa\u0001\u0000"+ - "\u0000\u00005\u00ad\u0001\u0000\u0000\u00007\u00b3\u0001\u0000\u0000\u0000"+ - "9:\u0003)\u0014\u0000:;\u0001\u0000\u0000\u0000;<\u0006\u0000\u0000\u0000"+ - "<\u0002\u0001\u0000\u0000\u0000=>\u0007\u0000\u0000\u0000>?\u0007\u0001"+ - "\u0000\u0000?@\u0007\u0002\u0000\u0000@\u0004\u0001\u0000\u0000\u0000"+ - "AB\u0007\u0003\u0000\u0000BC\u0007\u0004\u0000\u0000C\u0006\u0001\u0000"+ - "\u0000\u0000DE\u0007\u0001\u0000\u0000EF\u0007\u0003\u0000\u0000FG\u0007"+ - "\u0005\u0000\u0000G\b\u0001\u0000\u0000\u0000HI\u0005:\u0000\u0000I\n"+ - "\u0001\u0000\u0000\u0000JO\u0003\u001d\u000e\u0000KO\u0003!\u0010\u0000"+ - "LO\u0003\u001f\u000f\u0000MO\u0003#\u0011\u0000NJ\u0001\u0000\u0000\u0000"+ - "NK\u0001\u0000\u0000\u0000NL\u0001\u0000\u0000\u0000NM\u0001\u0000\u0000"+ - "\u0000O\f\u0001\u0000\u0000\u0000PQ\u0005(\u0000\u0000Q\u000e\u0001\u0000"+ - "\u0000\u0000RS\u0005)\u0000\u0000S\u0010\u0001\u0000\u0000\u0000TU\u0005"+ - "{\u0000\u0000U\u0012\u0001\u0000\u0000\u0000VW\u0005}\u0000\u0000W\u0014"+ - "\u0001\u0000\u0000\u0000XZ\u0003\u0019\f\u0000YX\u0001\u0000\u0000\u0000"+ - "Z]\u0001\u0000\u0000\u0000[Y\u0001\u0000\u0000\u0000[\\\u0001\u0000\u0000"+ - "\u0000\\_\u0001\u0000\u0000\u0000][\u0001\u0000\u0000\u0000^`\u0003%\u0012"+ - "\u0000_^\u0001\u0000\u0000\u0000`a\u0001\u0000\u0000\u0000a_\u0001\u0000"+ - "\u0000\u0000ab\u0001\u0000\u0000\u0000bf\u0001\u0000\u0000\u0000ce\u0003"+ - "\u0019\f\u0000dc\u0001\u0000\u0000\u0000eh\u0001\u0000\u0000\u0000fd\u0001"+ - "\u0000\u0000\u0000fg\u0001\u0000\u0000\u0000g\u0016\u0001\u0000\u0000"+ - "\u0000hf\u0001\u0000\u0000\u0000im\u0005\"\u0000\u0000jl\u0003\'\u0013"+ - "\u0000kj\u0001\u0000\u0000\u0000lo\u0001\u0000\u0000\u0000mk\u0001\u0000"+ - "\u0000\u0000mn\u0001\u0000\u0000\u0000np\u0001\u0000\u0000\u0000om\u0001"+ - "\u0000\u0000\u0000pq\u0005\"\u0000\u0000q\u0018\u0001\u0000\u0000\u0000"+ - "rt\u0003\u001b\r\u0000sr\u0001\u0000\u0000\u0000tu\u0001\u0000\u0000\u0000"+ - "us\u0001\u0000\u0000\u0000uv\u0001\u0000\u0000\u0000v\u001a\u0001\u0000"+ - "\u0000\u0000wx\u0005*\u0000\u0000x\u001c\u0001\u0000\u0000\u0000yz\u0005"+ - "<\u0000\u0000z\u001e\u0001\u0000\u0000\u0000{|\u0005<\u0000\u0000|}\u0005"+ - "=\u0000\u0000} \u0001\u0000\u0000\u0000~\u007f\u0005>\u0000\u0000\u007f"+ - "\"\u0001\u0000\u0000\u0000\u0080\u0081\u0005>\u0000\u0000\u0081\u0082"+ - "\u0005=\u0000\u0000\u0082$\u0001\u0000\u0000\u0000\u0083\u0091\u0003+"+ - "\u0015\u0000\u0084\u0091\u0003/\u0017\u0000\u0085\u0091\u00033\u0019\u0000"+ - "\u0086\u008a\u0005\\\u0000\u0000\u0087\u008b\u0003\u0003\u0001\u0000\u0088"+ - "\u008b\u0003\u0005\u0002\u0000\u0089\u008b\u0003\u0007\u0003\u0000\u008a"+ - "\u0087\u0001\u0000\u0000\u0000\u008a\u0088\u0001\u0000\u0000\u0000\u008a"+ - "\u0089\u0001\u0000\u0000\u0000\u008b\u0091\u0001\u0000\u0000\u0000\u008c"+ - "\u008d\u0003\u001b\r\u0000\u008d\u008e\u0003%\u0012\u0000\u008e\u0091"+ - "\u0001\u0000\u0000\u0000\u008f\u0091\u0003-\u0016\u0000\u0090\u0083\u0001"+ - "\u0000\u0000\u0000\u0090\u0084\u0001\u0000\u0000\u0000\u0090\u0085\u0001"+ - "\u0000\u0000\u0000\u0090\u0086\u0001\u0000\u0000\u0000\u0090\u008c\u0001"+ - "\u0000\u0000\u0000\u0090\u008f\u0001\u0000\u0000\u0000\u0091&\u0001\u0000"+ - "\u0000\u0000\u0092\u0097\u0003+\u0015\u0000\u0093\u0097\u00033\u0019\u0000"+ - "\u0094\u0097\u00031\u0018\u0000\u0095\u0097\b\u0006\u0000\u0000\u0096"+ - "\u0092\u0001\u0000\u0000\u0000\u0096\u0093\u0001\u0000\u0000\u0000\u0096"+ - "\u0094\u0001\u0000\u0000\u0000\u0096\u0095\u0001\u0000\u0000\u0000\u0097"+ - "(\u0001\u0000\u0000\u0000\u0098\u0099\u0007\u0007\u0000\u0000\u0099*\u0001"+ - "\u0000\u0000\u0000\u009a\u009b\u0005\\\u0000\u0000\u009b\u00a1\u0007\u0004"+ - "\u0000\u0000\u009c\u009d\u0005\\\u0000\u0000\u009d\u00a1\u0007\u0005\u0000"+ - "\u0000\u009e\u009f\u0005\\\u0000\u0000\u009f\u00a1\u0007\u0001\u0000\u0000"+ - "\u00a0\u009a\u0001\u0000\u0000\u0000\u00a0\u009c\u0001\u0000\u0000\u0000"+ - "\u00a0\u009e\u0001\u0000\u0000\u0000\u00a1,\u0001\u0000\u0000\u0000\u00a2"+ - "\u00a3\b\b\u0000\u0000\u00a3.\u0001\u0000\u0000\u0000\u00a4\u00a5\u0005"+ - "\\\u0000\u0000\u00a5\u00a6\u0007\b\u0000\u0000\u00a60\u0001\u0000\u0000"+ - "\u0000\u00a7\u00a8\u0005\\\u0000\u0000\u00a8\u00a9\u0005\"\u0000\u0000"+ - "\u00a92\u0001\u0000\u0000\u0000\u00aa\u00ab\u0005\\\u0000\u0000\u00ab"+ - "\u00ac\u00035\u001a\u0000\u00ac4\u0001\u0000\u0000\u0000\u00ad\u00ae\u0007"+ - "\t\u0000\u0000\u00ae\u00af\u00037\u001b\u0000\u00af\u00b0\u00037\u001b"+ - "\u0000\u00b0\u00b1\u00037\u001b\u0000\u00b1\u00b2\u00037\u001b\u0000\u00b2"+ - "6\u0001\u0000\u0000\u0000\u00b3\u00b4\u0007\n\u0000\u0000\u00b48\u0001"+ - "\u0000\u0000\u0000\u000b\u0000N[afmu\u008a\u0090\u0096\u00a0\u0001\u0006"+ - "\u0000\u0000"; + "\u0004\u0000\u0010\u00b2\u0006\uffff\uffff\u0002\u0000\u0007\u0000\u0002"+ + "\u0001\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002"+ + "\u0004\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002"+ + "\u0007\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002"+ + "\u000b\u0007\u000b\u0002\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e"+ + "\u0002\u000f\u0007\u000f\u0002\u0010\u0007\u0010\u0002\u0011\u0007\u0011"+ + "\u0002\u0012\u0007\u0012\u0002\u0013\u0007\u0013\u0002\u0014\u0007\u0014"+ + "\u0002\u0015\u0007\u0015\u0002\u0016\u0007\u0016\u0002\u0017\u0007\u0017"+ + "\u0002\u0018\u0007\u0018\u0002\u0019\u0007\u0019\u0002\u001a\u0007\u001a"+ + "\u0002\u001b\u0007\u001b\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000"+ + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002"+ + "\u0001\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004"+ + "\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006"+ + "\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\b\u0001\t\u0001\t\u0001"+ + "\n\u0001\n\u0001\u000b\u0001\u000b\u0001\f\u0001\f\u0001\r\u0004\r^\b"+ + "\r\u000b\r\f\r_\u0001\u000e\u0001\u000e\u0005\u000ed\b\u000e\n\u000e\f"+ + "\u000eg\t\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001\u000f\u0001"+ + "\u0010\u0001\u0010\u0001\u0011\u0005\u0011p\b\u0011\n\u0011\f\u0011s\t"+ + "\u0011\u0001\u0011\u0001\u0011\u0005\u0011w\b\u0011\n\u0011\f\u0011z\t"+ + "\u0011\u0001\u0011\u0001\u0011\u0004\u0011~\b\u0011\u000b\u0011\f\u0011"+ + "\u007f\u0003\u0011\u0082\b\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001"+ + "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u008b\b\u0012\u0001"+ + "\u0012\u0003\u0012\u008e\b\u0012\u0001\u0013\u0001\u0013\u0001\u0013\u0001"+ + "\u0013\u0003\u0013\u0094\b\u0013\u0001\u0014\u0001\u0014\u0001\u0015\u0001"+ + "\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0003\u0015\u009e"+ + "\b\u0015\u0001\u0016\u0001\u0016\u0001\u0017\u0001\u0017\u0001\u0017\u0001"+ + "\u0018\u0001\u0018\u0001\u0018\u0001\u0019\u0001\u0019\u0001\u0019\u0001"+ + "\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001"+ + "\u001b\u0001\u001b\u0000\u0000\u001c\u0001\u0001\u0003\u0002\u0005\u0003"+ + "\u0007\u0004\t\u0005\u000b\u0006\r\u0007\u000f\b\u0011\t\u0013\n\u0015"+ + "\u000b\u0017\f\u0019\r\u001b\u000e\u001d\u000f\u001f\u0010!\u0000#\u0000"+ + "%\u0000\'\u0000)\u0000+\u0000-\u0000/\u00001\u00003\u00005\u00007\u0000"+ + "\u0001\u0000\f\u0002\u0000AAaa\u0002\u0000NNnn\u0002\u0000DDdd\u0002\u0000"+ + "OOoo\u0002\u0000RRrr\u0002\u0000TTtt\u0001\u0000\"\"\u0004\u0000\t\n\r"+ + "\r \u3000\u3000\f\u0000\t\n\r\r \"\"(*::<<>>\\\\{{}}\u3000\u3000\t\u0000"+ + " \"\"(*::<<>>\\\\{{}}\u0002\u0000UUuu\u0003\u000009AFaf\u00b6\u0000\u0001"+ + "\u0001\u0000\u0000\u0000\u0000\u0003\u0001\u0000\u0000\u0000\u0000\u0005"+ + "\u0001\u0000\u0000\u0000\u0000\u0007\u0001\u0000\u0000\u0000\u0000\t\u0001"+ + "\u0000\u0000\u0000\u0000\u000b\u0001\u0000\u0000\u0000\u0000\r\u0001\u0000"+ + "\u0000\u0000\u0000\u000f\u0001\u0000\u0000\u0000\u0000\u0011\u0001\u0000"+ + "\u0000\u0000\u0000\u0013\u0001\u0000\u0000\u0000\u0000\u0015\u0001\u0000"+ + "\u0000\u0000\u0000\u0017\u0001\u0000\u0000\u0000\u0000\u0019\u0001\u0000"+ + "\u0000\u0000\u0000\u001b\u0001\u0000\u0000\u0000\u0000\u001d\u0001\u0000"+ + "\u0000\u0000\u0000\u001f\u0001\u0000\u0000\u0000\u00019\u0001\u0000\u0000"+ + "\u0000\u0003=\u0001\u0000\u0000\u0000\u0005A\u0001\u0000\u0000\u0000\u0007"+ + "D\u0001\u0000\u0000\u0000\tH\u0001\u0000\u0000\u0000\u000bJ\u0001\u0000"+ + "\u0000\u0000\rL\u0001\u0000\u0000\u0000\u000fO\u0001\u0000\u0000\u0000"+ + "\u0011Q\u0001\u0000\u0000\u0000\u0013T\u0001\u0000\u0000\u0000\u0015V"+ + "\u0001\u0000\u0000\u0000\u0017X\u0001\u0000\u0000\u0000\u0019Z\u0001\u0000"+ + "\u0000\u0000\u001b]\u0001\u0000\u0000\u0000\u001da\u0001\u0000\u0000\u0000"+ + "\u001fj\u0001\u0000\u0000\u0000!l\u0001\u0000\u0000\u0000#\u0081\u0001"+ + "\u0000\u0000\u0000%\u008d\u0001\u0000\u0000\u0000\'\u0093\u0001\u0000"+ + "\u0000\u0000)\u0095\u0001\u0000\u0000\u0000+\u009d\u0001\u0000\u0000\u0000"+ + "-\u009f\u0001\u0000\u0000\u0000/\u00a1\u0001\u0000\u0000\u00001\u00a4"+ + "\u0001\u0000\u0000\u00003\u00a7\u0001\u0000\u0000\u00005\u00aa\u0001\u0000"+ + "\u0000\u00007\u00b0\u0001\u0000\u0000\u00009:\u0003)\u0014\u0000:;\u0001"+ + "\u0000\u0000\u0000;<\u0006\u0000\u0000\u0000<\u0002\u0001\u0000\u0000"+ + "\u0000=>\u0007\u0000\u0000\u0000>?\u0007\u0001\u0000\u0000?@\u0007\u0002"+ + "\u0000\u0000@\u0004\u0001\u0000\u0000\u0000AB\u0007\u0003\u0000\u0000"+ + "BC\u0007\u0004\u0000\u0000C\u0006\u0001\u0000\u0000\u0000DE\u0007\u0001"+ + "\u0000\u0000EF\u0007\u0003\u0000\u0000FG\u0007\u0005\u0000\u0000G\b\u0001"+ + "\u0000\u0000\u0000HI\u0005:\u0000\u0000I\n\u0001\u0000\u0000\u0000JK\u0005"+ + "<\u0000\u0000K\f\u0001\u0000\u0000\u0000LM\u0005<\u0000\u0000MN\u0005"+ + "=\u0000\u0000N\u000e\u0001\u0000\u0000\u0000OP\u0005>\u0000\u0000P\u0010"+ + "\u0001\u0000\u0000\u0000QR\u0005>\u0000\u0000RS\u0005=\u0000\u0000S\u0012"+ + "\u0001\u0000\u0000\u0000TU\u0005(\u0000\u0000U\u0014\u0001\u0000\u0000"+ + "\u0000VW\u0005)\u0000\u0000W\u0016\u0001\u0000\u0000\u0000XY\u0005{\u0000"+ + "\u0000Y\u0018\u0001\u0000\u0000\u0000Z[\u0005}\u0000\u0000[\u001a\u0001"+ + "\u0000\u0000\u0000\\^\u0003#\u0011\u0000]\\\u0001\u0000\u0000\u0000^_"+ + "\u0001\u0000\u0000\u0000_]\u0001\u0000\u0000\u0000_`\u0001\u0000\u0000"+ + "\u0000`\u001c\u0001\u0000\u0000\u0000ae\u0005\"\u0000\u0000bd\u0003\'"+ + "\u0013\u0000cb\u0001\u0000\u0000\u0000dg\u0001\u0000\u0000\u0000ec\u0001"+ + "\u0000\u0000\u0000ef\u0001\u0000\u0000\u0000fh\u0001\u0000\u0000\u0000"+ + "ge\u0001\u0000\u0000\u0000hi\u0005\"\u0000\u0000i\u001e\u0001\u0000\u0000"+ + "\u0000jk\u0003!\u0010\u0000k \u0001\u0000\u0000\u0000lm\u0005*\u0000\u0000"+ + "m\"\u0001\u0000\u0000\u0000np\u0003!\u0010\u0000on\u0001\u0000\u0000\u0000"+ + "ps\u0001\u0000\u0000\u0000qo\u0001\u0000\u0000\u0000qr\u0001\u0000\u0000"+ + "\u0000rt\u0001\u0000\u0000\u0000sq\u0001\u0000\u0000\u0000tx\u0003%\u0012"+ + "\u0000uw\u0003!\u0010\u0000vu\u0001\u0000\u0000\u0000wz\u0001\u0000\u0000"+ + "\u0000xv\u0001\u0000\u0000\u0000xy\u0001\u0000\u0000\u0000y\u0082\u0001"+ + "\u0000\u0000\u0000zx\u0001\u0000\u0000\u0000{}\u0003!\u0010\u0000|~\u0003"+ + "!\u0010\u0000}|\u0001\u0000\u0000\u0000~\u007f\u0001\u0000\u0000\u0000"+ + "\u007f}\u0001\u0000\u0000\u0000\u007f\u0080\u0001\u0000\u0000\u0000\u0080"+ + "\u0082\u0001\u0000\u0000\u0000\u0081q\u0001\u0000\u0000\u0000\u0081{\u0001"+ + "\u0000\u0000\u0000\u0082$\u0001\u0000\u0000\u0000\u0083\u008e\u0003+\u0015"+ + "\u0000\u0084\u008e\u0003/\u0017\u0000\u0085\u008e\u00033\u0019\u0000\u0086"+ + "\u008a\u0005\\\u0000\u0000\u0087\u008b\u0003\u0003\u0001\u0000\u0088\u008b"+ + "\u0003\u0005\u0002\u0000\u0089\u008b\u0003\u0007\u0003\u0000\u008a\u0087"+ + "\u0001\u0000\u0000\u0000\u008a\u0088\u0001\u0000\u0000\u0000\u008a\u0089"+ + "\u0001\u0000\u0000\u0000\u008b\u008e\u0001\u0000\u0000\u0000\u008c\u008e"+ + "\u0003-\u0016\u0000\u008d\u0083\u0001\u0000\u0000\u0000\u008d\u0084\u0001"+ + "\u0000\u0000\u0000\u008d\u0085\u0001\u0000\u0000\u0000\u008d\u0086\u0001"+ + "\u0000\u0000\u0000\u008d\u008c\u0001\u0000\u0000\u0000\u008e&\u0001\u0000"+ + "\u0000\u0000\u008f\u0094\u0003+\u0015\u0000\u0090\u0094\u00033\u0019\u0000"+ + "\u0091\u0094\u00031\u0018\u0000\u0092\u0094\b\u0006\u0000\u0000\u0093"+ + "\u008f\u0001\u0000\u0000\u0000\u0093\u0090\u0001\u0000\u0000\u0000\u0093"+ + "\u0091\u0001\u0000\u0000\u0000\u0093\u0092\u0001\u0000\u0000\u0000\u0094"+ + "(\u0001\u0000\u0000\u0000\u0095\u0096\u0007\u0007\u0000\u0000\u0096*\u0001"+ + "\u0000\u0000\u0000\u0097\u0098\u0005\\\u0000\u0000\u0098\u009e\u0007\u0004"+ + "\u0000\u0000\u0099\u009a\u0005\\\u0000\u0000\u009a\u009e\u0007\u0005\u0000"+ + "\u0000\u009b\u009c\u0005\\\u0000\u0000\u009c\u009e\u0007\u0001\u0000\u0000"+ + "\u009d\u0097\u0001\u0000\u0000\u0000\u009d\u0099\u0001\u0000\u0000\u0000"+ + "\u009d\u009b\u0001\u0000\u0000\u0000\u009e,\u0001\u0000\u0000\u0000\u009f"+ + "\u00a0\b\b\u0000\u0000\u00a0.\u0001\u0000\u0000\u0000\u00a1\u00a2\u0005"+ + "\\\u0000\u0000\u00a2\u00a3\u0007\t\u0000\u0000\u00a30\u0001\u0000\u0000"+ + "\u0000\u00a4\u00a5\u0005\\\u0000\u0000\u00a5\u00a6\u0005\"\u0000\u0000"+ + "\u00a62\u0001\u0000\u0000\u0000\u00a7\u00a8\u0005\\\u0000\u0000\u00a8"+ + "\u00a9\u00035\u001a\u0000\u00a94\u0001\u0000\u0000\u0000\u00aa\u00ab\u0007"+ + "\n\u0000\u0000\u00ab\u00ac\u00037\u001b\u0000\u00ac\u00ad\u00037\u001b"+ + "\u0000\u00ad\u00ae\u00037\u001b\u0000\u00ae\u00af\u00037\u001b\u0000\u00af"+ + "6\u0001\u0000\u0000\u0000\u00b0\u00b1\u0007\u000b\u0000\u0000\u00b18\u0001"+ + "\u0000\u0000\u0000\u000b\u0000_eqx\u007f\u0081\u008a\u008d\u0093\u009d"+ + "\u0001\u0006\u0000\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java index bce2044fa8175..505569dbde58d 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java @@ -71,16 +71,6 @@ interface KqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx); - /** - * Enter a parse tree produced by {@link KqlBaseParser#expression}. - * @param ctx the parse tree - */ - void enterExpression(KqlBaseParser.ExpressionContext ctx); - /** - * Exit a parse tree produced by {@link KqlBaseParser#expression}. - * @param ctx the parse tree - */ - void exitExpression(KqlBaseParser.ExpressionContext ctx); /** * Enter a parse tree produced by {@link KqlBaseParser#nestedQuery}. * @param ctx the parse tree @@ -92,45 +82,35 @@ interface KqlBaseListener extends ParseTreeListener { */ void exitNestedQuery(KqlBaseParser.NestedQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. - * @param ctx the parse tree - */ - void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); - /** - * Exit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. - * @param ctx the parse tree - */ - void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); - /** - * Enter a parse tree produced by {@link KqlBaseParser#fieldRangeQuery}. + * Enter a parse tree produced by {@link KqlBaseParser#matchAllQuery}. * @param ctx the parse tree */ - void enterFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx); + void enterMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#fieldRangeQuery}. + * Exit a parse tree produced by {@link KqlBaseParser#matchAllQuery}. * @param ctx the parse tree */ - void exitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx); + void exitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#fieldTermQuery}. + * Enter a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. * @param ctx the parse tree */ - void enterFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx); + void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#fieldTermQuery}. + * Exit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. * @param ctx the parse tree */ - void exitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx); + void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#fieldName}. + * Enter a parse tree produced by {@link KqlBaseParser#rangeQuery}. * @param ctx the parse tree */ - void enterFieldName(KqlBaseParser.FieldNameContext ctx); + void enterRangeQuery(KqlBaseParser.RangeQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#fieldName}. + * Exit a parse tree produced by {@link KqlBaseParser#rangeQuery}. * @param ctx the parse tree */ - void exitFieldName(KqlBaseParser.FieldNameContext ctx); + void exitRangeQuery(KqlBaseParser.RangeQueryContext ctx); /** * Enter a parse tree produced by {@link KqlBaseParser#rangeQueryValue}. * @param ctx the parse tree @@ -142,53 +122,53 @@ interface KqlBaseListener extends ParseTreeListener { */ void exitRangeQueryValue(KqlBaseParser.RangeQueryValueContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#termQueryValue}. + * Enter a parse tree produced by {@link KqlBaseParser#existsQuery}. * @param ctx the parse tree */ - void enterTermQueryValue(KqlBaseParser.TermQueryValueContext ctx); + void enterExistsQuery(KqlBaseParser.ExistsQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#termQueryValue}. + * Exit a parse tree produced by {@link KqlBaseParser#existsQuery}. * @param ctx the parse tree */ - void exitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx); + void exitExistsQuery(KqlBaseParser.ExistsQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#groupingTermExpression}. + * Enter a parse tree produced by {@link KqlBaseParser#fieldQuery}. * @param ctx the parse tree */ - void enterGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx); + void enterFieldQuery(KqlBaseParser.FieldQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#groupingTermExpression}. + * Exit a parse tree produced by {@link KqlBaseParser#fieldQuery}. * @param ctx the parse tree */ - void exitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx); + void exitFieldQuery(KqlBaseParser.FieldQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#unquotedLiteralExpression}. + * Enter a parse tree produced by {@link KqlBaseParser#fieldLessQuery}. * @param ctx the parse tree */ - void enterUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx); + void enterFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#unquotedLiteralExpression}. + * Exit a parse tree produced by {@link KqlBaseParser#fieldLessQuery}. * @param ctx the parse tree */ - void exitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx); + void exitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#quotedStringExpression}. + * Enter a parse tree produced by {@link KqlBaseParser#fieldQueryValue}. * @param ctx the parse tree */ - void enterQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx); + void enterFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#quotedStringExpression}. + * Exit a parse tree produced by {@link KqlBaseParser#fieldQueryValue}. * @param ctx the parse tree */ - void exitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx); + void exitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#wildcardExpression}. + * Enter a parse tree produced by {@link KqlBaseParser#fieldName}. * @param ctx the parse tree */ - void enterWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx); + void enterFieldName(KqlBaseParser.FieldNameContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#wildcardExpression}. + * Exit a parse tree produced by {@link KqlBaseParser#fieldName}. * @param ctx the parse tree */ - void exitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx); + void exitFieldName(KqlBaseParser.FieldNameContext ctx); } diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java index 3bd9cc4104d2c..3ee44e389a371 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java @@ -25,37 +25,35 @@ class KqlBaseParser extends Parser { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_COMPARE=6, LEFT_PARENTHESIS=7, - RIGHT_PARENTHESIS=8, LEFT_CURLY_BRACKET=9, RIGHT_CURLY_BRACKET=10, UNQUOTED_LITERAL=11, - QUOTED_STRING=12, WILDCARD=13; + DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_LESS=6, OP_LESS_EQ=7, + OP_MORE=8, OP_MORE_EQ=9, LEFT_PARENTHESIS=10, RIGHT_PARENTHESIS=11, LEFT_CURLY_BRACKET=12, + RIGHT_CURLY_BRACKET=13, UNQUOTED_LITERAL=14, QUOTED_STRING=15, WILDCARD=16; public static final int - RULE_topLevelQuery = 0, RULE_query = 1, RULE_simpleQuery = 2, RULE_expression = 3, - RULE_nestedQuery = 4, RULE_parenthesizedQuery = 5, RULE_fieldRangeQuery = 6, - RULE_fieldTermQuery = 7, RULE_fieldName = 8, RULE_rangeQueryValue = 9, - RULE_termQueryValue = 10, RULE_groupingTermExpression = 11, RULE_unquotedLiteralExpression = 12, - RULE_quotedStringExpression = 13, RULE_wildcardExpression = 14; + RULE_topLevelQuery = 0, RULE_query = 1, RULE_simpleQuery = 2, RULE_nestedQuery = 3, + RULE_matchAllQuery = 4, RULE_parenthesizedQuery = 5, RULE_rangeQuery = 6, + RULE_rangeQueryValue = 7, RULE_existsQuery = 8, RULE_fieldQuery = 9, RULE_fieldLessQuery = 10, + RULE_fieldQueryValue = 11, RULE_fieldName = 12; private static String[] makeRuleNames() { return new String[] { - "topLevelQuery", "query", "simpleQuery", "expression", "nestedQuery", - "parenthesizedQuery", "fieldRangeQuery", "fieldTermQuery", "fieldName", - "rangeQueryValue", "termQueryValue", "groupingTermExpression", "unquotedLiteralExpression", - "quotedStringExpression", "wildcardExpression" + "topLevelQuery", "query", "simpleQuery", "nestedQuery", "matchAllQuery", + "parenthesizedQuery", "rangeQuery", "rangeQueryValue", "existsQuery", + "fieldQuery", "fieldLessQuery", "fieldQueryValue", "fieldName" }; } public static final String[] ruleNames = makeRuleNames(); private static String[] makeLiteralNames() { return new String[] { - null, null, "'and'", "'or'", "'not'", "':'", null, "'('", "')'", "'{'", - "'}'" + null, null, "'and'", "'or'", "'not'", "':'", "'<'", "'<='", "'>'", "'>='", + "'('", "')'", "'{'", "'}'" }; } private static final String[] _LITERAL_NAMES = makeLiteralNames(); private static String[] makeSymbolicNames() { return new String[] { - null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_COMPARE", "LEFT_PARENTHESIS", - "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", - "QUOTED_STRING", "WILDCARD" + null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_LESS", "OP_LESS_EQ", + "OP_MORE", "OP_MORE_EQ", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", + "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", "QUOTED_STRING", "WILDCARD" }; } private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames(); @@ -141,17 +139,17 @@ public final TopLevelQueryContext topLevelQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(31); + setState(27); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 14480L) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 115740L) != 0)) { { - setState(30); + setState(26); query(0); } } - setState(33); + setState(29); match(EOF); } } @@ -202,6 +200,7 @@ public T accept(ParseTreeVisitor visitor) { } @SuppressWarnings("CheckReturnValue") public static class BooleanQueryContext extends QueryContext { + public Token operator; public List query() { return getRuleContexts(QueryContext.class); } @@ -262,38 +261,33 @@ private QueryContext query(int _p) throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(39); + setState(35); _errHandler.sync(this); - switch (_input.LA(1)) { - case NOT: + switch ( getInterpreter().adaptivePredict(_input,1,_ctx) ) { + case 1: { _localctx = new NotQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(36); + setState(32); match(NOT); - setState(37); + setState(33); ((NotQueryContext)_localctx).subQuery = simpleQuery(); } break; - case LEFT_PARENTHESIS: - case UNQUOTED_LITERAL: - case QUOTED_STRING: - case WILDCARD: + case 2: { _localctx = new DefaultQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(38); + setState(34); simpleQuery(); } break; - default: - throw new NoViableAltException(this); } _ctx.stop = _input.LT(-1); - setState(46); + setState(42); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,2,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -304,24 +298,25 @@ private QueryContext query(int _p) throws RecognitionException { { _localctx = new BooleanQueryContext(new QueryContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_query); - setState(41); + setState(37); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(42); + setState(38); + ((BooleanQueryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==AND || _la==OR) ) { - _errHandler.recoverInline(this); + ((BooleanQueryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); } else { if ( _input.LA(1)==Token.EOF ) matchedEOF = true; _errHandler.reportMatch(this); consume(); } - setState(43); - query(4); + setState(39); + query(3); } } } - setState(48); + setState(44); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,2,_ctx); } @@ -343,12 +338,24 @@ public static class SimpleQueryContext extends ParserRuleContext { public NestedQueryContext nestedQuery() { return getRuleContext(NestedQueryContext.class,0); } - public ExpressionContext expression() { - return getRuleContext(ExpressionContext.class,0); - } public ParenthesizedQueryContext parenthesizedQuery() { return getRuleContext(ParenthesizedQueryContext.class,0); } + public MatchAllQueryContext matchAllQuery() { + return getRuleContext(MatchAllQueryContext.class,0); + } + public ExistsQueryContext existsQuery() { + return getRuleContext(ExistsQueryContext.class,0); + } + public RangeQueryContext rangeQuery() { + return getRuleContext(RangeQueryContext.class,0); + } + public FieldQueryContext fieldQuery() { + return getRuleContext(FieldQueryContext.class,0); + } + public FieldLessQueryContext fieldLessQuery() { + return getRuleContext(FieldLessQueryContext.class,0); + } public SimpleQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -378,83 +385,50 @@ public final SimpleQueryContext simpleQuery() throws RecognitionException { case 1: enterOuterAlt(_localctx, 1); { - setState(49); + setState(45); nestedQuery(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(50); - expression(); + setState(46); + parenthesizedQuery(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(51); - parenthesizedQuery(); + setState(47); + matchAllQuery(); } break; - } - } - catch (RecognitionException re) { - _localctx.exception = re; - _errHandler.reportError(this, re); - _errHandler.recover(this, re); - } - finally { - exitRule(); - } - return _localctx; - } - - @SuppressWarnings("CheckReturnValue") - public static class ExpressionContext extends ParserRuleContext { - public FieldTermQueryContext fieldTermQuery() { - return getRuleContext(FieldTermQueryContext.class,0); - } - public FieldRangeQueryContext fieldRangeQuery() { - return getRuleContext(FieldRangeQueryContext.class,0); - } - public ExpressionContext(ParserRuleContext parent, int invokingState) { - super(parent, invokingState); - } - @Override public int getRuleIndex() { return RULE_expression; } - @Override - public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterExpression(this); - } - @Override - public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitExpression(this); - } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitExpression(this); - else return visitor.visitChildren(this); - } - } - - public final ExpressionContext expression() throws RecognitionException { - ExpressionContext _localctx = new ExpressionContext(_ctx, getState()); - enterRule(_localctx, 6, RULE_expression); - try { - setState(56); - _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { - case 1: - enterOuterAlt(_localctx, 1); + case 4: + enterOuterAlt(_localctx, 4); { - setState(54); - fieldTermQuery(); + setState(48); + existsQuery(); } break; - case 2: - enterOuterAlt(_localctx, 2); + case 5: + enterOuterAlt(_localctx, 5); { - setState(55); - fieldRangeQuery(); + setState(49); + rangeQuery(); + } + break; + case 6: + enterOuterAlt(_localctx, 6); + { + setState(50); + fieldQuery(); + } + break; + case 7: + enterOuterAlt(_localctx, 7); + { + setState(51); + fieldLessQuery(); } break; } @@ -502,19 +476,19 @@ public T accept(ParseTreeVisitor visitor) { public final NestedQueryContext nestedQuery() throws RecognitionException { NestedQueryContext _localctx = new NestedQueryContext(_ctx, getState()); - enterRule(_localctx, 8, RULE_nestedQuery); + enterRule(_localctx, 6, RULE_nestedQuery); try { enterOuterAlt(_localctx, 1); { - setState(58); + setState(54); fieldName(); - setState(59); + setState(55); match(COLON); - setState(60); + setState(56); match(LEFT_CURLY_BRACKET); - setState(61); + setState(57); query(0); - setState(62); + setState(58); match(RIGHT_CURLY_BRACKET); } } @@ -530,43 +504,51 @@ public final NestedQueryContext nestedQuery() throws RecognitionException { } @SuppressWarnings("CheckReturnValue") - public static class ParenthesizedQueryContext extends ParserRuleContext { - public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } - public QueryContext query() { - return getRuleContext(QueryContext.class,0); + public static class MatchAllQueryContext extends ParserRuleContext { + public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } + public TerminalNode WILDCARD(int i) { + return getToken(KqlBaseParser.WILDCARD, i); } - public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } - public ParenthesizedQueryContext(ParserRuleContext parent, int invokingState) { + public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } + public MatchAllQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_parenthesizedQuery; } + @Override public int getRuleIndex() { return RULE_matchAllQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterParenthesizedQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterMatchAllQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitParenthesizedQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitMatchAllQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitParenthesizedQuery(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitMatchAllQuery(this); else return visitor.visitChildren(this); } } - public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionException { - ParenthesizedQueryContext _localctx = new ParenthesizedQueryContext(_ctx, getState()); - enterRule(_localctx, 10, RULE_parenthesizedQuery); + public final MatchAllQueryContext matchAllQuery() throws RecognitionException { + MatchAllQueryContext _localctx = new MatchAllQueryContext(_ctx, getState()); + enterRule(_localctx, 8, RULE_matchAllQuery); try { enterOuterAlt(_localctx, 1); { + setState(62); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { + case 1: + { + setState(60); + match(WILDCARD); + setState(61); + match(COLON); + } + break; + } setState(64); - match(LEFT_PARENTHESIS); - setState(65); - query(0); - setState(66); - match(RIGHT_PARENTHESIS); + match(WILDCARD); } } catch (RecognitionException re) { @@ -581,46 +563,43 @@ public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionEx } @SuppressWarnings("CheckReturnValue") - public static class FieldRangeQueryContext extends ParserRuleContext { - public Token operator; - public FieldNameContext fieldName() { - return getRuleContext(FieldNameContext.class,0); - } - public RangeQueryValueContext rangeQueryValue() { - return getRuleContext(RangeQueryValueContext.class,0); + public static class ParenthesizedQueryContext extends ParserRuleContext { + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } + public QueryContext query() { + return getRuleContext(QueryContext.class,0); } - public TerminalNode OP_COMPARE() { return getToken(KqlBaseParser.OP_COMPARE, 0); } - public FieldRangeQueryContext(ParserRuleContext parent, int invokingState) { + public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } + public ParenthesizedQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_fieldRangeQuery; } + @Override public int getRuleIndex() { return RULE_parenthesizedQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldRangeQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterParenthesizedQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldRangeQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitParenthesizedQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldRangeQuery(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitParenthesizedQuery(this); else return visitor.visitChildren(this); } } - public final FieldRangeQueryContext fieldRangeQuery() throws RecognitionException { - FieldRangeQueryContext _localctx = new FieldRangeQueryContext(_ctx, getState()); - enterRule(_localctx, 12, RULE_fieldRangeQuery); + public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionException { + ParenthesizedQueryContext _localctx = new ParenthesizedQueryContext(_ctx, getState()); + enterRule(_localctx, 10, RULE_parenthesizedQuery); try { enterOuterAlt(_localctx, 1); { + setState(66); + match(LEFT_PARENTHESIS); + setState(67); + query(0); setState(68); - fieldName(); - setState(69); - ((FieldRangeQueryContext)_localctx).operator = match(OP_COMPARE); - setState(70); - rangeQueryValue(); + match(RIGHT_PARENTHESIS); } } catch (RecognitionException re) { @@ -635,53 +614,59 @@ public final FieldRangeQueryContext fieldRangeQuery() throws RecognitionExceptio } @SuppressWarnings("CheckReturnValue") - public static class FieldTermQueryContext extends ParserRuleContext { - public TermQueryValueContext termQueryValue() { - return getRuleContext(TermQueryValueContext.class,0); - } + public static class RangeQueryContext extends ParserRuleContext { + public Token operator; public FieldNameContext fieldName() { return getRuleContext(FieldNameContext.class,0); } - public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } - public FieldTermQueryContext(ParserRuleContext parent, int invokingState) { + public RangeQueryValueContext rangeQueryValue() { + return getRuleContext(RangeQueryValueContext.class,0); + } + public TerminalNode OP_LESS() { return getToken(KqlBaseParser.OP_LESS, 0); } + public TerminalNode OP_LESS_EQ() { return getToken(KqlBaseParser.OP_LESS_EQ, 0); } + public TerminalNode OP_MORE() { return getToken(KqlBaseParser.OP_MORE, 0); } + public TerminalNode OP_MORE_EQ() { return getToken(KqlBaseParser.OP_MORE_EQ, 0); } + public RangeQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_fieldTermQuery; } + @Override public int getRuleIndex() { return RULE_rangeQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldTermQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterRangeQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldTermQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitRangeQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldTermQuery(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitRangeQuery(this); else return visitor.visitChildren(this); } } - public final FieldTermQueryContext fieldTermQuery() throws RecognitionException { - FieldTermQueryContext _localctx = new FieldTermQueryContext(_ctx, getState()); - enterRule(_localctx, 14, RULE_fieldTermQuery); + public final RangeQueryContext rangeQuery() throws RecognitionException { + RangeQueryContext _localctx = new RangeQueryContext(_ctx, getState()); + enterRule(_localctx, 12, RULE_rangeQuery); + int _la; try { enterOuterAlt(_localctx, 1); { - setState(75); - _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { - case 1: - { - setState(72); - fieldName(); - setState(73); - match(COLON); - } - break; + setState(70); + fieldName(); + setState(71); + ((RangeQueryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 960L) != 0)) ) { + ((RangeQueryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); } - setState(77); - termQueryValue(); + setState(72); + rangeQueryValue(); } } catch (RecognitionException re) { @@ -696,61 +681,83 @@ public final FieldTermQueryContext fieldTermQuery() throws RecognitionException } @SuppressWarnings("CheckReturnValue") - public static class FieldNameContext extends ParserRuleContext { - public WildcardExpressionContext wildcardExpression() { - return getRuleContext(WildcardExpressionContext.class,0); - } - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); + public static class RangeQueryValueContext extends ParserRuleContext { + public List UNQUOTED_LITERAL() { return getTokens(KqlBaseParser.UNQUOTED_LITERAL); } + public TerminalNode UNQUOTED_LITERAL(int i) { + return getToken(KqlBaseParser.UNQUOTED_LITERAL, i); } - public QuotedStringExpressionContext quotedStringExpression() { - return getRuleContext(QuotedStringExpressionContext.class,0); + public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } + public TerminalNode WILDCARD(int i) { + return getToken(KqlBaseParser.WILDCARD, i); } - public FieldNameContext(ParserRuleContext parent, int invokingState) { + public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } + public RangeQueryValueContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_fieldName; } + @Override public int getRuleIndex() { return RULE_rangeQueryValue; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldName(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterRangeQueryValue(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldName(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitRangeQueryValue(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldName(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitRangeQueryValue(this); else return visitor.visitChildren(this); } } - public final FieldNameContext fieldName() throws RecognitionException { - FieldNameContext _localctx = new FieldNameContext(_ctx, getState()); - enterRule(_localctx, 16, RULE_fieldName); + public final RangeQueryValueContext rangeQueryValue() throws RecognitionException { + RangeQueryValueContext _localctx = new RangeQueryValueContext(_ctx, getState()); + enterRule(_localctx, 14, RULE_rangeQueryValue); + int _la; try { - setState(82); + int _alt; + setState(80); _errHandler.sync(this); switch (_input.LA(1)) { + case UNQUOTED_LITERAL: case WILDCARD: enterOuterAlt(_localctx, 1); { - setState(79); - wildcardExpression(); - } - break; - case UNQUOTED_LITERAL: - enterOuterAlt(_localctx, 2); - { - setState(80); - unquotedLiteralExpression(); + setState(75); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(74); + _la = _input.LA(1); + if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(77); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,5,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; case QUOTED_STRING: - enterOuterAlt(_localctx, 3); + enterOuterAlt(_localctx, 2); { - setState(81); - quotedStringExpression(); + setState(79); + match(QUOTED_STRING); } break; default: @@ -769,55 +776,43 @@ public final FieldNameContext fieldName() throws RecognitionException { } @SuppressWarnings("CheckReturnValue") - public static class RangeQueryValueContext extends ParserRuleContext { - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); - } - public QuotedStringExpressionContext quotedStringExpression() { - return getRuleContext(QuotedStringExpressionContext.class,0); + public static class ExistsQueryContext extends ParserRuleContext { + public FieldNameContext fieldName() { + return getRuleContext(FieldNameContext.class,0); } - public RangeQueryValueContext(ParserRuleContext parent, int invokingState) { + public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } + public TerminalNode WILDCARD() { return getToken(KqlBaseParser.WILDCARD, 0); } + public ExistsQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_rangeQueryValue; } + @Override public int getRuleIndex() { return RULE_existsQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterRangeQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterExistsQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitRangeQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitExistsQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitRangeQueryValue(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitExistsQuery(this); else return visitor.visitChildren(this); } } - public final RangeQueryValueContext rangeQueryValue() throws RecognitionException { - RangeQueryValueContext _localctx = new RangeQueryValueContext(_ctx, getState()); - enterRule(_localctx, 18, RULE_rangeQueryValue); + public final ExistsQueryContext existsQuery() throws RecognitionException { + ExistsQueryContext _localctx = new ExistsQueryContext(_ctx, getState()); + enterRule(_localctx, 16, RULE_existsQuery); try { - setState(86); - _errHandler.sync(this); - switch (_input.LA(1)) { - case UNQUOTED_LITERAL: - enterOuterAlt(_localctx, 1); - { - setState(84); - unquotedLiteralExpression(); - } - break; - case QUOTED_STRING: - enterOuterAlt(_localctx, 2); - { - setState(85); - quotedStringExpression(); - } - break; - default: - throw new NoViableAltException(this); + enterOuterAlt(_localctx, 1); + { + setState(82); + fieldName(); + setState(83); + match(COLON); + setState(84); + match(WILDCARD); } } catch (RecognitionException re) { @@ -832,76 +827,68 @@ public final RangeQueryValueContext rangeQueryValue() throws RecognitionExceptio } @SuppressWarnings("CheckReturnValue") - public static class TermQueryValueContext extends ParserRuleContext { - public UnquotedLiteralExpressionContext termValue; - public WildcardExpressionContext wildcardExpression() { - return getRuleContext(WildcardExpressionContext.class,0); - } - public QuotedStringExpressionContext quotedStringExpression() { - return getRuleContext(QuotedStringExpressionContext.class,0); - } - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); + public static class FieldQueryContext extends ParserRuleContext { + public FieldNameContext fieldName() { + return getRuleContext(FieldNameContext.class,0); } - public GroupingTermExpressionContext groupingTermExpression() { - return getRuleContext(GroupingTermExpressionContext.class,0); + public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } + public FieldQueryValueContext fieldQueryValue() { + return getRuleContext(FieldQueryValueContext.class,0); } - public TermQueryValueContext(ParserRuleContext parent, int invokingState) { + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } + public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } + public FieldQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_termQueryValue; } + @Override public int getRuleIndex() { return RULE_fieldQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterTermQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitTermQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitTermQueryValue(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldQuery(this); else return visitor.visitChildren(this); } } - public final TermQueryValueContext termQueryValue() throws RecognitionException { - TermQueryValueContext _localctx = new TermQueryValueContext(_ctx, getState()); - enterRule(_localctx, 20, RULE_termQueryValue); + public final FieldQueryContext fieldQuery() throws RecognitionException { + FieldQueryContext _localctx = new FieldQueryContext(_ctx, getState()); + enterRule(_localctx, 18, RULE_fieldQuery); try { - setState(92); + setState(96); _errHandler.sync(this); - switch (_input.LA(1)) { - case WILDCARD: + switch ( getInterpreter().adaptivePredict(_input,7,_ctx) ) { + case 1: enterOuterAlt(_localctx, 1); { + setState(86); + fieldName(); + setState(87); + match(COLON); setState(88); - wildcardExpression(); + fieldQueryValue(); } break; - case QUOTED_STRING: + case 2: enterOuterAlt(_localctx, 2); { - setState(89); - quotedStringExpression(); - } - break; - case UNQUOTED_LITERAL: - enterOuterAlt(_localctx, 3); - { setState(90); - ((TermQueryValueContext)_localctx).termValue = unquotedLiteralExpression(); - } - break; - case LEFT_PARENTHESIS: - enterOuterAlt(_localctx, 4); - { + fieldName(); setState(91); - groupingTermExpression(); + match(COLON); + setState(92); + match(LEFT_PARENTHESIS); + setState(93); + fieldQueryValue(); + setState(94); + match(RIGHT_PARENTHESIS); } break; - default: - throw new NoViableAltException(this); } } catch (RecognitionException re) { @@ -916,43 +903,63 @@ public final TermQueryValueContext termQueryValue() throws RecognitionException } @SuppressWarnings("CheckReturnValue") - public static class GroupingTermExpressionContext extends ParserRuleContext { - public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); + public static class FieldLessQueryContext extends ParserRuleContext { + public FieldQueryValueContext fieldQueryValue() { + return getRuleContext(FieldQueryValueContext.class,0); } + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } - public GroupingTermExpressionContext(ParserRuleContext parent, int invokingState) { + public FieldLessQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_groupingTermExpression; } + @Override public int getRuleIndex() { return RULE_fieldLessQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterGroupingTermExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldLessQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitGroupingTermExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldLessQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitGroupingTermExpression(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldLessQuery(this); else return visitor.visitChildren(this); } } - public final GroupingTermExpressionContext groupingTermExpression() throws RecognitionException { - GroupingTermExpressionContext _localctx = new GroupingTermExpressionContext(_ctx, getState()); - enterRule(_localctx, 22, RULE_groupingTermExpression); + public final FieldLessQueryContext fieldLessQuery() throws RecognitionException { + FieldLessQueryContext _localctx = new FieldLessQueryContext(_ctx, getState()); + enterRule(_localctx, 20, RULE_fieldLessQuery); try { - enterOuterAlt(_localctx, 1); - { - setState(94); - match(LEFT_PARENTHESIS); - setState(95); - unquotedLiteralExpression(); - setState(96); - match(RIGHT_PARENTHESIS); + setState(103); + _errHandler.sync(this); + switch (_input.LA(1)) { + case AND: + case OR: + case NOT: + case UNQUOTED_LITERAL: + case QUOTED_STRING: + case WILDCARD: + enterOuterAlt(_localctx, 1); + { + setState(98); + fieldQueryValue(); + } + break; + case LEFT_PARENTHESIS: + enterOuterAlt(_localctx, 2); + { + setState(99); + match(LEFT_PARENTHESIS); + setState(100); + fieldQueryValue(); + setState(101); + match(RIGHT_PARENTHESIS); + } + break; + default: + throw new NoViableAltException(this); } } catch (RecognitionException re) { @@ -967,57 +974,171 @@ public final GroupingTermExpressionContext groupingTermExpression() throws Recog } @SuppressWarnings("CheckReturnValue") - public static class UnquotedLiteralExpressionContext extends ParserRuleContext { + public static class FieldQueryValueContext extends ParserRuleContext { + public TerminalNode AND() { return getToken(KqlBaseParser.AND, 0); } + public TerminalNode OR() { return getToken(KqlBaseParser.OR, 0); } public List UNQUOTED_LITERAL() { return getTokens(KqlBaseParser.UNQUOTED_LITERAL); } public TerminalNode UNQUOTED_LITERAL(int i) { return getToken(KqlBaseParser.UNQUOTED_LITERAL, i); } - public UnquotedLiteralExpressionContext(ParserRuleContext parent, int invokingState) { + public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } + public TerminalNode WILDCARD(int i) { + return getToken(KqlBaseParser.WILDCARD, i); + } + public TerminalNode NOT() { return getToken(KqlBaseParser.NOT, 0); } + public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } + public FieldQueryValueContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_unquotedLiteralExpression; } + @Override public int getRuleIndex() { return RULE_fieldQueryValue; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterUnquotedLiteralExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldQueryValue(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitUnquotedLiteralExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldQueryValue(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitUnquotedLiteralExpression(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldQueryValue(this); else return visitor.visitChildren(this); } } - public final UnquotedLiteralExpressionContext unquotedLiteralExpression() throws RecognitionException { - UnquotedLiteralExpressionContext _localctx = new UnquotedLiteralExpressionContext(_ctx, getState()); - enterRule(_localctx, 24, RULE_unquotedLiteralExpression); + public final FieldQueryValueContext fieldQueryValue() throws RecognitionException { + FieldQueryValueContext _localctx = new FieldQueryValueContext(_ctx, getState()); + enterRule(_localctx, 22, RULE_fieldQueryValue); + int _la; try { int _alt; - enterOuterAlt(_localctx, 1); - { - setState(99); + setState(123); _errHandler.sync(this); - _alt = 1; - do { - switch (_alt) { - case 1: + switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(106); + _errHandler.sync(this); + _la = _input.LA(1); + if (_la==AND || _la==OR) { { + setState(105); + _la = _input.LA(1); + if ( !(_la==AND || _la==OR) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + + setState(109); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(108); + _la = _input.LA(1); + if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(111); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,10,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(114); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(113); + _la = _input.LA(1); + if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(116); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,11,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + setState(119); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { + case 1: { - setState(98); - match(UNQUOTED_LITERAL); + setState(118); + _la = _input.LA(1); + if ( !(_la==AND || _la==OR) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); } } break; - default: - throw new NoViableAltException(this); } - setState(101); - _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,9,_ctx); - } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(121); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + break; + case 4: + enterOuterAlt(_localctx, 4); + { + setState(122); + match(QUOTED_STRING); + } + break; } } catch (RecognitionException re) { @@ -1032,78 +1153,76 @@ public final UnquotedLiteralExpressionContext unquotedLiteralExpression() throws } @SuppressWarnings("CheckReturnValue") - public static class QuotedStringExpressionContext extends ParserRuleContext { - public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } - public QuotedStringExpressionContext(ParserRuleContext parent, int invokingState) { - super(parent, invokingState); - } - @Override public int getRuleIndex() { return RULE_quotedStringExpression; } - @Override - public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterQuotedStringExpression(this); - } - @Override - public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitQuotedStringExpression(this); - } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitQuotedStringExpression(this); - else return visitor.visitChildren(this); - } - } - - public final QuotedStringExpressionContext quotedStringExpression() throws RecognitionException { - QuotedStringExpressionContext _localctx = new QuotedStringExpressionContext(_ctx, getState()); - enterRule(_localctx, 26, RULE_quotedStringExpression); - try { - enterOuterAlt(_localctx, 1); - { - setState(103); - match(QUOTED_STRING); - } - } - catch (RecognitionException re) { - _localctx.exception = re; - _errHandler.reportError(this, re); - _errHandler.recover(this, re); - } - finally { - exitRule(); + public static class FieldNameContext extends ParserRuleContext { + public Token value; + public List UNQUOTED_LITERAL() { return getTokens(KqlBaseParser.UNQUOTED_LITERAL); } + public TerminalNode UNQUOTED_LITERAL(int i) { + return getToken(KqlBaseParser.UNQUOTED_LITERAL, i); } - return _localctx; - } - - @SuppressWarnings("CheckReturnValue") - public static class WildcardExpressionContext extends ParserRuleContext { + public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } public TerminalNode WILDCARD() { return getToken(KqlBaseParser.WILDCARD, 0); } - public WildcardExpressionContext(ParserRuleContext parent, int invokingState) { + public FieldNameContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_wildcardExpression; } + @Override public int getRuleIndex() { return RULE_fieldName; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterWildcardExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldName(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitWildcardExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldName(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitWildcardExpression(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldName(this); else return visitor.visitChildren(this); } } - public final WildcardExpressionContext wildcardExpression() throws RecognitionException { - WildcardExpressionContext _localctx = new WildcardExpressionContext(_ctx, getState()); - enterRule(_localctx, 28, RULE_wildcardExpression); + public final FieldNameContext fieldName() throws RecognitionException { + FieldNameContext _localctx = new FieldNameContext(_ctx, getState()); + enterRule(_localctx, 24, RULE_fieldName); + int _la; try { - enterOuterAlt(_localctx, 1); - { - setState(105); - match(WILDCARD); + setState(132); + _errHandler.sync(this); + switch (_input.LA(1)) { + case UNQUOTED_LITERAL: + enterOuterAlt(_localctx, 1); + { + setState(126); + _errHandler.sync(this); + _la = _input.LA(1); + do { + { + { + setState(125); + ((FieldNameContext)_localctx).value = match(UNQUOTED_LITERAL); + } + } + setState(128); + _errHandler.sync(this); + _la = _input.LA(1); + } while ( _la==UNQUOTED_LITERAL ); + } + break; + case QUOTED_STRING: + enterOuterAlt(_localctx, 2); + { + setState(130); + ((FieldNameContext)_localctx).value = match(QUOTED_STRING); + } + break; + case WILDCARD: + enterOuterAlt(_localctx, 3); + { + setState(131); + ((FieldNameContext)_localctx).value = match(WILDCARD); + } + break; + default: + throw new NoViableAltException(this); } } catch (RecognitionException re) { @@ -1133,65 +1252,86 @@ private boolean query_sempred(QueryContext _localctx, int predIndex) { } public static final String _serializedATN = - "\u0004\u0001\rl\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001\u0002"+ - "\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004\u0002"+ - "\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007\u0002"+ - "\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b\u0002"+ - "\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0001\u0000\u0003\u0000"+ - " \b\u0000\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0003\u0001(\b\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0005\u0001-\b\u0001\n\u0001\f\u00010\t\u0001\u0001\u0002\u0001\u0002"+ - "\u0001\u0002\u0003\u00025\b\u0002\u0001\u0003\u0001\u0003\u0003\u0003"+ - "9\b\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004"+ - "\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0006"+ - "\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001\u0007"+ - "\u0003\u0007L\b\u0007\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\b"+ - "\u0003\bS\b\b\u0001\t\u0001\t\u0003\tW\b\t\u0001\n\u0001\n\u0001\n\u0001"+ - "\n\u0003\n]\b\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001"+ - "\f\u0004\fd\b\f\u000b\f\f\fe\u0001\r\u0001\r\u0001\u000e\u0001\u000e\u0001"+ - "\u000e\u0000\u0001\u0002\u000f\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010"+ - "\u0012\u0014\u0016\u0018\u001a\u001c\u0000\u0001\u0001\u0000\u0002\u0003"+ - "j\u0000\u001f\u0001\u0000\u0000\u0000\u0002\'\u0001\u0000\u0000\u0000"+ - "\u00044\u0001\u0000\u0000\u0000\u00068\u0001\u0000\u0000\u0000\b:\u0001"+ - "\u0000\u0000\u0000\n@\u0001\u0000\u0000\u0000\fD\u0001\u0000\u0000\u0000"+ - "\u000eK\u0001\u0000\u0000\u0000\u0010R\u0001\u0000\u0000\u0000\u0012V"+ - "\u0001\u0000\u0000\u0000\u0014\\\u0001\u0000\u0000\u0000\u0016^\u0001"+ - "\u0000\u0000\u0000\u0018c\u0001\u0000\u0000\u0000\u001ag\u0001\u0000\u0000"+ - "\u0000\u001ci\u0001\u0000\u0000\u0000\u001e \u0003\u0002\u0001\u0000\u001f"+ - "\u001e\u0001\u0000\u0000\u0000\u001f \u0001\u0000\u0000\u0000 !\u0001"+ - "\u0000\u0000\u0000!\"\u0005\u0000\u0000\u0001\"\u0001\u0001\u0000\u0000"+ - "\u0000#$\u0006\u0001\uffff\uffff\u0000$%\u0005\u0004\u0000\u0000%(\u0003"+ - "\u0004\u0002\u0000&(\u0003\u0004\u0002\u0000\'#\u0001\u0000\u0000\u0000"+ - "\'&\u0001\u0000\u0000\u0000(.\u0001\u0000\u0000\u0000)*\n\u0003\u0000"+ - "\u0000*+\u0007\u0000\u0000\u0000+-\u0003\u0002\u0001\u0004,)\u0001\u0000"+ - "\u0000\u0000-0\u0001\u0000\u0000\u0000.,\u0001\u0000\u0000\u0000./\u0001"+ - "\u0000\u0000\u0000/\u0003\u0001\u0000\u0000\u00000.\u0001\u0000\u0000"+ - "\u000015\u0003\b\u0004\u000025\u0003\u0006\u0003\u000035\u0003\n\u0005"+ - "\u000041\u0001\u0000\u0000\u000042\u0001\u0000\u0000\u000043\u0001\u0000"+ - "\u0000\u00005\u0005\u0001\u0000\u0000\u000069\u0003\u000e\u0007\u0000"+ - "79\u0003\f\u0006\u000086\u0001\u0000\u0000\u000087\u0001\u0000\u0000\u0000"+ - "9\u0007\u0001\u0000\u0000\u0000:;\u0003\u0010\b\u0000;<\u0005\u0005\u0000"+ - "\u0000<=\u0005\t\u0000\u0000=>\u0003\u0002\u0001\u0000>?\u0005\n\u0000"+ - "\u0000?\t\u0001\u0000\u0000\u0000@A\u0005\u0007\u0000\u0000AB\u0003\u0002"+ - "\u0001\u0000BC\u0005\b\u0000\u0000C\u000b\u0001\u0000\u0000\u0000DE\u0003"+ - "\u0010\b\u0000EF\u0005\u0006\u0000\u0000FG\u0003\u0012\t\u0000G\r\u0001"+ - "\u0000\u0000\u0000HI\u0003\u0010\b\u0000IJ\u0005\u0005\u0000\u0000JL\u0001"+ - "\u0000\u0000\u0000KH\u0001\u0000\u0000\u0000KL\u0001\u0000\u0000\u0000"+ - "LM\u0001\u0000\u0000\u0000MN\u0003\u0014\n\u0000N\u000f\u0001\u0000\u0000"+ - "\u0000OS\u0003\u001c\u000e\u0000PS\u0003\u0018\f\u0000QS\u0003\u001a\r"+ - "\u0000RO\u0001\u0000\u0000\u0000RP\u0001\u0000\u0000\u0000RQ\u0001\u0000"+ - "\u0000\u0000S\u0011\u0001\u0000\u0000\u0000TW\u0003\u0018\f\u0000UW\u0003"+ - "\u001a\r\u0000VT\u0001\u0000\u0000\u0000VU\u0001\u0000\u0000\u0000W\u0013"+ - "\u0001\u0000\u0000\u0000X]\u0003\u001c\u000e\u0000Y]\u0003\u001a\r\u0000"+ - "Z]\u0003\u0018\f\u0000[]\u0003\u0016\u000b\u0000\\X\u0001\u0000\u0000"+ - "\u0000\\Y\u0001\u0000\u0000\u0000\\Z\u0001\u0000\u0000\u0000\\[\u0001"+ - "\u0000\u0000\u0000]\u0015\u0001\u0000\u0000\u0000^_\u0005\u0007\u0000"+ - "\u0000_`\u0003\u0018\f\u0000`a\u0005\b\u0000\u0000a\u0017\u0001\u0000"+ - "\u0000\u0000bd\u0005\u000b\u0000\u0000cb\u0001\u0000\u0000\u0000de\u0001"+ - "\u0000\u0000\u0000ec\u0001\u0000\u0000\u0000ef\u0001\u0000\u0000\u0000"+ - "f\u0019\u0001\u0000\u0000\u0000gh\u0005\f\u0000\u0000h\u001b\u0001\u0000"+ - "\u0000\u0000ij\u0005\r\u0000\u0000j\u001d\u0001\u0000\u0000\u0000\n\u001f"+ - "\'.48KRV\\e"; + "\u0004\u0001\u0010\u0087\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ + "\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004"+ + "\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007"+ + "\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b"+ + "\u0002\f\u0007\f\u0001\u0000\u0003\u0000\u001c\b\u0000\u0001\u0000\u0001"+ + "\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0003\u0001$\b"+ + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001)\b\u0001\n\u0001"+ + "\f\u0001,\t\u0001\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001"+ + "\u0002\u0001\u0002\u0001\u0002\u0003\u00025\b\u0002\u0001\u0003\u0001"+ + "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004\u0001"+ + "\u0004\u0003\u0004?\b\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006\u0001"+ + "\u0006\u0001\u0007\u0004\u0007L\b\u0007\u000b\u0007\f\u0007M\u0001\u0007"+ + "\u0003\u0007Q\b\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0001\t\u0001\t"+ + "\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0003"+ + "\ta\b\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003\nh\b\n\u0001\u000b"+ + "\u0003\u000bk\b\u000b\u0001\u000b\u0004\u000bn\b\u000b\u000b\u000b\f\u000b"+ + "o\u0001\u000b\u0004\u000bs\b\u000b\u000b\u000b\f\u000bt\u0001\u000b\u0003"+ + "\u000bx\b\u000b\u0001\u000b\u0001\u000b\u0003\u000b|\b\u000b\u0001\f\u0004"+ + "\f\u007f\b\f\u000b\f\f\f\u0080\u0001\f\u0001\f\u0003\f\u0085\b\f\u0001"+ + "\f\u0000\u0001\u0002\r\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010\u0012"+ + "\u0014\u0016\u0018\u0000\u0004\u0001\u0000\u0002\u0003\u0001\u0000\u0006"+ + "\t\u0002\u0000\u000e\u000e\u0010\u0010\u0001\u0000\u0002\u0004\u0091\u0000"+ + "\u001b\u0001\u0000\u0000\u0000\u0002#\u0001\u0000\u0000\u0000\u00044\u0001"+ + "\u0000\u0000\u0000\u00066\u0001\u0000\u0000\u0000\b>\u0001\u0000\u0000"+ + "\u0000\nB\u0001\u0000\u0000\u0000\fF\u0001\u0000\u0000\u0000\u000eP\u0001"+ + "\u0000\u0000\u0000\u0010R\u0001\u0000\u0000\u0000\u0012`\u0001\u0000\u0000"+ + "\u0000\u0014g\u0001\u0000\u0000\u0000\u0016{\u0001\u0000\u0000\u0000\u0018"+ + "\u0084\u0001\u0000\u0000\u0000\u001a\u001c\u0003\u0002\u0001\u0000\u001b"+ + "\u001a\u0001\u0000\u0000\u0000\u001b\u001c\u0001\u0000\u0000\u0000\u001c"+ + "\u001d\u0001\u0000\u0000\u0000\u001d\u001e\u0005\u0000\u0000\u0001\u001e"+ + "\u0001\u0001\u0000\u0000\u0000\u001f \u0006\u0001\uffff\uffff\u0000 !"+ + "\u0005\u0004\u0000\u0000!$\u0003\u0004\u0002\u0000\"$\u0003\u0004\u0002"+ + "\u0000#\u001f\u0001\u0000\u0000\u0000#\"\u0001\u0000\u0000\u0000$*\u0001"+ + "\u0000\u0000\u0000%&\n\u0003\u0000\u0000&\'\u0007\u0000\u0000\u0000\'"+ + ")\u0003\u0002\u0001\u0003(%\u0001\u0000\u0000\u0000),\u0001\u0000\u0000"+ + "\u0000*(\u0001\u0000\u0000\u0000*+\u0001\u0000\u0000\u0000+\u0003\u0001"+ + "\u0000\u0000\u0000,*\u0001\u0000\u0000\u0000-5\u0003\u0006\u0003\u0000"+ + ".5\u0003\n\u0005\u0000/5\u0003\b\u0004\u000005\u0003\u0010\b\u000015\u0003"+ + "\f\u0006\u000025\u0003\u0012\t\u000035\u0003\u0014\n\u00004-\u0001\u0000"+ + "\u0000\u00004.\u0001\u0000\u0000\u00004/\u0001\u0000\u0000\u000040\u0001"+ + "\u0000\u0000\u000041\u0001\u0000\u0000\u000042\u0001\u0000\u0000\u0000"+ + "43\u0001\u0000\u0000\u00005\u0005\u0001\u0000\u0000\u000067\u0003\u0018"+ + "\f\u000078\u0005\u0005\u0000\u000089\u0005\f\u0000\u00009:\u0003\u0002"+ + "\u0001\u0000:;\u0005\r\u0000\u0000;\u0007\u0001\u0000\u0000\u0000<=\u0005"+ + "\u0010\u0000\u0000=?\u0005\u0005\u0000\u0000><\u0001\u0000\u0000\u0000"+ + ">?\u0001\u0000\u0000\u0000?@\u0001\u0000\u0000\u0000@A\u0005\u0010\u0000"+ + "\u0000A\t\u0001\u0000\u0000\u0000BC\u0005\n\u0000\u0000CD\u0003\u0002"+ + "\u0001\u0000DE\u0005\u000b\u0000\u0000E\u000b\u0001\u0000\u0000\u0000"+ + "FG\u0003\u0018\f\u0000GH\u0007\u0001\u0000\u0000HI\u0003\u000e\u0007\u0000"+ + "I\r\u0001\u0000\u0000\u0000JL\u0007\u0002\u0000\u0000KJ\u0001\u0000\u0000"+ + "\u0000LM\u0001\u0000\u0000\u0000MK\u0001\u0000\u0000\u0000MN\u0001\u0000"+ + "\u0000\u0000NQ\u0001\u0000\u0000\u0000OQ\u0005\u000f\u0000\u0000PK\u0001"+ + "\u0000\u0000\u0000PO\u0001\u0000\u0000\u0000Q\u000f\u0001\u0000\u0000"+ + "\u0000RS\u0003\u0018\f\u0000ST\u0005\u0005\u0000\u0000TU\u0005\u0010\u0000"+ + "\u0000U\u0011\u0001\u0000\u0000\u0000VW\u0003\u0018\f\u0000WX\u0005\u0005"+ + "\u0000\u0000XY\u0003\u0016\u000b\u0000Ya\u0001\u0000\u0000\u0000Z[\u0003"+ + "\u0018\f\u0000[\\\u0005\u0005\u0000\u0000\\]\u0005\n\u0000\u0000]^\u0003"+ + "\u0016\u000b\u0000^_\u0005\u000b\u0000\u0000_a\u0001\u0000\u0000\u0000"+ + "`V\u0001\u0000\u0000\u0000`Z\u0001\u0000\u0000\u0000a\u0013\u0001\u0000"+ + "\u0000\u0000bh\u0003\u0016\u000b\u0000cd\u0005\n\u0000\u0000de\u0003\u0016"+ + "\u000b\u0000ef\u0005\u000b\u0000\u0000fh\u0001\u0000\u0000\u0000gb\u0001"+ + "\u0000\u0000\u0000gc\u0001\u0000\u0000\u0000h\u0015\u0001\u0000\u0000"+ + "\u0000ik\u0007\u0000\u0000\u0000ji\u0001\u0000\u0000\u0000jk\u0001\u0000"+ + "\u0000\u0000km\u0001\u0000\u0000\u0000ln\u0007\u0002\u0000\u0000ml\u0001"+ + "\u0000\u0000\u0000no\u0001\u0000\u0000\u0000om\u0001\u0000\u0000\u0000"+ + "op\u0001\u0000\u0000\u0000p|\u0001\u0000\u0000\u0000qs\u0007\u0002\u0000"+ + "\u0000rq\u0001\u0000\u0000\u0000st\u0001\u0000\u0000\u0000tr\u0001\u0000"+ + "\u0000\u0000tu\u0001\u0000\u0000\u0000uw\u0001\u0000\u0000\u0000vx\u0007"+ + "\u0000\u0000\u0000wv\u0001\u0000\u0000\u0000wx\u0001\u0000\u0000\u0000"+ + "x|\u0001\u0000\u0000\u0000y|\u0007\u0003\u0000\u0000z|\u0005\u000f\u0000"+ + "\u0000{j\u0001\u0000\u0000\u0000{r\u0001\u0000\u0000\u0000{y\u0001\u0000"+ + "\u0000\u0000{z\u0001\u0000\u0000\u0000|\u0017\u0001\u0000\u0000\u0000"+ + "}\u007f\u0005\u000e\u0000\u0000~}\u0001\u0000\u0000\u0000\u007f\u0080"+ + "\u0001\u0000\u0000\u0000\u0080~\u0001\u0000\u0000\u0000\u0080\u0081\u0001"+ + "\u0000\u0000\u0000\u0081\u0085\u0001\u0000\u0000\u0000\u0082\u0085\u0005"+ + "\u000f\u0000\u0000\u0083\u0085\u0005\u0010\u0000\u0000\u0084~\u0001\u0000"+ + "\u0000\u0000\u0084\u0082\u0001\u0000\u0000\u0000\u0084\u0083\u0001\u0000"+ + "\u0000\u0000\u0085\u0019\u0001\u0000\u0000\u0000\u0010\u001b#*4>MP`gj"+ + "otw{\u0080\u0084"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java index 55fa21f0e899d..67253e4364190 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java @@ -51,12 +51,6 @@ interface KqlBaseVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx); - /** - * Visit a parse tree produced by {@link KqlBaseParser#expression}. - * @param ctx the parse tree - * @return the visitor result - */ - T visitExpression(KqlBaseParser.ExpressionContext ctx); /** * Visit a parse tree produced by {@link KqlBaseParser#nestedQuery}. * @param ctx the parse tree @@ -64,29 +58,23 @@ interface KqlBaseVisitor extends ParseTreeVisitor { */ T visitNestedQuery(KqlBaseParser.NestedQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. - * @param ctx the parse tree - * @return the visitor result - */ - T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); - /** - * Visit a parse tree produced by {@link KqlBaseParser#fieldRangeQuery}. + * Visit a parse tree produced by {@link KqlBaseParser#matchAllQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx); + T visitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#fieldTermQuery}. + * Visit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx); + T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#fieldName}. + * Visit a parse tree produced by {@link KqlBaseParser#rangeQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitFieldName(KqlBaseParser.FieldNameContext ctx); + T visitRangeQuery(KqlBaseParser.RangeQueryContext ctx); /** * Visit a parse tree produced by {@link KqlBaseParser#rangeQueryValue}. * @param ctx the parse tree @@ -94,33 +82,33 @@ interface KqlBaseVisitor extends ParseTreeVisitor { */ T visitRangeQueryValue(KqlBaseParser.RangeQueryValueContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#termQueryValue}. + * Visit a parse tree produced by {@link KqlBaseParser#existsQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx); + T visitExistsQuery(KqlBaseParser.ExistsQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#groupingTermExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx); + T visitFieldQuery(KqlBaseParser.FieldQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#unquotedLiteralExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldLessQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx); + T visitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#quotedStringExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldQueryValue}. * @param ctx the parse tree * @return the visitor result */ - T visitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx); + T visitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#wildcardExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldName}. * @param ctx the parse tree * @return the visitor result */ - T visitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx); + T visitFieldName(KqlBaseParser.FieldNameContext ctx); } diff --git a/x-pack/plugin/kql/src/test/resources/supported-queries b/x-pack/plugin/kql/src/test/resources/supported-queries index d750f16149112..d9378cf9041c2 100644 --- a/x-pack/plugin/kql/src/test/resources/supported-queries +++ b/x-pack/plugin/kql/src/test/resources/supported-queries @@ -68,6 +68,15 @@ foo_field:foo AND (foo_field:foo bar OR foo bar) foo_field:foo AND (foo_field:foo bar OR foo bar) foo_field:foo OR (foo_field:foo bar OR foo bar) +foo:AND +foo:OR +foo:NOT +foo AND +foo OR +AND foo +OR foo +NOT + // Nested queries nested_field: { NOT foo } nested_field: { NOT foo bar } diff --git a/x-pack/plugin/kql/src/test/resources/unsupported-queries b/x-pack/plugin/kql/src/test/resources/unsupported-queries index 545b03576b331..97a26f16db141 100644 --- a/x-pack/plugin/kql/src/test/resources/unsupported-queries +++ b/x-pack/plugin/kql/src/test/resources/unsupported-queries @@ -16,14 +16,6 @@ NOT (foo_field:foo AND) foo_field:foo bar foo_field: "foo bar foo_field: foo bar" - -// Invalid boolean queries -foo AND -AND foo -foo OR -OR foo -NOT foo: - // Can't nest grouping terms parentheses foo_field:(foo (bar)) From 999274c003cc165c2634cbf94b9bad354239e22d Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 12:43:28 +0200 Subject: [PATCH 108/324] Cleanup HotThreadsIT (example of test cleanup) (#115601) Just a quick example of how to save quite a few lines of code and make a test easier to reason about. --- .../action/admin/HotThreadsIT.java | 54 ++++++------------- 1 file changed, 15 insertions(+), 39 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 20c10c3d8c1f9..8c80cee58f46c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.Level; import org.apache.lucene.util.Constants; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; @@ -26,15 +26,14 @@ import org.hamcrest.Matcher; import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -44,11 +43,10 @@ public class HotThreadsIT extends ESIntegTestCase { - public void testHotThreadsDontFail() throws InterruptedException { + public void testHotThreadsDontFail() throws InterruptedException, ExecutionException { // This test just checks if nothing crashes or gets stuck etc. createIndex("test"); final int iters = scaledRandomIntBetween(2, 20); - final AtomicBoolean hasErrors = new AtomicBoolean(false); for (int i = 0; i < iters; i++) { final NodesHotThreadsRequest request = new NodesHotThreadsRequest( Strings.EMPTY_ARRAY, @@ -67,36 +65,7 @@ public void testHotThreadsDontFail() throws InterruptedException { randomBoolean() ) ); - final CountDownLatch latch = new CountDownLatch(1); - client().execute(TransportNodesHotThreadsAction.TYPE, request, new ActionListener<>() { - @Override - public void onResponse(NodesHotThreadsResponse nodeHotThreads) { - boolean success = false; - try { - assertThat(nodeHotThreads, notNullValue()); - Map nodesMap = nodeHotThreads.getNodesMap(); - assertThat(nodeHotThreads.failures(), empty()); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (NodeHotThreads ht : nodeHotThreads.getNodes()) { - assertNotNull(ht.getHotThreads()); - } - success = true; - } finally { - if (success == false) { - hasErrors.set(true); - } - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - logger.error("FAILED", e); - hasErrors.set(true); - latch.countDown(); - fail(); - } - }); + final ActionFuture hotThreadsFuture = client().execute(TransportNodesHotThreadsAction.TYPE, request); indexRandom( true, @@ -105,7 +74,7 @@ public void onFailure(Exception e) { prepareIndex("test").setId("3").setSource("field1", "value3") ); ensureSearchable(); - while (latch.getCount() > 0) { + while (hotThreadsFuture.isDone() == false) { assertHitCount( prepareSearch().setQuery(matchAllQuery()) .setPostFilter( @@ -115,8 +84,15 @@ public void onFailure(Exception e) { 3L ); } - safeAwait(latch); - assertThat(hasErrors.get(), is(false)); + assertResponse(hotThreadsFuture, nodeHotThreads -> { + assertThat(nodeHotThreads, notNullValue()); + Map nodesMap = nodeHotThreads.getNodesMap(); + assertThat(nodeHotThreads.failures(), empty()); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (NodeHotThreads ht : nodeHotThreads.getNodes()) { + assertNotNull(ht.getHotThreads()); + } + }); } } From e3523c159106255a96b8c00339f6c565b69c266a Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 25 Oct 2024 13:01:41 +0200 Subject: [PATCH 109/324] [DOCS] Fix link syntax in connectors-API-tutorial.asciidoc (#115635) --- docs/reference/connector/docs/connectors-API-tutorial.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/connector/docs/connectors-API-tutorial.asciidoc b/docs/reference/connector/docs/connectors-API-tutorial.asciidoc index 5275f82de1b1f..4118c564e4759 100644 --- a/docs/reference/connector/docs/connectors-API-tutorial.asciidoc +++ b/docs/reference/connector/docs/connectors-API-tutorial.asciidoc @@ -367,7 +367,7 @@ Refer to the individual connectors-references,connector references for these con ==== We're using a self-managed connector in this tutorial. To use these APIs with an Elastic managed connector, there's some extra setup for API keys. -Refer to native-connectors-manage-API-keys for details. +Refer to <> for details. ==== We're now ready to sync our PostgreSQL data to {es}. From 6e0bdbec0ade4af2b5d130aee6bf9e76a64f0e19 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Fri, 25 Oct 2024 13:38:35 +0200 Subject: [PATCH 110/324] Fixed flaky test after PR that disallows functions to return TEXT (#115633) * Fixed flaky test after PR that disallows functions to return TEXT * Also ignore TEXT/KEYWORD combinations because they are now valid * Unmute the test --- muted-tests.yml | 3 --- .../elasticsearch/xpack/esql/analysis/AnalyzerTests.java | 8 +++++++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 4869b669f6220..5c94c0aff60b6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,9 +282,6 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 -- class: org.elasticsearch.xpack.esql.analysis.AnalyzerTests - method: testMvAppendValidation - issue: https://github.com/elastic/elasticsearch/issues/115636 # Examples: # diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index c18f55a651408..b86935dcd03da 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -56,6 +56,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -1879,6 +1880,11 @@ public void testMvAppendValidation() { Supplier supplier = () -> randomInt(fields.length - 1); int first = supplier.get(); int second = randomValueOtherThan(first, supplier); + Function noText = (type) -> type.equals("text") ? "keyword" : type; + assumeTrue( + "Ignore tests with TEXT and KEYWORD combinations because they are now valid", + noText.apply(fields[first][0]).equals(noText.apply(fields[second][0])) == false + ); String signature = "mv_append(" + fields[first][0] + ", " + fields[second][0] + ")"; verifyUnsupported( @@ -1886,7 +1892,7 @@ public void testMvAppendValidation() { "second argument of [" + signature + "] must be [" - + fields[first][1] + + noText.apply(fields[first][1]) + "], found value [" + fields[second][0] + "] type [" From aabbc840a56e70b1d6b0f1221e3aaf9c4a54c08e Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 23:22:55 +1100 Subject: [PATCH 111/324] Mute org.elasticsearch.xpack.search.CrossClusterAsyncSearchIT testCCSClusterDetailsWhereAllShardsSkippedInCanMatch #115652 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 5c94c0aff60b6..f10c214be26bf 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 +- class: org.elasticsearch.xpack.search.CrossClusterAsyncSearchIT + method: testCCSClusterDetailsWhereAllShardsSkippedInCanMatch + issue: https://github.com/elastic/elasticsearch/issues/115652 # Examples: # From 9adbebb123c875765624ca1fe85f4aeb118287fb Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Fri, 25 Oct 2024 09:16:54 -0400 Subject: [PATCH 112/324] [ML] Fix streaming IT (#115543) Fix #113430 --- muted-tests.yml | 3 --- .../org/elasticsearch/xpack/inference/InferenceCrudIT.java | 3 +-- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index f10c214be26bf..20879ed327781 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -164,9 +164,6 @@ tests: - class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT method: test {categorize.Categorize} issue: https://github.com/elastic/elasticsearch/issues/113428 -- class: org.elasticsearch.xpack.inference.InferenceCrudIT - method: testSupportedStream - issue: https://github.com/elastic/elasticsearch/issues/113430 - class: org.elasticsearch.integration.KibanaUserRoleIntegTests method: testFieldMappings issue: https://github.com/elastic/elasticsearch/issues/113592 diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index 37de2caadb475..53c82219e2f12 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -307,8 +307,7 @@ public void testSupportedStream() throws Exception { assertEquals(modelId, singleModel.get("inference_id")); assertEquals(TaskType.COMPLETION.toString(), singleModel.get("task_type")); - var input = IntStream.range(1, randomInt(10)).mapToObj(i -> randomAlphaOfLength(10)).toList(); - + var input = IntStream.range(1, 2 + randomInt(8)).mapToObj(i -> randomAlphaOfLength(10)).toList(); try { var events = streamInferOnMockService(modelId, TaskType.COMPLETION, input); From 6cec96cc1e208e50518949edf3b1840ddd012dd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 25 Oct 2024 15:44:59 +0200 Subject: [PATCH 113/324] Fix TimeSeriesRateAggregatorTests file leak (#115278) With Lucene 10, IndexWriter requires a parent document field in order to use index sorting with document blocks. This lead to different IAE and file leaks in this test which are fixed by adapting the corresponding location in the test setup. --- .../search/aggregations/AggregatorTestCase.java | 2 ++ .../rate/TimeSeriesRateAggregatorTests.java | 15 ++++++++------- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 5f64d123c1bed..d6709b00b4dbb 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -81,6 +81,7 @@ import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.cache.query.DisabledQueryCache; import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; @@ -749,6 +750,7 @@ protected void tes new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false), new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true) ); + config.setParentField(Engine.ROOT_DOC_FIELD_NAME); config.setIndexSort(sort); } RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config); diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java index 753ce8283afca..3c7a18de536bc 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java @@ -42,6 +42,7 @@ import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; public class TimeSeriesRateAggregatorTests extends AggregatorTestCase { @@ -155,14 +156,14 @@ public void testNestedWithinAutoDateHistogram() throws IOException { AggTestConfig aggTestConfig = new AggTestConfig(tsBuilder, timeStampField(), counterField("counter_field")) .withSplitLeavesIntoSeperateAggregators(false); - expectThrows(IllegalArgumentException.class, () -> testCase(iw -> { - for (Document document : docs(2000, "1", 15, 37, 60, /*reset*/ 14)) { - iw.addDocument(document); - } - for (Document document : docs(2000, "2", 74, 150, /*reset*/ 50, 90, /*reset*/ 40)) { - iw.addDocument(document); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testCase(iw -> { + iw.addDocuments(docs(2000, "1", 15, 37, 60, /*reset*/ 14)); + iw.addDocuments(docs(2000, "2", 74, 150, /*reset*/ 50, 90, /*reset*/ 40)); }, verifier, aggTestConfig)); + assertThat( + e.getMessage(), + startsWith("Wrapping a time-series rate aggregation within a DeferableBucketAggregator is not supported.") + ); } private List docs(long startTimestamp, String dim, long... values) throws IOException { From d4ac705d57ff19685703738883c595392547e399 Mon Sep 17 00:00:00 2001 From: John Wagster Date: Fri, 25 Oct 2024 09:26:51 -0500 Subject: [PATCH 114/324] [CI] MixedClusterClientYamlTestSuiteIT test {p0=range/20_synthetic_source/Date range} failing - Removed Old `Date range` test because it's not longer validating useful code (#114057) unmuting test and removing bwc test to get mixedClusterTest working --- muted-tests.yml | 3 - .../test/range/20_synthetic_source.yml | 134 ------------------ 2 files changed, 137 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 20879ed327781..70f29016d8475 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -191,9 +191,6 @@ tests: - class: org.elasticsearch.threadpool.SimpleThreadPoolIT method: testThreadPoolMetrics issue: https://github.com/elastic/elasticsearch/issues/108320 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=range/20_synthetic_source/Date range} - issue: https://github.com/elastic/elasticsearch/issues/113874 - class: org.elasticsearch.kibana.KibanaThreadPoolIT method: testBlockedThreadPoolsRejectUserRequests issue: https://github.com/elastic/elasticsearch/issues/113939 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml index cc92b52e0887a..de20f82f8ba2f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml @@ -525,140 +525,6 @@ setup: _source: ip_range: { "gte": "2001:db8::", "lte": null } ---- -"Date range": - - skip: - cluster_features: ["mapper.range.date_range_indexing_fix"] - reason: "tests prior to rounding fixes in 8.16.0 that caused non-intuitive indexing and query because ranges were assumed to always index with 0's as the default such as when time is missing 00:00:00.000 time was assumed but for lte indexing and query missing time should be 23:59:59.999 as per docs here: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html" - - - do: - index: - index: synthetic_source_test - id: "1" - body: { "date_range" : { "gte": "2017-09-01", "lte": "2017-09-05" } } - - - do: - index: - index: synthetic_source_test - id: "2" - body: { "date_range" : { "gt": "2017-09-01", "lte": "2017-09-03" } } - - - do: - index: - index: synthetic_source_test - id: "3" - body: { "date_range" : [ { "gte": "2017-09-04", "lt": "2017-09-05" } ] } - - - do: - index: - index: synthetic_source_test - id: "4" - body: { "date_range" : [ { "gt": "2017-09-04", "lt": "2017-09-08" }, { "gt": "2017-09-04", "lt": "2017-09-07" } ] } - - - do: - index: - index: synthetic_source_test - id: "5" - body: { "date_range" : { "gte": 1504224000000, "lte": 1504569600000 } } - - - do: - index: - index: synthetic_source_test - id: "6" - body: { "date_range" : { "gte": "2017-09-01T10:20:30.123Z", "lte": "2017-09-05T03:04:05.789Z" } } - - - do: - index: - index: synthetic_source_test - id: "7" - body: { "date_range" : null } - - - do: - index: - index: synthetic_source_test - id: "8" - body: { "date_range": { "gte": null, "lte": "2017-09-05" } } - - - do: - index: - index: synthetic_source_test - id: "9" - body: { "date_range": { "gte": "2017-09-05" } } - - - do: - indices.refresh: {} - - - do: - get: - index: synthetic_source_test - id: "1" - - match: - _source: - date_range: { "gte": "2017-09-01T00:00:00.000Z", "lte": "2017-09-05T00:00:00.000Z" } - - - do: - get: - index: synthetic_source_test - id: "2" - - match: - _source: - date_range: { "gte": "2017-09-01T00:00:00.001Z", "lte": "2017-09-03T00:00:00.000Z" } - - - do: - get: - index: synthetic_source_test - id: "3" - - match: - _source: - date_range: { "gte": "2017-09-04T00:00:00.000Z", "lte": "2017-09-04T23:59:59.999Z" } - - - do: - get: - index: synthetic_source_test - id: "4" - - match: - _source: - date_range: [ { "gte": "2017-09-04T00:00:00.001Z", "lte": "2017-09-06T23:59:59.999Z" }, { "gte": "2017-09-04T00:00:00.001Z", "lte": "2017-09-07T23:59:59.999Z" } ] - - - do: - get: - index: synthetic_source_test - id: "5" - - match: - _source: - date_range: { "gte": "2017-09-01T00:00:00.000Z", "lte": "2017-09-05T00:00:00.000Z" } - - - do: - get: - index: synthetic_source_test - id: "6" - - match: - _source: - date_range: { "gte": "2017-09-01T10:20:30.123Z", "lte": "2017-09-05T03:04:05.789Z" } - - - do: - get: - index: synthetic_source_test - id: "7" - - match: - _source: {} - - - do: - get: - index: synthetic_source_test - id: "8" - - match: - _source: - date_range: { "gte": null, "lte": "2017-09-05T00:00:00.000Z" } - - - do: - get: - index: synthetic_source_test - id: "9" - - match: - _source: - date_range: { "gte": "2017-09-05T00:00:00.000Z", "lte": null } - --- "Date range Rounding Fixes": - requires: From 0b94bb8a75076213f7ac7029d21a57ca2ebf93a1 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Fri, 25 Oct 2024 16:51:02 +0200 Subject: [PATCH 115/324] Slightly more generous assertions for Cartesian tests (#115658) --- .../elasticsearch/lucene/spatial/CentroidCalculatorTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java index 8216d092bd683..caf4494986f6d 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java @@ -428,7 +428,7 @@ private Matcher matchDouble(double value) { // Most data (notably geo data) has values within bounds, and an absolute delta makes more sense. double delta = (value > 1e28 || value < -1e28) ? Math.abs(value / 1e6) : (value > 1e20 || value < -1e20) ? Math.abs(value / 1e10) - : (value > 1e9 || value < -1e9) ? Math.abs(value / 1e15) + : (value > 1e8 || value < -1e8) ? Math.abs(value / 1e15) : DELTA; return closeTo(value, delta); } From bb4a444edef301f263210209636ee46fb9d32d80 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 25 Oct 2024 17:17:30 +0200 Subject: [PATCH 116/324] [Gradle] Fix packaging tests after removing cloud docker image (#115654) --- .../InternalDistributionDownloadPlugin.java | 3 --- ...kerCloudElasticsearchDistributionType.java | 27 ------------------- ...nternalElasticsearchDistributionTypes.java | 2 -- .../internal/test/DistroTestPlugin.java | 2 -- 4 files changed, 34 deletions(-) delete mode 100644 build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerCloudElasticsearchDistributionType.java diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java index 19309fe2da8a3..0bf4bcb33c23b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -172,9 +172,6 @@ private static String distributionProjectName(ElasticsearchDistribution distribu if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_IRONBANK) { return projectName + "ironbank-docker" + archString + "-export"; } - if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_CLOUD) { - return projectName + "cloud-docker" + archString + "-export"; - } if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_CLOUD_ESS) { return projectName + "cloud-ess-docker" + archString + "-export"; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerCloudElasticsearchDistributionType.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerCloudElasticsearchDistributionType.java deleted file mode 100644 index eb522dbcad5e2..0000000000000 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerCloudElasticsearchDistributionType.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.gradle.internal.distribution; - -import org.elasticsearch.gradle.ElasticsearchDistributionType; - -public class DockerCloudElasticsearchDistributionType implements ElasticsearchDistributionType { - - DockerCloudElasticsearchDistributionType() {} - - @Override - public String getName() { - return "dockerCloud"; - } - - @Override - public boolean isDocker() { - return true; - } -} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java index ba0e76b3f5b99..8f0951da86b88 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java @@ -19,7 +19,6 @@ public class InternalElasticsearchDistributionTypes { public static ElasticsearchDistributionType DOCKER = new DockerElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_UBI = new DockerUbiElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_IRONBANK = new DockerIronBankElasticsearchDistributionType(); - public static ElasticsearchDistributionType DOCKER_CLOUD = new DockerCloudElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_CLOUD_ESS = new DockerCloudEssElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_WOLFI = new DockerWolfiElasticsearchDistributionType(); @@ -29,7 +28,6 @@ public class InternalElasticsearchDistributionTypes { DOCKER, DOCKER_UBI, DOCKER_IRONBANK, - DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI ); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index 77ab9557eac33..8e7884888b63b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -49,7 +49,6 @@ import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.ALL_INTERNAL; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DEB; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER; -import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_CLOUD; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_CLOUD_ESS; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_IRONBANK; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_UBI; @@ -149,7 +148,6 @@ private static Map> lifecycleTask lifecyleTasks.put(DOCKER, project.getTasks().register(taskPrefix + ".docker")); lifecyleTasks.put(DOCKER_UBI, project.getTasks().register(taskPrefix + ".docker-ubi")); lifecyleTasks.put(DOCKER_IRONBANK, project.getTasks().register(taskPrefix + ".docker-ironbank")); - lifecyleTasks.put(DOCKER_CLOUD, project.getTasks().register(taskPrefix + ".docker-cloud")); lifecyleTasks.put(DOCKER_CLOUD_ESS, project.getTasks().register(taskPrefix + ".docker-cloud-ess")); lifecyleTasks.put(DOCKER_WOLFI, project.getTasks().register(taskPrefix + ".docker-wolfi")); lifecyleTasks.put(ARCHIVE, project.getTasks().register(taskPrefix + ".archives")); From e82e6af50517e35400d5438a3932a5b6b478b8d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 25 Oct 2024 17:35:48 +0200 Subject: [PATCH 117/324] [DOCS] Documents configurable chunking (#115300) Co-authored-by: David Kyle --- .../inference/inference-apis.asciidoc | 62 ++++++++++++++++++- .../inference/inference-shared.asciidoc | 34 +++++++++- .../service-alibabacloud-ai-search.asciidoc | 21 ++++++- .../inference/service-amazon-bedrock.asciidoc | 20 ++++++ .../inference/service-anthropic.asciidoc | 20 ++++++ .../service-azure-ai-studio.asciidoc | 20 ++++++ .../inference/service-azure-openai.asciidoc | 20 ++++++ .../inference/service-cohere.asciidoc | 20 ++++++ .../inference/service-elasticsearch.asciidoc | 20 ++++++ .../inference/service-elser.asciidoc | 20 ++++++ .../service-google-ai-studio.asciidoc | 20 ++++++ .../service-google-vertex-ai.asciidoc | 20 ++++++ .../inference/service-hugging-face.asciidoc | 20 ++++++ .../inference/service-mistral.asciidoc | 20 ++++++ .../inference/service-openai.asciidoc | 20 ++++++ 15 files changed, 354 insertions(+), 3 deletions(-) diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 1206cb02ba89a..38afc7c416f18 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -35,7 +35,6 @@ Elastic –, then create an {infer} endpoint by the <>. Now use <> to perform <> on your data. - [discrete] [[default-enpoints]] === Default {infer} endpoints @@ -53,6 +52,67 @@ For these models, the minimum number of allocations is `0`. If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes. +[discrete] +[[infer-chunking-config]] +=== Configuring chunking + +{infer-cap} endpoints have a limit on the amount of text they can process at once, determined by the model's input capacity. +Chunking is the process of splitting the input text into pieces that remain within these limits. +It occurs when ingesting documents into <>. +Chunking also helps produce sections that are digestible for humans. +Returning a long document in search results is less useful than providing the most relevant chunk of text. + +Each chunk will include the text subpassage and the corresponding embedding generated from it. + +By default, documents are split into sentences and grouped in sections up to 250 words with 1 sentence overlap so that each chunk shares a sentence with the previous chunk. +Overlapping ensures continuity and prevents vital contextual information in the input text from being lost by a hard break. + +{es} uses the https://unicode-org.github.io/icu-docs/[ICU4J] library to detect word and sentence boundaries for chunking. +https://unicode-org.github.io/icu/userguide/boundaryanalysis/#word-boundary[Word boundaries] are identified by following a series of rules, not just the presence of a whitespace character. +For written languages that do use whitespace such as Chinese or Japanese dictionary lookups are used to detect word boundaries. + + +[discrete] +==== Chunking strategies + +Two strategies are available for chunking: `sentence` and `word`. + +The `sentence` strategy splits the input text at sentence boundaries. +Each chunk contains one or more complete sentences ensuring that the integrity of sentence-level context is preserved, except when a sentence causes a chunk to exceed a word count of `max_chunk_size`, in which case it will be split across chunks. +The `sentence_overlap` option defines the number of sentences from the previous chunk to include in the current chunk which is either `0` or `1`. + +The `word` strategy splits the input text on individual words up to the `max_chunk_size` limit. +The `overlap` option is the number of words from the previous chunk to include in the current chunk. + +The default chunking strategy is `sentence`. + +NOTE: The default chunking strategy for {infer} endpoints created before 8.16 is `word`. + + +[discrete] +==== Example of configuring the chunking behavior + +The following example creates an {infer} endpoint with the `elasticsearch` service that deploys the ELSER model by default and configures the chunking behavior. + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/small_chunk_size +{ + "service": "elasticsearch", + "service_settings": { + "num_allocations": 1, + "num_threads": 1 + }, + "chunking_settings": { + "strategy": "sentence", + "max_chunk_size": 100, + "sentence_overlap": 0 + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + + include::delete-inference.asciidoc[] include::get-inference.asciidoc[] include::post-inference.asciidoc[] diff --git a/docs/reference/inference/inference-shared.asciidoc b/docs/reference/inference/inference-shared.asciidoc index 2eafa3434e89e..da497c6581e5d 100644 --- a/docs/reference/inference/inference-shared.asciidoc +++ b/docs/reference/inference/inference-shared.asciidoc @@ -31,4 +31,36 @@ end::task-settings[] tag::task-type[] The type of the {infer} task that the model will perform. -end::task-type[] \ No newline at end of file +end::task-type[] + +tag::chunking-settings[] +Chunking configuration object. +Refer to <> to learn more about chunking. +end::chunking-settings[] + +tag::chunking-settings-max-chunking-size[] +Specifies the maximum size of a chunk in words. +Defaults to `250`. +This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). +end::chunking-settings-max-chunking-size[] + +tag::chunking-settings-overlap[] +Only for `word` chunking strategy. +Specifies the number of overlapping words for chunks. +Defaults to `100`. +This value cannot be higher than the half of `max_chunking_size`. +end::chunking-settings-overlap[] + +tag::chunking-settings-sentence-overlap[] +Only for `sentence` chunking strategy. +Specifies the numnber of overlapping sentences for chunks. +It can be either `1` or `0`. +Defaults to `1`. +end::chunking-settings-sentence-overlap[] + +tag::chunking-settings-strategy[] +Specifies the chunking strategy. +It could be either `sentence` or `word`. +end::chunking-settings-strategy[] + + diff --git a/docs/reference/inference/service-alibabacloud-ai-search.asciidoc b/docs/reference/inference/service-alibabacloud-ai-search.asciidoc index 0607b56b528ea..c3ff40a39cd86 100644 --- a/docs/reference/inference/service-alibabacloud-ai-search.asciidoc +++ b/docs/reference/inference/service-alibabacloud-ai-search.asciidoc @@ -34,6 +34,26 @@ Available task types: [[infer-service-alibabacloud-ai-search-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, @@ -108,7 +128,6 @@ To modify this, set the `requests_per_minute` setting of this object in your ser include::inference-shared.asciidoc[tag=request-per-minute-example] -- - `task_settings`:: (Optional, object) include::inference-shared.asciidoc[tag=task-settings] diff --git a/docs/reference/inference/service-amazon-bedrock.asciidoc b/docs/reference/inference/service-amazon-bedrock.asciidoc index dbffd5c26fbcc..761777e32f8e0 100644 --- a/docs/reference/inference/service-amazon-bedrock.asciidoc +++ b/docs/reference/inference/service-amazon-bedrock.asciidoc @@ -32,6 +32,26 @@ Available task types: [[infer-service-amazon-bedrock-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-anthropic.asciidoc b/docs/reference/inference/service-anthropic.asciidoc index 41419db7a6069..7fb3d1d5bea34 100644 --- a/docs/reference/inference/service-anthropic.asciidoc +++ b/docs/reference/inference/service-anthropic.asciidoc @@ -32,6 +32,26 @@ Available task types: [[infer-service-anthropic-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-azure-ai-studio.asciidoc b/docs/reference/inference/service-azure-ai-studio.asciidoc index 0d711a0d6171f..dd13a3e59aae5 100644 --- a/docs/reference/inference/service-azure-ai-studio.asciidoc +++ b/docs/reference/inference/service-azure-ai-studio.asciidoc @@ -33,6 +33,26 @@ Available task types: [[infer-service-azure-ai-studio-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-azure-openai.asciidoc b/docs/reference/inference/service-azure-openai.asciidoc index 6f03c5966d9e6..b134e2b687f6c 100644 --- a/docs/reference/inference/service-azure-openai.asciidoc +++ b/docs/reference/inference/service-azure-openai.asciidoc @@ -33,6 +33,26 @@ Available task types: [[infer-service-azure-openai-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-cohere.asciidoc b/docs/reference/inference/service-cohere.asciidoc index 84eae6e880617..1a815e3c45f36 100644 --- a/docs/reference/inference/service-cohere.asciidoc +++ b/docs/reference/inference/service-cohere.asciidoc @@ -34,6 +34,26 @@ Available task types: [[infer-service-cohere-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index 259779a12134d..0103b425faefe 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -36,6 +36,26 @@ Available task types: [[infer-service-elasticsearch-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 521fab0375584..273d743e47a4b 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -36,6 +36,26 @@ Available task types: [[infer-service-elser-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-google-ai-studio.asciidoc b/docs/reference/inference/service-google-ai-studio.asciidoc index 25aa89cd49110..738fce3d53e9b 100644 --- a/docs/reference/inference/service-google-ai-studio.asciidoc +++ b/docs/reference/inference/service-google-ai-studio.asciidoc @@ -33,6 +33,26 @@ Available task types: [[infer-service-google-ai-studio-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-google-vertex-ai.asciidoc b/docs/reference/inference/service-google-vertex-ai.asciidoc index 640553ab74626..34e14e05e072a 100644 --- a/docs/reference/inference/service-google-vertex-ai.asciidoc +++ b/docs/reference/inference/service-google-vertex-ai.asciidoc @@ -33,6 +33,26 @@ Available task types: [[infer-service-google-vertex-ai-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-hugging-face.asciidoc b/docs/reference/inference/service-hugging-face.asciidoc index 177a15177d21f..6d8667351a6b4 100644 --- a/docs/reference/inference/service-hugging-face.asciidoc +++ b/docs/reference/inference/service-hugging-face.asciidoc @@ -32,6 +32,26 @@ Available task types: [[infer-service-hugging-face-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-mistral.asciidoc b/docs/reference/inference/service-mistral.asciidoc index 077e610191705..244381d107161 100644 --- a/docs/reference/inference/service-mistral.asciidoc +++ b/docs/reference/inference/service-mistral.asciidoc @@ -32,6 +32,26 @@ Available task types: [[infer-service-mistral-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-openai.asciidoc b/docs/reference/inference/service-openai.asciidoc index 075e76dc7d741..21643133553e1 100644 --- a/docs/reference/inference/service-openai.asciidoc +++ b/docs/reference/inference/service-openai.asciidoc @@ -33,6 +33,26 @@ Available task types: [[infer-service-openai-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, From 3b5bd62467456db5eaefbf7cd72ce324eb7dbb49 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Fri, 25 Oct 2024 09:06:11 -0700 Subject: [PATCH 118/324] Add tests for license changes while using data streams (#115478) --- .../logsdb/DataStreamLicenceDowngradeIT.java | 489 ++++++++++++++++++ .../logsdb/DataStreamLicenseChangeIT.java | 107 ++++ .../logsdb/DataStreamLicenseUpgradeIT.java | 487 +++++++++++++++++ 3 files changed, 1083 insertions(+) create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenceDowngradeIT.java create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseChangeIT.java create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseUpgradeIT.java diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenceDowngradeIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenceDowngradeIT.java new file mode 100644 index 0000000000000..f004189098c43 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenceDowngradeIT.java @@ -0,0 +1,489 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.index.mapper.SourceFieldMapper; + +import java.io.IOException; +import java.util.List; + +public class DataStreamLicenceDowngradeIT extends DataStreamLicenseChangeIT { + @Override + protected void applyInitialLicense() throws IOException { + startTrial(); + } + + @Override + protected void licenseChange() throws IOException { + startBasic(); + } + + @Override + protected List cases() { + return List.of(new TestCase() { + @Override + public String dataStreamName() { + return "logs-test-regular"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + private static final String sourceModeOverride = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "SYNTHETIC" + } + } + } + }"""; + + @Override + public String dataStreamName() { + return "logs-test-explicit-synthetic"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + assertOK(createDataStream(client(), dataStreamName())); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public void rollover() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + rolloverDataStream(client(), dataStreamName()); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + private static final String sourceModeOverride = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "STORED" + } + } + } + }"""; + + @Override + public String dataStreamName() { + return "logs-test-explicit-stored"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + assertOK(createDataStream(client(), dataStreamName())); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public void rollover() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + rolloverDataStream(client(), dataStreamName()); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-regular"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"] + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-regular-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-regular"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-regular-component"] + } + """; + + putTemplate(client(), "tsdb-test-regular-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-synthetic"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"], + "mapping.source.mode": "SYNTHETIC" + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-synthetic-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-synthetic"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-synthetic-component"] + } + """; + + putTemplate(client(), "tsdb-test-synthetic-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-stored"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"], + "mapping.source.mode": "STORED" + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-stored-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-stored"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-stored-component"] + } + """; + + putTemplate(client(), "tsdb-test-stored-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, + + new TestCase() { + @Override + public String dataStreamName() { + return "standard"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var template = """ + { + "index_patterns": ["standard"], + "priority": 100, + "data_stream": {}, + "composed_of": [] + } + """; + + putTemplate(client(), "standard-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, + new TestCase() { + @Override + public String dataStreamName() { + return "standard-synthetic"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "SYNTHETIC" + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "standard-synthetic-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["standard-synthetic"], + "priority": 100, + "data_stream": {}, + "composed_of": ["standard-synthetic-component"] + } + """; + + putTemplate(client(), "standard-synthetic-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, + new TestCase() { + @Override + public String dataStreamName() { + return "standard-stored"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "STORED" + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "standard-stored-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["standard-stored"], + "priority": 100, + "data_stream": {}, + "composed_of": ["standard-stored-component"] + } + """; + + putTemplate(client(), "standard-stored-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + } + ); + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseChangeIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseChangeIT.java new file mode 100644 index 0000000000000..b84c982766e4b --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseChangeIT.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.List; + +public abstract class DataStreamLicenseChangeIT extends LogsIndexModeRestTestIT { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .module("data-streams") + .module("x-pack-stack") + .setting("cluster.logsdb.enabled", "true") + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "basic") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + protected interface TestCase { + String dataStreamName(); + + void prepareDataStream() throws IOException; + + String indexMode(); + + SourceFieldMapper.Mode initialMode(); + + SourceFieldMapper.Mode finalMode(); + + void rollover() throws IOException; + } + + protected abstract void licenseChange() throws IOException; + + protected abstract void applyInitialLicense() throws IOException; + + protected abstract List cases(); + + public void testLicenseChange() throws IOException { + applyInitialLicense(); + + for (var testCase : cases()) { + testCase.prepareDataStream(); + + var indexMode = (String) getSetting(client(), getDataStreamBackingIndex(client(), testCase.dataStreamName(), 0), "index.mode"); + assertEquals(testCase.indexMode(), indexMode); + + var sourceMode = (String) getSetting( + client(), + getDataStreamBackingIndex(client(), testCase.dataStreamName(), 0), + "index.mapping.source.mode" + ); + assertEquals(testCase.initialMode().toString(), sourceMode); + } + + licenseChange(); + + for (var testCase : cases()) { + testCase.rollover(); + + var indexMode = (String) getSetting(client(), getDataStreamBackingIndex(client(), testCase.dataStreamName(), 1), "index.mode"); + assertEquals(testCase.indexMode(), indexMode); + + var sourceMode = (String) getSetting( + client(), + getDataStreamBackingIndex(client(), testCase.dataStreamName(), 1), + "index.mapping.source.mode" + ); + assertEquals(testCase.finalMode().toString(), sourceMode); + } + } + + protected static void startBasic() throws IOException { + Request startTrial = new Request("POST", "/_license/start_basic"); + startTrial.addParameter("acknowledge", "true"); + assertOK(client().performRequest(startTrial)); + } + + protected static void startTrial() throws IOException { + Request startTrial = new Request("POST", "/_license/start_trial"); + startTrial.addParameter("acknowledge", "true"); + assertOK(client().performRequest(startTrial)); + } + + protected static Response removeComponentTemplate(final RestClient client, final String componentTemplate) throws IOException { + final Request request = new Request("DELETE", "/_component_template/" + componentTemplate); + return client.performRequest(request); + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseUpgradeIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseUpgradeIT.java new file mode 100644 index 0000000000000..bce43ca046523 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseUpgradeIT.java @@ -0,0 +1,487 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.index.mapper.SourceFieldMapper; + +import java.io.IOException; +import java.util.List; + +public class DataStreamLicenseUpgradeIT extends DataStreamLicenseChangeIT { + @Override + protected void applyInitialLicense() {} + + @Override + protected void licenseChange() throws IOException { + startTrial(); + } + + @Override + protected List cases() { + return List.of(new TestCase() { + @Override + public String dataStreamName() { + return "logs-test-regular"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + }, new TestCase() { + private static final String sourceModeOverride = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "SYNTHETIC" + } + } + } + }"""; + + @Override + public String dataStreamName() { + return "logs-test-explicit-synthetic"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + assertOK(createDataStream(client(), dataStreamName())); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public void rollover() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + rolloverDataStream(client(), dataStreamName()); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + }, new TestCase() { + private static final String sourceModeOverride = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "STORED" + } + } + } + }"""; + + @Override + public String dataStreamName() { + return "logs-test-explicit-stored"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + assertOK(createDataStream(client(), dataStreamName())); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public void rollover() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + rolloverDataStream(client(), dataStreamName()); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-regular"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"] + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-regular-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-regular"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-regular-component"] + } + """; + + putTemplate(client(), "tsdb-test-regular-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-synthetic"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"], + "mapping.source.mode": "SYNTHETIC" + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-synthetic-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-synthetic"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-synthetic-component"] + } + """; + + putTemplate(client(), "tsdb-test-synthetic-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-stored"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"], + "mapping.source.mode": "STORED" + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-stored-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-stored"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-stored-component"] + } + """; + + putTemplate(client(), "tsdb-test-stored-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, + + new TestCase() { + @Override + public String dataStreamName() { + return "standard"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var template = """ + { + "index_patterns": ["standard"], + "priority": 100, + "data_stream": {}, + "composed_of": [] + } + """; + + putTemplate(client(), "standard-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, + new TestCase() { + @Override + public String dataStreamName() { + return "standard-synthetic"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "SYNTHETIC" + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "standard-synthetic-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["standard-synthetic"], + "priority": 100, + "data_stream": {}, + "composed_of": ["standard-synthetic-component"] + } + """; + + putTemplate(client(), "standard-synthetic-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + }, + new TestCase() { + @Override + public String dataStreamName() { + return "standard-stored"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "STORED" + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "standard-stored-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["standard-stored"], + "priority": 100, + "data_stream": {}, + "composed_of": ["standard-stored-component"] + } + """; + + putTemplate(client(), "standard-stored-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + } + ); + } +} From 29d1d9e6e034df4a40f5357e03c30d0cccb51afc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Fri, 25 Oct 2024 19:13:34 +0200 Subject: [PATCH 119/324] Implement string parsing for the KQL parser. (#115662) --- .../xpack/kql/parser/ParserUtils.java | 254 ++++++++++++++++ .../xpack/kql/parser/ParserUtilsTests.java | 280 ++++++++++++++++++ 2 files changed, 534 insertions(+) create mode 100644 x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/ParserUtils.java create mode 100644 x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/ParserUtilsTests.java diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/ParserUtils.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/ParserUtils.java new file mode 100644 index 0000000000000..f996a953ea7f7 --- /dev/null +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/ParserUtils.java @@ -0,0 +1,254 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.ParseTreeVisitor; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.apache.logging.log4j.util.Strings; +import org.apache.lucene.queryparser.classic.QueryParser; + +import java.util.ArrayList; +import java.util.List; + +/** + * Utility class for parsing and processing KQL expressions. + * Provides methods for type-safe parsing, text extraction, and string escaping/unescaping. + */ +public final class ParserUtils { + + private static final String UNQUOTED_LITERAL_TERM_DELIMITER = " "; + private static final char ESCAPE_CHAR = '\\'; + private static final char QUOTE_CHAR = '"'; + private static final char WILDCARD_CHAR = '*'; + + private ParserUtils() { + throw new UnsupportedOperationException("No need to instantiate this class"); + } + + /** + * Performs type-safe parsing using the provided visitor. + * + * @param visitor The visitor to use to do the parsing + * @param ctx The parser tree context to visit + * @param type The expected return type class + * @return The parsed result, casted to the expected type + */ + @SuppressWarnings("unchecked") + public static T typedParsing(ParseTreeVisitor visitor, ParserRuleContext ctx, Class type) { + Object result = ctx.accept(visitor); + + if (type.isInstance(result)) { + return (T) result; + } + + throw new KqlParsingException( + "Invalid query '{}'[{}] given; expected {} but found {}", + ctx.start.getLine(), + ctx.start.getCharPositionInLine(), + ctx.getText(), + ctx.getClass().getSimpleName(), + type.getSimpleName(), + (result != null ? result.getClass().getSimpleName() : "null") + ); + } + + /** + * Extracts text from a parser tree context by joining all terminal nodes with a space delimiter. + * + * @param ctx The parser tree context + * + * @return The extracted text + */ + public static String extractText(ParserRuleContext ctx) { + return String.join(UNQUOTED_LITERAL_TERM_DELIMITER, extractTextTokens(ctx)); + } + + /** + * Checks if the given context contains any unescaped wildcard characters. + * + * @param ctx The tree context to check + * @return true if wildcards are present, false otherwise + */ + public static boolean hasWildcard(ParserRuleContext ctx) { + return ctx.children.stream().anyMatch(childNode -> { + if (childNode instanceof TerminalNode terminalNode) { + Token token = terminalNode.getSymbol(); + return switch (token.getType()) { + case KqlBaseParser.WILDCARD -> true; + case KqlBaseParser.UNQUOTED_LITERAL -> token.getText().matches("[^\\\\]*[*].*"); + default -> false; + }; + } + + return false; + }); + } + + /** + * Escapes special characters in a query string for use in Lucene queries. + * + * @param queryText The query text to escape + * @param preserveWildcards If true, does not escape wildcard characters (*) + * @return The escaped query string + */ + public static String escapeLuceneQueryString(String queryText, boolean preserveWildcards) { + if (preserveWildcards) { + StringBuilder escapedQuery = new StringBuilder(queryText.length()); + StringBuilder subpart = new StringBuilder(queryText.length()); + + for (char currentChar : queryText.toCharArray()) { + if (currentChar == WILDCARD_CHAR) { + escapedQuery.append(QueryParser.escape(subpart.toString())).append(currentChar); + subpart.setLength(0); + } else { + subpart.append(currentChar); + } + } + + return escapedQuery.append(QueryParser.escape(subpart.toString())).toString(); + } + + return QueryParser.escape(queryText); + } + + private static List extractTextTokens(ParserRuleContext ctx) { + assert ctx.children != null; + List textTokens = new ArrayList<>(ctx.children.size()); + + for (ParseTree currentNode : ctx.children) { + if (currentNode instanceof TerminalNode terminalNode) { + textTokens.add(extractText(terminalNode)); + } else { + throw new KqlParsingException("Unable to extract text from ctx", ctx.start.getLine(), ctx.start.getCharPositionInLine()); + } + } + + return textTokens; + } + + private static String extractText(TerminalNode node) { + if (node.getSymbol().getType() == KqlBaseParser.QUOTED_STRING) { + return unescapeQuotedString(node); + } else if (node.getSymbol().getType() == KqlBaseParser.UNQUOTED_LITERAL) { + return unescapeUnquotedLiteral(node); + } + + return node.getText(); + } + + private static String unescapeQuotedString(TerminalNode ctx) { + String inputText = ctx.getText(); + + assert inputText.length() >= 2 && inputText.charAt(0) == QUOTE_CHAR && inputText.charAt(inputText.length() - 1) == QUOTE_CHAR; + StringBuilder sb = new StringBuilder(); + + for (int i = 1; i < inputText.length() - 1;) { + char currentChar = inputText.charAt(i++); + if (currentChar == ESCAPE_CHAR && i + 1 < inputText.length()) { + currentChar = inputText.charAt(i++); + switch (currentChar) { + case 't' -> sb.append('\t'); + case 'n' -> sb.append('\n'); + case 'r' -> sb.append('\r'); + case 'u' -> i = handleUnicodeSequemce(ctx, sb, inputText, i); + case QUOTE_CHAR -> sb.append('\"'); + case ESCAPE_CHAR -> sb.append(ESCAPE_CHAR); + default -> sb.append(ESCAPE_CHAR).append(currentChar); + } + } else { + sb.append(currentChar); + } + } + + return sb.toString(); + } + + private static String unescapeUnquotedLiteral(TerminalNode ctx) { + String inputText = ctx.getText(); + + if (inputText == null || inputText.isEmpty()) { + return inputText; + } + StringBuilder sb = new StringBuilder(inputText.length()); + + for (int i = 0; i < inputText.length();) { + char currentChar = inputText.charAt(i++); + if (currentChar == ESCAPE_CHAR && i < inputText.length()) { + if (isEscapedKeywordSequence(inputText, i)) { + String sequence = handleKeywordSequence(inputText, i); + sb.append(sequence); + i += sequence.length(); + } else { + currentChar = inputText.charAt(i++); + switch (currentChar) { + case 't' -> sb.append('\t'); + case 'n' -> sb.append('\n'); + case 'r' -> sb.append('\r'); + case 'u' -> i = handleUnicodeSequemce(ctx, sb, inputText, i); + case QUOTE_CHAR -> sb.append('\"'); + case ESCAPE_CHAR -> sb.append(ESCAPE_CHAR); + case '(', ')', ':', '<', '>', '*', '{', '}' -> sb.append(currentChar); + default -> sb.append(ESCAPE_CHAR).append(currentChar); + } + } + } else { + sb.append(currentChar); + } + } + + return sb.toString(); + } + + private static boolean isEscapedKeywordSequence(String input, int startIndex) { + if (startIndex + 1 >= input.length()) { + return false; + } + String remaining = Strings.toRootLowerCase(input.substring(startIndex)); + return remaining.startsWith("and") || remaining.startsWith("or") || remaining.startsWith("not"); + } + + private static String handleKeywordSequence(String input, int startIndex) { + String remaining = input.substring(startIndex); + if (Strings.toRootLowerCase(remaining).startsWith("and")) return remaining.substring(0, 3); + if (Strings.toRootLowerCase(remaining).startsWith("or")) return remaining.substring(0, 2); + if (Strings.toRootLowerCase(remaining).startsWith("not")) return remaining.substring(0, 3); + return ""; + } + + private static int handleUnicodeSequemce(TerminalNode ctx, StringBuilder sb, String text, int startIdx) { + int endIdx = startIdx + 4; + String hex = text.substring(startIdx, endIdx); + + try { + int code = Integer.parseInt(hex, 16); + + if (code >= 0xD800 && code <= 0xDFFF) { + // U+D800—U+DFFF can only be used as surrogate pairs and are not valid character codes. + throw new KqlParsingException( + "Invalid unicode character code, [{}] is a surrogate code", + ctx.getSymbol().getLine(), + ctx.getSymbol().getCharPositionInLine() + startIdx, + hex + ); + } + sb.append(String.valueOf(Character.toChars(code))); + } catch (IllegalArgumentException e) { + throw new KqlParsingException( + "Invalid unicode character code [{}]", + ctx.getSymbol().getLine(), + ctx.getSymbol().getCharPositionInLine() + startIdx, + hex + ); + } + + return endIdx; + } +} diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/ParserUtilsTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/ParserUtilsTests.java new file mode 100644 index 0000000000000..05474bcedd4c8 --- /dev/null +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/ParserUtilsTests.java @@ -0,0 +1,280 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.antlr.v4.runtime.tree.TerminalNodeImpl; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.stream.Stream; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.kql.parser.KqlBaseParser.QUOTED_STRING; +import static org.elasticsearch.xpack.kql.parser.KqlBaseParser.UNQUOTED_LITERAL; +import static org.elasticsearch.xpack.kql.parser.KqlBaseParser.WILDCARD; +import static org.elasticsearch.xpack.kql.parser.ParserUtils.escapeLuceneQueryString; +import static org.elasticsearch.xpack.kql.parser.ParserUtils.extractText; +import static org.elasticsearch.xpack.kql.parser.ParserUtils.hasWildcard; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ParserUtilsTests extends ESTestCase { + + public void testExtractTestWithQuotedString() { + // General case + assertThat(extractText(parserRuleContext(quotedStringNode("foo"))), equalTo("foo")); + + // Empty string + assertThat(extractText(parserRuleContext(quotedStringNode(""))), equalTo("")); + + // Whitespaces are preserved + assertThat(extractText(parserRuleContext(quotedStringNode(" foo bar "))), equalTo(" foo bar ")); + + // Quoted string does not need escaping for KQL keywords (and, or, ...) + assertThat(extractText(parserRuleContext(quotedStringNode("not foo and bar or baz"))), equalTo("not foo and bar or baz")); + + // Quoted string does not need escaping for KQL special chars (e.g: '{', ':', ...) + assertThat(extractText(parserRuleContext(quotedStringNode("foo*:'\u3000{(})"))), equalTo("foo*:'\u3000{(})")); + + // Escaped characters handling + assertThat(extractText(parserRuleContext(quotedStringNode("\\\\"))), equalTo("\\")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\\\bar"))), equalTo("foo\\bar")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\\\"))), equalTo("foo\\")); + assertThat(extractText(parserRuleContext(quotedStringNode("\\\\foo"))), equalTo("\\foo")); + + assertThat(extractText(parserRuleContext(quotedStringNode("\\\""))), equalTo("\"")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\\"bar"))), equalTo("foo\"bar")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\\""))), equalTo("foo\"")); + assertThat(extractText(parserRuleContext(quotedStringNode("\\\"foo"))), equalTo("\"foo")); + + assertThat(extractText(parserRuleContext(quotedStringNode("\\t"))), equalTo("\t")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\tbar"))), equalTo("foo\tbar")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\t"))), equalTo("foo\t")); + assertThat(extractText(parserRuleContext(quotedStringNode("\\tfoo"))), equalTo("\tfoo")); + + assertThat(extractText(parserRuleContext(quotedStringNode("\\n"))), equalTo("\n")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\nbar"))), equalTo("foo\nbar")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\n"))), equalTo("foo\n")); + assertThat(extractText(parserRuleContext(quotedStringNode("\\nfoo"))), equalTo("\nfoo")); + + assertThat(extractText(parserRuleContext(quotedStringNode("\\r"))), equalTo("\r")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\rbar"))), equalTo("foo\rbar")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\r"))), equalTo("foo\r")); + assertThat(extractText(parserRuleContext(quotedStringNode("\\rfoo"))), equalTo("\rfoo")); + + // Unicode characters handling (\u0041 is 'A') + assertThat(extractText(parserRuleContext(quotedStringNode(format("\\u0041")))), equalTo("A")); + assertThat(extractText(parserRuleContext(quotedStringNode(format("foo\\u0041bar")))), equalTo("fooAbar")); + assertThat(extractText(parserRuleContext(quotedStringNode(format("foo\\u0041")))), equalTo("fooA")); + assertThat(extractText(parserRuleContext(quotedStringNode(format("\\u0041foo")))), equalTo("Afoo")); + } + + public void testExtractTestWithUnquotedLiteral() { + // General case + assertThat(extractText(parserRuleContext(literalNode("foo"))), equalTo("foo")); + + // KQL keywords unescaping + assertThat(extractText(parserRuleContext(literalNode("\\not foo \\and bar \\or baz"))), equalTo("not foo and bar or baz")); + assertThat( + extractText(parserRuleContext(literalNode("\\\\not foo \\\\and bar \\\\or baz"))), + equalTo("\\not foo \\and bar \\or baz") + ); + + // Escaped characters handling + assertThat(extractText(parserRuleContext(literalNode("\\\\"))), equalTo("\\")); + assertThat(extractText(parserRuleContext(literalNode("foo\\\\bar"))), equalTo("foo\\bar")); + assertThat(extractText(parserRuleContext(literalNode("foo\\\\"))), equalTo("foo\\")); + assertThat(extractText(parserRuleContext(literalNode("\\\\foo"))), equalTo("\\foo")); + + assertThat(extractText(parserRuleContext(literalNode("\\\""))), equalTo("\"")); + assertThat(extractText(parserRuleContext(literalNode("foo\\\"bar"))), equalTo("foo\"bar")); + assertThat(extractText(parserRuleContext(literalNode("foo\\\""))), equalTo("foo\"")); + assertThat(extractText(parserRuleContext(literalNode("\\\"foo"))), equalTo("\"foo")); + + assertThat(extractText(parserRuleContext(literalNode("\\t"))), equalTo("\t")); + assertThat(extractText(parserRuleContext(literalNode("foo\\tbar"))), equalTo("foo\tbar")); + assertThat(extractText(parserRuleContext(literalNode("foo\\t"))), equalTo("foo\t")); + assertThat(extractText(parserRuleContext(literalNode("\\tfoo"))), equalTo("\tfoo")); + + assertThat(extractText(parserRuleContext(literalNode("\\n"))), equalTo("\n")); + assertThat(extractText(parserRuleContext(literalNode("foo\\nbar"))), equalTo("foo\nbar")); + assertThat(extractText(parserRuleContext(literalNode("foo\\n"))), equalTo("foo\n")); + assertThat(extractText(parserRuleContext(literalNode("\\nfoo"))), equalTo("\nfoo")); + + assertThat(extractText(parserRuleContext(literalNode("\\r"))), equalTo("\r")); + assertThat(extractText(parserRuleContext(literalNode("foo\\rbar"))), equalTo("foo\rbar")); + assertThat(extractText(parserRuleContext(literalNode("foo\\r"))), equalTo("foo\r")); + assertThat(extractText(parserRuleContext(literalNode("\\rfoo"))), equalTo("\rfoo")); + + for (String escapedChar : List.of("(", ")", ":", "<", ">", "*", "{", "}")) { + assertThat(extractText(parserRuleContext(literalNode(format("\\%s", escapedChar)))), equalTo(escapedChar)); + assertThat( + extractText(parserRuleContext(literalNode(format("foo\\%sbar", escapedChar)))), + equalTo(format("foo%sbar", escapedChar)) + ); + assertThat(extractText(parserRuleContext(literalNode(format("foo\\%s", escapedChar)))), equalTo(format("foo%s", escapedChar))); + assertThat(extractText(parserRuleContext(literalNode(format("\\%sfoo", escapedChar)))), equalTo(format("%sfoo", escapedChar))); + } + + // Unicode characters handling (\u0041 is 'A') + assertThat(extractText(parserRuleContext(literalNode(format("\\u0041")))), equalTo("A")); + assertThat(extractText(parserRuleContext(literalNode(format("foo\\u0041bar")))), equalTo("fooAbar")); + assertThat(extractText(parserRuleContext(literalNode(format("foo\\u0041")))), equalTo("fooA")); + assertThat(extractText(parserRuleContext(literalNode(format("\\u0041foo")))), equalTo("Afoo")); + } + + public void testHasWildcard() { + // No children + assertFalse(hasWildcard(parserRuleContext(List.of()))); + + // Lone wildcard + assertTrue(hasWildcard(parserRuleContext(wildcardNode()))); + assertTrue(hasWildcard(parserRuleContext(randomTextNodeListWithNode(wildcardNode())))); + + // All children are literals + assertFalse(hasWildcard(parserRuleContext(randomList(1, randomIntBetween(1, 100), ParserUtilsTests::randomLiteralNode)))); + + // Quoted string + assertFalse(hasWildcard(parserRuleContext(randomQuotedStringNode()))); + + // Literal node containing the wildcard character + assertTrue(hasWildcard(parserRuleContext(literalNode("f*oo")))); + assertTrue(hasWildcard(parserRuleContext(literalNode("*foo")))); + assertTrue(hasWildcard(parserRuleContext(literalNode("foo*")))); + + // Literal node containing the wildcard characters (escaped) + assertFalse(hasWildcard(parserRuleContext(literalNode("f\\*oo")))); + assertFalse(hasWildcard(parserRuleContext(literalNode("\\*foo")))); + assertFalse(hasWildcard(parserRuleContext(literalNode("foo\\*")))); + } + + public void testUnquotedLiteralInvalidUnicodeCodeParsing() { + { + // Invalid unicode digit (G) + ParserRuleContext ctx = parserRuleContext(literalNode("\\u0G41")); + KqlParsingException e = assertThrows(KqlParsingException.class, () -> extractText(ctx)); + assertThat(e.getMessage(), equalTo("line 0:3: Invalid unicode character code [0G41]")); + } + + { + // U+D800—U+DFFF can only be used as surrogate pairs and are not valid character codes. + ParserRuleContext ctx = parserRuleContext(literalNode("\\uD900")); + KqlParsingException e = assertThrows(KqlParsingException.class, () -> extractText(ctx)); + assertThat(e.getMessage(), equalTo("line 0:3: Invalid unicode character code, [D900] is a surrogate code")); + } + } + + public void testQuotedStringInvalidUnicodeCodeParsing() { + { + // Invalid unicode digit (G) + ParserRuleContext ctx = parserRuleContext(quotedStringNode("\\u0G41")); + KqlParsingException e = assertThrows(KqlParsingException.class, () -> extractText(ctx)); + assertThat(e.getMessage(), equalTo("line 0:4: Invalid unicode character code [0G41]")); + } + + { + // U+D800—U+DFFF can only be used as surrogate pairs and are not valid character codes. + ParserRuleContext ctx = parserRuleContext(quotedStringNode("\\uD900")); + KqlParsingException e = assertThrows(KqlParsingException.class, () -> extractText(ctx)); + assertThat(e.getMessage(), equalTo("line 0:4: Invalid unicode character code, [D900] is a surrogate code")); + } + } + + public void testEscapeLuceneQueryString() { + // Quotes + assertThat(escapeLuceneQueryString("\"The Pink Panther\"", randomBoolean()), equalTo("\\\"The Pink Panther\\\"")); + + // Escape chars + assertThat(escapeLuceneQueryString("The Pink \\ Panther", randomBoolean()), equalTo("The Pink \\\\ Panther")); + + // Field operations + assertThat(escapeLuceneQueryString("title:Do it right", randomBoolean()), equalTo("title\\:Do it right")); + assertThat(escapeLuceneQueryString("title:(pink panther)", randomBoolean()), equalTo("title\\:\\(pink panther\\)")); + assertThat(escapeLuceneQueryString("title:-pink", randomBoolean()), equalTo("title\\:\\-pink")); + assertThat(escapeLuceneQueryString("title:+pink", randomBoolean()), equalTo("title\\:\\+pink")); + assertThat(escapeLuceneQueryString("title:pink~", randomBoolean()), equalTo("title\\:pink\\~")); + assertThat(escapeLuceneQueryString("title:pink~3.5", randomBoolean()), equalTo("title\\:pink\\~3.5")); + assertThat(escapeLuceneQueryString("title:pink panther^4", randomBoolean()), equalTo("title\\:pink panther\\^4")); + assertThat(escapeLuceneQueryString("rating:[0 TO 5]", randomBoolean()), equalTo("rating\\:\\[0 TO 5\\]")); + assertThat(escapeLuceneQueryString("rating:{0 TO 5}", randomBoolean()), equalTo("rating\\:\\{0 TO 5\\}")); + + // Boolean operators + assertThat(escapeLuceneQueryString("foo || bar", randomBoolean()), equalTo("foo \\|\\| bar")); + assertThat(escapeLuceneQueryString("foo && bar", randomBoolean()), equalTo("foo \\&\\& bar")); + assertThat(escapeLuceneQueryString("!foo", randomBoolean()), equalTo("\\!foo")); + + // Wildcards: + assertThat(escapeLuceneQueryString("te?t", randomBoolean()), equalTo("te\\?t")); + assertThat(escapeLuceneQueryString("foo*", true), equalTo("foo*")); + assertThat(escapeLuceneQueryString("*foo", true), equalTo("*foo")); + assertThat(escapeLuceneQueryString("foo * bar", true), equalTo("foo * bar")); + assertThat(escapeLuceneQueryString("foo*", false), equalTo("foo\\*")); + } + + private static ParserRuleContext parserRuleContext(ParseTree child) { + return parserRuleContext(List.of(child)); + } + + private static ParserRuleContext parserRuleContext(List children) { + ParserRuleContext ctx = new ParserRuleContext(null, randomInt()); + ctx.children = children; + return ctx; + } + + private static TerminalNode terminalNode(int type, String text) { + Token symbol = mock(Token.class); + when(symbol.getType()).thenReturn(type); + when(symbol.getText()).thenReturn(text); + when(symbol.getLine()).thenReturn(0); + when(symbol.getCharPositionInLine()).thenReturn(0); + return new TerminalNodeImpl(symbol); + } + + private static List randomTextNodeListWithNode(TerminalNode node) { + List nodes = new ArrayList<>( + Stream.concat(Stream.generate(ParserUtilsTests::randomTextNode).limit(100), Stream.of(node)).toList() + ); + Collections.shuffle(nodes, random()); + return nodes; + } + + private static TerminalNode randomTextNode() { + return switch (randomInt() % 3) { + case 0 -> wildcardNode(); + case 1 -> randomQuotedStringNode(); + default -> randomLiteralNode(); + }; + } + + private static TerminalNode quotedStringNode(String quotedStringText) { + return terminalNode(QUOTED_STRING, "\"" + quotedStringText + "\""); + } + + private static TerminalNode randomQuotedStringNode() { + return quotedStringNode(randomIdentifier()); + } + + private static TerminalNode literalNode(String literalText) { + return terminalNode(UNQUOTED_LITERAL, literalText); + } + + private static TerminalNode randomLiteralNode() { + return terminalNode(UNQUOTED_LITERAL, randomIdentifier()); + } + + private static TerminalNode wildcardNode() { + return terminalNode(WILDCARD, "*"); + } +} From 3294c679eb9b2fee0d6aca529da3fe2e2745db18 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 25 Oct 2024 13:57:43 -0400 Subject: [PATCH 120/324] Fix test mute 115605 (#115659) * Unmuting test issue #115605 * fixing --------- Co-authored-by: Elastic Machine --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 70f29016d8475..abd483c1bc67e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -270,9 +270,6 @@ tests: - class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} issue: https://github.com/elastic/elasticsearch/issues/115600 -- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT - method: test {yaml=indices.create/10_basic/Create lookup index} - issue: https://github.com/elastic/elasticsearch/issues/115605 - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 From 679ef122091e825401993530e38dcabe26f375bf Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 05:31:10 +1100 Subject: [PATCH 121/324] Mute org.elasticsearch.index.get.GetResultTests testToAndFromXContent #115688 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index abd483c1bc67e..f92795c18e2d2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -276,6 +276,9 @@ tests: - class: org.elasticsearch.xpack.search.CrossClusterAsyncSearchIT method: testCCSClusterDetailsWhereAllShardsSkippedInCanMatch issue: https://github.com/elastic/elasticsearch/issues/115652 +- class: org.elasticsearch.index.get.GetResultTests + method: testToAndFromXContent + issue: https://github.com/elastic/elasticsearch/issues/115688 # Examples: # From 8240f409f943b1f2f865a4704213cdb79b047881 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 05:31:46 +1100 Subject: [PATCH 122/324] Mute org.elasticsearch.action.update.UpdateResponseTests testToAndFromXContent #115689 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f92795c18e2d2..91644c9af70ca 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -279,6 +279,9 @@ tests: - class: org.elasticsearch.index.get.GetResultTests method: testToAndFromXContent issue: https://github.com/elastic/elasticsearch/issues/115688 +- class: org.elasticsearch.action.update.UpdateResponseTests + method: testToAndFromXContent + issue: https://github.com/elastic/elasticsearch/issues/115689 # Examples: # From 68316f7d17da89a11695e62561b4b0e099c0c3ff Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Fri, 25 Oct 2024 20:52:06 +0200 Subject: [PATCH 123/324] Remove metering from ingest service to occur afterwards when parsing the final document (#114895) --- ...eteringParserDecoratorWithPipelinesIT.java | 137 ------------------ .../XContentMeteringParserDecoratorIT.java | 11 +- .../org/elasticsearch/TransportVersions.java | 1 + .../action/index/IndexRequest.java | 81 +++-------- .../action/update/UpdateHelper.java | 17 +-- .../index/mapper/DocumentParser.java | 2 +- .../index/mapper/ParsedDocument.java | 21 +-- .../index/mapper/SourceToParse.java | 2 +- .../elasticsearch/ingest/IngestService.java | 16 +- .../elasticsearch/node/NodeConstruction.java | 9 +- .../internal/DocumentParsingProvider.java | 4 +- .../XContentMeteringParserDecorator.java | 8 +- .../bulk/TransportShardBulkActionTests.java | 32 ++-- .../ingest/ReservedPipelineActionTests.java | 2 - .../action/update/UpdateRequestTests.java | 5 +- .../index/IndexingSlowLogTests.java | 12 +- .../index/engine/InternalEngineTests.java | 4 +- .../index/shard/RefreshListenersTests.java | 4 +- .../index/translog/TranslogTests.java | 4 +- .../ingest/IngestServiceTests.java | 77 ---------- .../ingest/SimulateIngestServiceTests.java | 2 - .../snapshots/SnapshotResiliencyTests.java | 3 +- .../index/engine/EngineTestCase.java | 3 +- ...sportGetTrainedModelsStatsActionTests.java | 2 - .../authz/AuthorizationServiceTests.java | 3 +- 25 files changed, 87 insertions(+), 375 deletions(-) delete mode 100644 modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java deleted file mode 100644 index 3547b3f9910ad..0000000000000 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.plugins.internal; - -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.ingest.common.IngestCommonPlugin; -import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.FilterXContentParserWrapper; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; - -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) -public class XContentMeteringParserDecoratorWithPipelinesIT extends ESIntegTestCase { - - private static String TEST_INDEX_NAME = "test-index-name"; - // the assertions are done in plugin which is static and will be created by ES server. - // hence a static flag to make sure it is indeed used - public static volatile boolean hasWrappedParser; - public static AtomicLong providedFixedSize = new AtomicLong(); - - public void testDocumentIsReportedWithPipelines() throws Exception { - hasWrappedParser = false; - // pipeline adding fields, changing destination is not affecting reporting - putJsonPipeline("pipeline", """ - { - "processors": [ - { - "set": { - "field": "my-text-field", - "value": "xxxx" - } - }, - { - "set": { - "field": "my-boolean-field", - "value": true - } - } - ] - } - """); - - client().index( - new IndexRequest(TEST_INDEX_NAME).setPipeline("pipeline") - .id("1") - .source(jsonBuilder().startObject().field("test", "I am sam i am").endObject()) - ).actionGet(); - assertBusy(() -> { - // ingest node has used an observer that was counting #map operations - // and passed that info to newFixedSize observer in TransportShardBulkAction - assertTrue(hasWrappedParser); - assertThat(providedFixedSize.get(), equalTo(1L)); - }); - } - - @Override - protected Collection> nodePlugins() { - return List.of(TestDocumentParsingProviderPlugin.class, IngestCommonPlugin.class); - } - - public static class TestDocumentParsingProviderPlugin extends Plugin implements DocumentParsingProviderPlugin, IngestPlugin { - - public TestDocumentParsingProviderPlugin() {} - - @Override - public DocumentParsingProvider getDocumentParsingProvider() { - // returns a static instance, because we want to assert that the wrapping is called only once - return new DocumentParsingProvider() { - @Override - public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { - if (request instanceof IndexRequest indexRequest && indexRequest.getNormalisedBytesParsed() > 0) { - long normalisedBytesParsed = indexRequest.getNormalisedBytesParsed(); - providedFixedSize.set(normalisedBytesParsed); - return new TestXContentMeteringParserDecorator(normalisedBytesParsed); - } - return new TestXContentMeteringParserDecorator(0L); - } - - @Override - public DocumentSizeReporter newDocumentSizeReporter( - String indexName, - MapperService mapperService, - DocumentSizeAccumulator documentSizeAccumulator - ) { - return DocumentSizeReporter.EMPTY_INSTANCE; - } - }; - } - } - - public static class TestXContentMeteringParserDecorator implements XContentMeteringParserDecorator { - long mapCounter = 0; - - public TestXContentMeteringParserDecorator(long mapCounter) { - this.mapCounter = mapCounter; - } - - @Override - public XContentParser decorate(XContentParser xContentParser) { - hasWrappedParser = true; - return new FilterXContentParserWrapper(xContentParser) { - - @Override - public Map map() throws IOException { - mapCounter++; - return super.map(); - } - }; - } - - @Override - public ParsedDocument.DocumentSize meteredDocumentSize() { - return new ParsedDocument.DocumentSize(mapCounter, 0); - } - } - -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java index f11c145f71f23..f70667b91aec8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.plugins.internal; -import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; @@ -126,7 +125,7 @@ public TestDocumentParsingProviderPlugin() {} public DocumentParsingProvider getDocumentParsingProvider() { return new DocumentParsingProvider() { @Override - public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { + public XContentMeteringParserDecorator newMeteringParserDecorator(IndexRequest request) { return new TestXContentMeteringParserDecorator(0L); } @@ -152,8 +151,8 @@ public TestDocumentSizeReporter(String indexName) { @Override public void onIndexingCompleted(ParsedDocument parsedDocument) { - long delta = parsedDocument.getNormalizedSize().ingestedBytes(); - if (delta > 0) { + long delta = parsedDocument.getNormalizedSize(); + if (delta > XContentMeteringParserDecorator.UNKNOWN_SIZE) { COUNTER.addAndGet(delta); } assertThat(indexName, equalTo(TEST_INDEX_NAME)); @@ -181,8 +180,8 @@ public Token nextToken() throws IOException { } @Override - public ParsedDocument.DocumentSize meteredDocumentSize() { - return new ParsedDocument.DocumentSize(counter, counter); + public long meteredDocumentSize() { + return counter; } } } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 25bb792d827a9..3986ea4b97254 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -182,6 +182,7 @@ static TransportVersion def(int id) { public static final TransportVersion SIMULATE_MAPPING_ADDITION = def(8_777_00_0); public static final TransportVersion INTRODUCE_ALL_APPLICABLE_SELECTOR = def(8_778_00_0); public static final TransportVersion INDEX_MODE_LOOKUP = def(8_779_00_0); + public static final TransportVersion INDEX_REQUEST_REMOVE_METERING = def(8_780_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index b98f5d87ee232..d0785a60dd0f5 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -146,9 +146,6 @@ public class IndexRequest extends ReplicatedWriteRequest implement * rawTimestamp field is used on the coordinate node, it doesn't need to be serialised. */ private Object rawTimestamp; - private long normalisedBytesParsed = -1; - private boolean originatesFromUpdateByScript; - private boolean originatesFromUpdateByDoc; public IndexRequest(StreamInput in) throws IOException { this(null, in); @@ -183,7 +180,7 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio dynamicTemplates = in.readMap(StreamInput::readString); if (in.getTransportVersion().onOrAfter(PIPELINES_HAVE_RUN_FIELD_ADDED) && in.getTransportVersion().before(TransportVersions.V_8_13_0)) { - in.readBoolean(); + in.readBoolean(); // obsolete, prior to tracking normalisedBytesParsed } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.listExecutedPipelines = in.readBoolean(); @@ -196,21 +193,20 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { requireDataStream = in.readBoolean(); - normalisedBytesParsed = in.readZLong(); } else { requireDataStream = false; } - if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { - originatesFromUpdateByScript = in.readBoolean(); - } else { - originatesFromUpdateByScript = false; - } - - if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { - originatesFromUpdateByDoc = in.readBoolean(); - } else { - originatesFromUpdateByDoc = false; + if (in.getTransportVersion().before(TransportVersions.INDEX_REQUEST_REMOVE_METERING)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { + in.readZLong(); // obsolete normalisedBytesParsed + } + if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { + in.readBoolean(); // obsolete originatesFromUpdateByScript + } + if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { + in.readBoolean(); // obsolete originatesFromUpdateByDoc + } } } @@ -759,7 +755,7 @@ private void writeBody(StreamOutput out) throws IOException { out.writeMap(dynamicTemplates, StreamOutput::writeString); if (out.getTransportVersion().onOrAfter(PIPELINES_HAVE_RUN_FIELD_ADDED) && out.getTransportVersion().before(TransportVersions.V_8_13_0)) { - out.writeBoolean(normalisedBytesParsed != -1L); + out.writeBoolean(false); // obsolete, prior to tracking normalisedBytesParsed } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeBoolean(listExecutedPipelines); @@ -770,15 +766,18 @@ private void writeBody(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeBoolean(requireDataStream); - out.writeZLong(normalisedBytesParsed); - } - - if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { - out.writeBoolean(originatesFromUpdateByScript); } - if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { - out.writeBoolean(originatesFromUpdateByDoc); + if (out.getTransportVersion().before(TransportVersions.INDEX_REQUEST_REMOVE_METERING)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { + out.writeZLong(-1); // obsolete normalisedBytesParsed + } + if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { + out.writeBoolean(false); // obsolete originatesFromUpdateByScript + } + if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { + out.writeBoolean(false); // obsolete originatesFromUpdateByDoc + } } } @@ -928,24 +927,6 @@ public void setRawTimestamp(Object rawTimestamp) { this.rawTimestamp = rawTimestamp; } - /** - * Returns a number of bytes observed when parsing a document in earlier stages of ingestion (like update/ingest service) - * Defaults to -1 when a document size was not observed in earlier stages. - * @return a number of bytes observed - */ - public long getNormalisedBytesParsed() { - return normalisedBytesParsed; - } - - /** - * Sets number of bytes observed by a DocumentSizeObserver - * @return an index request - */ - public IndexRequest setNormalisedBytesParsed(long normalisedBytesParsed) { - this.normalisedBytesParsed = normalisedBytesParsed; - return this; - } - /** * Adds the pipeline to the list of executed pipelines, if listExecutedPipelines is true * @@ -976,22 +957,4 @@ public List getExecutedPipelines() { return Collections.unmodifiableList(executedPipelines); } } - - public IndexRequest setOriginatesFromUpdateByScript(boolean originatesFromUpdateByScript) { - this.originatesFromUpdateByScript = originatesFromUpdateByScript; - return this; - } - - public boolean originatesFromUpdateByScript() { - return originatesFromUpdateByScript; - } - - public boolean originatesFromUpdateByDoc() { - return originatesFromUpdateByDoc; - } - - public IndexRequest setOriginatesFromUpdateByDoc(boolean originatesFromUpdateByDoc) { - this.originatesFromUpdateByDoc = originatesFromUpdateByDoc; - return this; - } } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 212b99ca140d3..d32e102b2e18b 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -28,8 +28,7 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; -import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; +import org.elasticsearch.plugins.internal.XContentParserDecorator; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.UpdateCtxMap; @@ -51,11 +50,9 @@ public class UpdateHelper { private static final Logger logger = LogManager.getLogger(UpdateHelper.class); private final ScriptService scriptService; - private final DocumentParsingProvider documentParsingProvider; - public UpdateHelper(ScriptService scriptService, DocumentParsingProvider documentParsingProvider) { + public UpdateHelper(ScriptService scriptService) { this.scriptService = scriptService; - this.documentParsingProvider = documentParsingProvider; } /** @@ -183,14 +180,13 @@ static String calculateRouting(GetResult getResult, @Nullable IndexRequest updat Result prepareUpdateIndexRequest(IndexShard indexShard, UpdateRequest request, GetResult getResult, boolean detectNoop) { final IndexRequest currentRequest = request.doc(); final String routing = calculateRouting(getResult, currentRequest); - final XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator(request); final Tuple> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true); final XContentType updateSourceContentType = sourceAndContent.v1(); final Map updatedSourceAsMap = sourceAndContent.v2(); final boolean noop = XContentHelper.update( updatedSourceAsMap, - currentRequest.sourceAsMap(meteringParserDecorator), + currentRequest.sourceAsMap(XContentParserDecorator.NOOP), detectNoop ) == false; @@ -228,9 +224,7 @@ Result prepareUpdateIndexRequest(IndexShard indexShard, UpdateRequest request, G .setIfPrimaryTerm(getResult.getPrimaryTerm()) .waitForActiveShards(request.waitForActiveShards()) .timeout(request.timeout()) - .setRefreshPolicy(request.getRefreshPolicy()) - .setOriginatesFromUpdateByDoc(true); - finalIndexRequest.setNormalisedBytesParsed(meteringParserDecorator.meteredDocumentSize().ingestedBytes()); + .setRefreshPolicy(request.getRefreshPolicy()); return new Result(finalIndexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType); } } @@ -272,8 +266,7 @@ Result prepareUpdateScriptRequest(IndexShard indexShard, UpdateRequest request, .setIfPrimaryTerm(getResult.getPrimaryTerm()) .waitForActiveShards(request.waitForActiveShards()) .timeout(request.timeout()) - .setRefreshPolicy(request.getRefreshPolicy()) - .setOriginatesFromUpdateByScript(true); + .setRefreshPolicy(request.getRefreshPolicy()); return new Result(indexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType); } case DELETE -> { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 1ed0a117ddd89..bde9b0fb8a4ab 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -80,7 +80,7 @@ public ParsedDocument parseDocument(SourceToParse source, MappingLookup mappingL final RootDocumentParserContext context; final XContentType xContentType = source.getXContentType(); - XContentMeteringParserDecorator meteringParserDecorator = source.getDocumentSizeObserver(); + XContentMeteringParserDecorator meteringParserDecorator = source.getMeteringParserDecorator(); try ( XContentParser parser = meteringParserDecorator.decorate( XContentHelper.createParser(parserConfiguration, source.source(), xContentType) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index b1d882f04de54..f2ddf38fe4357 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.mapper.MapperService.MergeReason; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.xcontent.XContentType; import java.util.Collections; @@ -24,6 +25,7 @@ * The result of parsing a document. */ public class ParsedDocument { + private final Field version; private final String id; @@ -33,7 +35,7 @@ public class ParsedDocument { private final List documents; - private final DocumentSize normalizedSize; + private final long normalizedSize; private BytesReference source; private XContentType xContentType; @@ -61,7 +63,7 @@ public static ParsedDocument noopTombstone(String reason) { new BytesArray("{}"), XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); } @@ -86,7 +88,7 @@ public static ParsedDocument deleteTombstone(String id) { new BytesArray("{}"), XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); } @@ -99,7 +101,7 @@ public ParsedDocument( BytesReference source, XContentType xContentType, Mapping dynamicMappingsUpdate, - DocumentSize normalizedSize + long normalizedSize ) { this.version = version; this.seqID = seqID; @@ -178,16 +180,7 @@ public String documentDescription() { return "id"; } - public DocumentSize getNormalizedSize() { + public long getNormalizedSize() { return normalizedSize; } - - /** - * Normalized ingested and stored size of a document. - * @param ingestedBytes ingest size of the document - * @param storedBytes stored retained size of the document - */ - public record DocumentSize(long ingestedBytes, long storedBytes) { - public static final DocumentSize UNKNOWN = new DocumentSize(-1, -1); - } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java index a8cb03c223833..879e0fe785df2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java @@ -91,7 +91,7 @@ public XContentType getXContentType() { return this.xContentType; } - public XContentMeteringParserDecorator getDocumentSizeObserver() { + public XContentMeteringParserDecorator getMeteringParserDecorator() { return meteringParserDecorator; } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 99ff44a3cd135..b5ac54b018e46 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -68,8 +68,6 @@ import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.node.ReportingService; import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; -import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.plugins.internal.XContentParserDecorator; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.Scheduler; @@ -121,7 +119,6 @@ public class IngestService implements ClusterStateApplier, ReportingService taskQueue; private final ClusterService clusterService; private final ScriptService scriptService; - private final DocumentParsingProvider documentParsingProvider; private final Map processorFactories; // Ideally this should be in IngestMetadata class, but we don't have the processor factories around there. // We know of all the processor factories when a node with all its plugin have been initialized. Also some @@ -204,12 +201,10 @@ public IngestService( List ingestPlugins, Client client, MatcherWatchdog matcherWatchdog, - DocumentParsingProvider documentParsingProvider, FailureStoreMetrics failureStoreMetrics ) { this.clusterService = clusterService; this.scriptService = scriptService; - this.documentParsingProvider = documentParsingProvider; this.processorFactories = processorFactories( ingestPlugins, new Processor.Parameters( @@ -238,7 +233,6 @@ public IngestService( IngestService(IngestService ingestService) { this.clusterService = ingestService.clusterService; this.scriptService = ingestService.scriptService; - this.documentParsingProvider = ingestService.documentParsingProvider; this.processorFactories = ingestService.processorFactories; this.threadPool = ingestService.threadPool; this.taskQueue = ingestService.taskQueue; @@ -776,10 +770,7 @@ protected void doRun() { } final int slot = i; final Releasable ref = refs.acquire(); - final XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator( - indexRequest - ); - final IngestDocument ingestDocument = newIngestDocument(indexRequest, meteringParserDecorator); + final IngestDocument ingestDocument = newIngestDocument(indexRequest); final org.elasticsearch.script.Metadata originalDocumentMetadata = ingestDocument.getMetadata().clone(); // the document listener gives us three-way logic: a document can fail processing (1), or it can // be successfully processed. a successfully processed document can be kept (2) or dropped (3). @@ -820,7 +811,6 @@ public void onFailure(Exception e) { ); executePipelines(pipelines, indexRequest, ingestDocument, resolveFailureStore, documentListener); - indexRequest.setNormalisedBytesParsed(meteringParserDecorator.meteredDocumentSize().ingestedBytes()); assert actionRequest.index() != null; i++; @@ -1159,14 +1149,14 @@ static String getProcessorName(Processor processor) { /** * Builds a new ingest document from the passed-in index request. */ - private static IngestDocument newIngestDocument(final IndexRequest request, XContentParserDecorator parserDecorator) { + private static IngestDocument newIngestDocument(final IndexRequest request) { return new IngestDocument( request.index(), request.id(), request.version(), request.routing(), request.versionType(), - request.sourceAsMap(parserDecorator) + request.sourceAsMap(XContentParserDecorator.NOOP) ); } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 784e02059823b..0a88a202ac8d3 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -285,7 +285,7 @@ static NodeConstruction prepareConstruction( ScriptService scriptService = constructor.createScriptService(settingsModule, threadPool, serviceProvider); - constructor.createUpdateHelper(documentParsingProvider, scriptService); + constructor.createUpdateHelper(scriptService); constructor.construct( threadPool, @@ -643,10 +643,10 @@ private DataStreamGlobalRetentionSettings createDataStreamServicesAndGlobalReten return dataStreamGlobalRetentionSettings; } - private UpdateHelper createUpdateHelper(DocumentParsingProvider documentParsingProvider, ScriptService scriptService) { - UpdateHelper updateHelper = new UpdateHelper(scriptService, documentParsingProvider); + private UpdateHelper createUpdateHelper(ScriptService scriptService) { + UpdateHelper updateHelper = new UpdateHelper(scriptService); - modules.add(b -> { b.bind(UpdateHelper.class).toInstance(new UpdateHelper(scriptService, documentParsingProvider)); }); + modules.add(b -> b.bind(UpdateHelper.class).toInstance(updateHelper)); return updateHelper; } @@ -701,7 +701,6 @@ private void construct( pluginsService.filterPlugins(IngestPlugin.class).toList(), client, IngestService.createGrokThreadWatchdog(environment, threadPool), - documentParsingProvider, failureStoreMetrics ); diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java index e1613caf9deac..9df7fd4c3bd43 100644 --- a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java +++ b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java @@ -9,7 +9,7 @@ package org.elasticsearch.plugins.internal; -import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.index.mapper.MapperService; /** @@ -40,7 +40,7 @@ default DocumentSizeAccumulator createDocumentSizeAccumulator() { /** * @return an observer */ - default XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { + default XContentMeteringParserDecorator newMeteringParserDecorator(IndexRequest request) { return XContentMeteringParserDecorator.NOOP; } } diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java b/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java index e3b4415edcc01..6ccdac19acb91 100644 --- a/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java +++ b/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java @@ -9,17 +9,17 @@ package org.elasticsearch.plugins.internal; -import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.xcontent.XContentParser; public interface XContentMeteringParserDecorator extends XContentParserDecorator { + long UNKNOWN_SIZE = -1; /** * a default noop implementation */ XContentMeteringParserDecorator NOOP = new XContentMeteringParserDecorator() { @Override - public DocumentSize meteredDocumentSize() { - return DocumentSize.UNKNOWN; + public long meteredDocumentSize() { + return UNKNOWN_SIZE; } @Override @@ -28,5 +28,5 @@ public XContentParser decorate(XContentParser xContentParser) { } }; - DocumentSize meteredDocumentSize(); + long meteredDocumentSize(); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 35ef892da59a2..b389e33993b9b 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -49,11 +49,11 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.plugins.internal.DocumentParsingProvider; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; -import org.mockito.ArgumentCaptor; import org.mockito.MockingDetails; import org.mockito.Mockito; import org.mockito.stubbing.Stubbing; @@ -114,13 +114,18 @@ public void testExecuteBulkIndexRequest() throws Exception { BulkItemRequest[] items = new BulkItemRequest[1]; boolean create = randomBoolean(); - DocWriteRequest writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE).create(create); + IndexRequest writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE).create(create); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); items[0] = primaryRequest; BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); randomlySetIgnoredPrimaryResponse(primaryRequest); + DocumentParsingProvider documentParsingProvider = mock(); + XContentMeteringParserDecorator parserDecorator = mock(); + when(documentParsingProvider.newMeteringParserDecorator(any())).thenReturn(parserDecorator); + when(parserDecorator.decorate(any())).then(i -> i.getArgument(0)); + BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest( context, @@ -129,7 +134,7 @@ public void testExecuteBulkIndexRequest() throws Exception { new NoopMappingUpdatePerformer(), (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, - DocumentParsingProvider.EMPTY_INSTANCE + documentParsingProvider ); assertFalse(context.hasMoreOperationsToExecute()); @@ -185,6 +190,8 @@ public void testExecuteBulkIndexRequest() throws Exception { assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT)); assertThat(replicaRequest, equalTo(primaryRequest)); + verify(documentParsingProvider).newMeteringParserDecorator(any()); + verify(parserDecorator).decorate(any()); // Assert that the document count is still 1 assertDocCount(shard, 1); @@ -600,9 +607,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception { .retryOnConflict(retries); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); - IndexRequest updateResponse = new IndexRequest("index").id("id") - .source(Requests.INDEX_CONTENT_TYPE, "field", "value") - .setNormalisedBytesParsed(0);// let's pretend this was modified by a script + IndexRequest updateResponse = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); DocumentParsingProvider documentParsingProvider = mock(DocumentParsingProvider.class); Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); @@ -655,11 +660,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception { assertThat(failure.getCause(), equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT)); - // we have set 0 value on normalisedBytesParsed on the IndexRequest, like it happens with updates by script. - ArgumentCaptor argument = ArgumentCaptor.forClass(IndexRequest.class); - verify(documentParsingProvider, times(retries + 1)).newMeteringParserDecorator(argument.capture()); - IndexRequest value = argument.getValue(); - assertThat(value.getNormalisedBytesParsed(), equalTo(0L)); + verify(documentParsingProvider, times(retries + 1)).newMeteringParserDecorator(any()); } @SuppressWarnings("unchecked") @@ -668,9 +669,7 @@ public void testUpdateRequestWithSuccess() throws Exception { DocWriteRequest writeRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); - IndexRequest updateResponse = new IndexRequest("index").id("id") - .source(Requests.INDEX_CONTENT_TYPE, "field", "value") - .setNormalisedBytesParsed(100L); + IndexRequest updateResponse = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); DocumentParsingProvider documentParsingProvider = mock(DocumentParsingProvider.class); boolean created = randomBoolean(); @@ -721,10 +720,7 @@ public void testUpdateRequestWithSuccess() throws Exception { assertThat(response.status(), equalTo(created ? RestStatus.CREATED : RestStatus.OK)); assertThat(response.getSeqNo(), equalTo(13L)); - ArgumentCaptor argument = ArgumentCaptor.forClass(IndexRequest.class); - verify(documentParsingProvider, times(1)).newMeteringParserDecorator(argument.capture()); - IndexRequest value = argument.getValue(); - assertThat(value.getNormalisedBytesParsed(), equalTo(100L)); + verify(documentParsingProvider).newMeteringParserDecorator(updateResponse); } public void testUpdateWithDelete() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java index 9729b653ae3d2..331f754d437a7 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.ProcessorInfo; import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.reservedstate.service.ReservedClusterStateService; @@ -94,7 +93,6 @@ public void setup() { Collections.singletonList(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); Map factories = ingestService.getProcessorFactories(); diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index d8960bd902ac5..0cc2dcf38e8ff 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptEngine; @@ -121,7 +120,7 @@ public void setUp() throws Exception { final MockScriptEngine engine = new MockScriptEngine("mock", scripts, Collections.emptyMap()); Map engines = Collections.singletonMap(engine.getType(), engine); ScriptService scriptService = new ScriptService(baseSettings, engines, ScriptModule.CORE_CONTEXTS, () -> 1L); - updateHelper = new UpdateHelper(scriptService, DocumentParsingProvider.EMPTY_INSTANCE); + updateHelper = new UpdateHelper(scriptService); } @SuppressWarnings("unchecked") @@ -594,7 +593,7 @@ public void testNoopDetection() throws Exception { try (var parser = createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"foo\"}}"))) { request = new UpdateRequest("test", "1").fromXContent(parser); } - UpdateHelper updateHelper = new UpdateHelper(mock(ScriptService.class), DocumentParsingProvider.EMPTY_INSTANCE); + UpdateHelper updateHelper = new UpdateHelper(mock(ScriptService.class)); UpdateHelper.Result result = updateHelper.prepareUpdateIndexRequest(indexShard, request, getResult, true); assertThat(result.action(), instanceOf(UpdateResponse.class)); diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index 753602e73a30a..c626be7983c46 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -28,10 +28,10 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentType; @@ -217,7 +217,7 @@ public void testSlowLogMessageHasJsonFields() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] @@ -246,7 +246,7 @@ public void testSlowLogMessageHasAdditionalFields() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] @@ -276,7 +276,7 @@ public void testEmptyRoutingField() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Index index = new Index("foo", "123"); @@ -295,7 +295,7 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] @@ -327,7 +327,7 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); final XContentParseException e = expectThrows( diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 21aefd893de70..bba1fa338559f 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -109,7 +109,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.Uid; @@ -132,6 +131,7 @@ import org.elasticsearch.index.translog.TranslogOperationsUtils; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -5522,7 +5522,7 @@ public void testSeqNoGenerator() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); final Engine.Index index = new Engine.Index( diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index ccf0bbebcc354..9e7f5fbbce1a3 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.RetentionLeases; @@ -54,6 +53,7 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -567,7 +567,7 @@ private Engine.IndexResult index(String id, String testFieldValue) throws IOExce source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Engine.Index index = new Engine.Index(uid, engine.config().getPrimaryTermSupplier().getAsLong(), doc); return engine.index(index); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index d0cabd609158b..97f49df41d099 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -56,7 +56,6 @@ import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.LocalCheckpointTracker; @@ -64,6 +63,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog.Location; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.TransportVersionUtils; @@ -3395,7 +3395,7 @@ public void testTranslogOpSerialization() throws Exception { B_1, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Engine.Index eIndex = new Engine.Index( diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index d83fdbd5dd46b..b3ddc313eaf3a 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -54,10 +54,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; -import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptModule; @@ -68,7 +65,6 @@ import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.cbor.CborXContent; import org.junit.Before; @@ -157,7 +153,6 @@ public void testIngestPlugin() { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); Map factories = ingestService.getProcessorFactories(); @@ -178,7 +173,6 @@ public void testIngestPluginDuplicate() { List.of(DUMMY_PLUGIN, DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ) ); @@ -196,7 +190,6 @@ public void testExecuteIndexPipelineDoesNotExist() { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); final IndexRequest indexRequest = new IndexRequest("_index").id("_id") @@ -1194,66 +1187,6 @@ public void testExecuteBulkPipelineDoesNotExist() { verify(completionHandler, times(1)).accept(Thread.currentThread(), null); } - public void testExecuteBulkRequestCallsDocumentSizeObserver() { - /* - * This test makes sure that for both insert and upsert requests, when we call executeBulkRequest DocumentSizeObserver is - * called using a non-null index name. - */ - AtomicInteger wrappedObserverWasUsed = new AtomicInteger(0); - AtomicInteger parsedValueWasUsed = new AtomicInteger(0); - DocumentParsingProvider documentParsingProvider = new DocumentParsingProvider() { - @Override - public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { - return new XContentMeteringParserDecorator() { - @Override - public ParsedDocument.DocumentSize meteredDocumentSize() { - parsedValueWasUsed.incrementAndGet(); - return new ParsedDocument.DocumentSize(0, 0); - } - - @Override - public XContentParser decorate(XContentParser xContentParser) { - wrappedObserverWasUsed.incrementAndGet(); - return xContentParser; - } - }; - } - }; - IngestService ingestService = createWithProcessors( - Map.of("mock", (factories, tag, description, config) -> mockCompoundProcessor()), - documentParsingProvider - ); - - PutPipelineRequest putRequest = putJsonPipelineRequest("_id", "{\"processors\": [{\"mock\" : {}}]}"); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty - ClusterState previousClusterState = clusterState; - clusterState = executePut(putRequest, clusterState); - ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - - BulkRequest bulkRequest = new BulkRequest(); - UpdateRequest updateRequest = new UpdateRequest("_index", "_id1").upsert("{}", "{}"); - updateRequest.upsertRequest().setPipeline("_id"); - bulkRequest.add(updateRequest); - IndexRequest indexRequest = new IndexRequest("_index").id("_id1").source(Map.of()).setPipeline("_id1"); - bulkRequest.add(indexRequest); - @SuppressWarnings("unchecked") - BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final BiConsumer completionHandler = mock(BiConsumer.class); - ingestService.executeBulkRequest( - bulkRequest.numberOfActions(), - bulkRequest.requests(), - indexReq -> {}, - (s) -> false, - (slot, targetIndex, e) -> fail("Should not be redirecting failures"), - failureHandler, - completionHandler, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); - assertThat(wrappedObserverWasUsed.get(), equalTo(2)); - assertThat(parsedValueWasUsed.get(), equalTo(2)); - } - public void testExecuteSuccess() { IngestService ingestService = createWithProcessors( Map.of("mock", (factories, tag, description, config) -> mockCompoundProcessor()) @@ -2271,7 +2204,6 @@ public Map getProcessors(Processor.Parameters paramet List.of(testPlugin), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); ingestService.addIngestClusterStateListener(ingestClusterStateListener); @@ -2611,7 +2543,6 @@ private void testUpdatingPipeline(String pipelineString) throws Exception { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, clusterState)); @@ -2921,13 +2852,6 @@ private static IngestService createWithProcessors() { } private static IngestService createWithProcessors(Map processors) { - return createWithProcessors(processors, DocumentParsingProvider.EMPTY_INSTANCE); - } - - private static IngestService createWithProcessors( - Map processors, - DocumentParsingProvider documentParsingProvider - ) { Client client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); @@ -2946,7 +2870,6 @@ public Map getProcessors(final Processor.Parameters p }), client, null, - documentParsingProvider, FailureStoreMetrics.NOOP ); if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java index 94b3607bd7608..e8115e7266176 100644 --- a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; @@ -132,7 +131,6 @@ public Map getProcessors(final Processor.Parameters p List.of(ingestPlugin), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index c46d98fe1cd8b..e0363d84ea4d2 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -2405,7 +2405,6 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { Collections.emptyList(), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ), mockFeatureService, @@ -2425,7 +2424,7 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { threadPool, shardStateAction, mappingUpdatedAction, - new UpdateHelper(scriptService, DocumentParsingProvider.EMPTY_INSTANCE), + new UpdateHelper(scriptService), actionFilters, indexingMemoryLimits, EmptySystemIndices.INSTANCE, diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 4713adf6cf01d..87c566d543d0f 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -100,6 +100,7 @@ import org.elasticsearch.index.translog.TranslogDeletionPolicy; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -428,7 +429,7 @@ protected static ParsedDocument testParsedDocument( source, XContentType.JSON, mappingUpdate, - ParsedDocument.DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java index 7e88cad88dcec..bb973bf4359e8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.ingest.Processor; import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MachineLearningField; @@ -139,7 +138,6 @@ public void setUpVariables() { Collections.singletonList(SKINNY_INGEST_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 5710b031494bf..c2e9a92e45353 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -115,7 +115,6 @@ import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -1580,7 +1579,7 @@ public void testDenialErrorMessagesForBulkIngest() throws Exception { TransportShardBulkAction.performOnPrimary( request, indexShard, - new UpdateHelper(mock(ScriptService.class), DocumentParsingProvider.EMPTY_INSTANCE), + new UpdateHelper(mock(ScriptService.class)), System::currentTimeMillis, mappingUpdater, waitForMappingUpdate, From 5e98251bdab337beb218892d30529a7290f4e5a3 Mon Sep 17 00:00:00 2001 From: Mike Pellegrini Date: Fri, 25 Oct 2024 14:57:46 -0400 Subject: [PATCH 124/324] Remove "Use ELSER By Default For Semantic Text" Changelog Entry (#115686) --- docs/changelog/113563.yaml | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 docs/changelog/113563.yaml diff --git a/docs/changelog/113563.yaml b/docs/changelog/113563.yaml deleted file mode 100644 index 48484ead99d77..0000000000000 --- a/docs/changelog/113563.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113563 -summary: Use ELSER By Default For Semantic Text -area: Mapping -type: enhancement -issues: [] From ca193bb923bf2a42df06dd39c8ad50068842879a Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Fri, 25 Oct 2024 14:56:13 -0500 Subject: [PATCH 125/324] Adding additional checks for IPInfo results (#115481) --- .../geoip/IpinfoIpDataLookupsTests.java | 113 ++++++++++++++++-- 1 file changed, 106 insertions(+), 7 deletions(-) diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookupsTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookupsTests.java index d0cdc5a3e1b5e..11aa123824d18 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookupsTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookupsTests.java @@ -14,8 +14,10 @@ import com.maxmind.db.Reader; import org.apache.lucene.util.Constants; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.test.ESTestCase; import org.junit.After; @@ -113,7 +115,10 @@ public void testAsnFree() { entry("asn", 16625L), entry("network", "23.32.184.0/21"), entry("domain", "akamai.com") - ) + ), + Map.ofEntries(entry("name", "organization_name"), entry("asn", "asn"), entry("network", "network"), entry("domain", "domain")), + Set.of("ip"), + Set.of() ); } @@ -133,7 +138,17 @@ public void testAsnStandard() { entry("domain", "tpx.com"), entry("type", "hosting"), entry("country_iso_code", "US") - ) + ), + Map.ofEntries( + entry("name", "organization_name"), + entry("asn", "asn"), + entry("network", "network"), + entry("domain", "domain"), + entry("country", "country_iso_code"), + entry("type", "type") + ), + Set.of("ip"), + Set.of() ); } @@ -188,7 +203,16 @@ public void testCountryFree() { entry("country_iso_code", "IE"), entry("continent_name", "Europe"), entry("continent_code", "EU") - ) + ), + Map.ofEntries( + entry("continent_name", "continent_name"), + entry("continent", "continent_code"), + entry("country", "country_iso_code"), + entry("country_name", "country_name"), + entry("type", "type") + ), + Set.of("ip"), + Set.of("network") ); } @@ -208,7 +232,18 @@ public void testGeolocationStandard() { entry("timezone", "Europe/London"), entry("postal_code", "E1W"), entry("location", Map.of("lat", 51.50853, "lon", -0.12574)) - ) + ), + Map.ofEntries( + entry("country", "country_iso_code"), + entry("region", "region_name"), + entry("city", "city_name"), + entry("timezone", "timezone"), + entry("postal_code", "postal_code"), + entry("lat", "location"), + entry("lng", "location") + ), + Set.of("ip", "location"), + Set.of("geoname_id", "region_code") ); } @@ -266,7 +301,16 @@ public void testPrivacyDetectionStandard() { entry("relay", false), entry("tor", false), entry("vpn", true) - ) + ), + Map.ofEntries( + entry("hosting", "hosting"), + entry("proxy", "proxy"), + entry("relay", "relay"), + entry("tor", "tor"), + entry("vpn", "vpn") + ), + Set.of("ip"), + Set.of("network", "service") ); } @@ -286,7 +330,17 @@ public void testPrivacyDetectionStandardNonEmptyService() { entry("relay", false), entry("tor", false), entry("vpn", true) - ) + ), + Map.ofEntries( + entry("hosting", "hosting"), + entry("proxy", "proxy"), + entry("service", "service"), + entry("relay", "relay"), + entry("tor", "tor"), + entry("vpn", "vpn") + ), + Set.of("ip"), + Set.of("network") ); } @@ -438,7 +492,15 @@ private static File pathToFile(Path databasePath) { return databasePath.toFile(); } - private void assertExpectedLookupResults(String databaseName, String ip, IpDataLookup lookup, Map expected) { + private void assertExpectedLookupResults( + String databaseName, + String ip, + IpDataLookup lookup, + Map expected, + Map keyMappings, + Set knownAdditionalKeys, + Set knownMissingKeys + ) { try (DatabaseReaderLazyLoader loader = loader(databaseName)) { Map actual = lookup.getData(loader, ip); assertThat( @@ -449,6 +511,7 @@ private void assertExpectedLookupResults(String databaseName, String ip, IpDataL for (Map.Entry entry : expected.entrySet()) { assertThat("Unexpected value for key [" + entry.getKey() + "]", actual.get(entry.getKey()), equalTo(entry.getValue())); } + assertActualResultsMatchReader(actual, databaseName, ip, keyMappings, knownAdditionalKeys, knownMissingKeys); } catch (AssertionError e) { fail(e, "Assert failed for database [%s] with address [%s]", databaseName, ip); } catch (Exception e) { @@ -456,6 +519,42 @@ private void assertExpectedLookupResults(String databaseName, String ip, IpDataL } } + private void assertActualResultsMatchReader( + Map actual, + String databaseName, + String ip, + Map keyMappings, + Set knownAdditionalKeys, + Set knownMissingKeys + ) throws IOException { + Path databasePath = tmpDir.resolve(databaseName); + try (Reader reader = new Reader(pathToFile(databasePath))) { + @SuppressWarnings("unchecked") + Map data = reader.get(InetAddresses.forString(ip), Map.class); + for (String key : data.keySet()) { + if (keyMappings.containsKey(key)) { + assertTrue( + Strings.format( + "The reader returned key [%s] that is expected to map to key [%s], but [%s] did not appear in the " + + "actual data", + key, + keyMappings.get(key), + keyMappings.get(key) + ), + actual.containsKey(keyMappings.get(key)) + ); + } else if (knownMissingKeys.contains(key) == false) { + fail(null, "The reader returned unexpected key [%s]", key); + } + } + for (String key : actual.keySet()) { + if (keyMappings.containsValue(key) == false && knownAdditionalKeys.contains(key) == false) { + fail(null, "Unexpected key [%s] in results", key); + } + } + } + } + private DatabaseReaderLazyLoader loader(final String databaseName) { Path path = tmpDir.resolve(databaseName); copyDatabase("ipinfo/" + databaseName, path); // the ipinfo databases are prefixed on the test classpath From 1ed7ff50a9874491648926ebfe754204298bb939 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 07:22:31 +1100 Subject: [PATCH 126/324] Mute org.elasticsearch.xpack.shutdown.NodeShutdownIT testStalledShardMigrationProperlyDetected #115697 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 91644c9af70ca..be2c8d03c3931 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.action.update.UpdateResponseTests method: testToAndFromXContent issue: https://github.com/elastic/elasticsearch/issues/115689 +- class: org.elasticsearch.xpack.shutdown.NodeShutdownIT + method: testStalledShardMigrationProperlyDetected + issue: https://github.com/elastic/elasticsearch/issues/115697 # Examples: # From 9fffd2962ed8e6e53ba4ab60b5bcf54f2d73aeaa Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Fri, 25 Oct 2024 13:25:46 -0700 Subject: [PATCH 127/324] Add challenge tests for logsdb that utilize stored source (#115606) --- .../xpack/logsdb/qa/DataGenerationHelper.java | 1 + ...ndexedIntoStandardModeChallengeRestIT.java | 30 ------------ ...ndexedIntoStoredSourceChallengeRestIT.java | 48 ++++++++++++++++++ ...bVersusReindexedLogsDbChallengeRestIT.java | 30 ------------ ...VersusLogsStoredSourceChallengeRestIT.java | 22 +++++++++ ...bVersusReindexedLogsDbChallengeRestIT.java | 49 +++++++++++++++++++ 6 files changed, 120 insertions(+), 60 deletions(-) create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedIntoStoredSourceChallengeRestIT.java create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsStoredSourceChallengeRestIT.java create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StoredSourceLogsDbVersusReindexedLogsDbChallengeRestIT.java diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/DataGenerationHelper.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/DataGenerationHelper.java index c03e8aea9c2ac..8a5bb8d12cd3d 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/DataGenerationHelper.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/DataGenerationHelper.java @@ -89,6 +89,7 @@ void standardMapping(XContentBuilder builder) throws IOException { } void logsDbSettings(Settings.Builder builder) { + builder.put("index.mode", "logsdb"); if (keepArraySource) { builder.put(Mapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING.getKey(), "arrays"); } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT.java index 0329f7723a108..d9abdc2cde446 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT.java @@ -7,17 +7,10 @@ package org.elasticsearch.xpack.logsdb.qa; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.List; -import java.util.Locale; - -import static org.hamcrest.Matchers.equalTo; /** * This test compares behavior of a logsdb data stream and a standard index mode data stream @@ -52,27 +45,4 @@ public void baselineMappings(XContentBuilder builder) throws IOException { public void contenderMappings(XContentBuilder builder) throws IOException { dataGenerationHelper.standardMapping(builder); } - - @Override - public Response indexContenderDocuments(CheckedSupplier, IOException> documentsSupplier) throws IOException { - var reindexRequest = new Request("POST", "/_reindex?refresh=true"); - reindexRequest.setJsonEntity(String.format(Locale.ROOT, """ - { - "source": { - "index": "%s" - }, - "dest": { - "index": "%s", - "op_type": "create" - } - } - """, getBaselineDataStreamName(), getContenderDataStreamName())); - var response = client.performRequest(reindexRequest); - assertOK(response); - - var body = entityAsMap(response); - assertThat("encountered failures when performing reindex:\n " + body, body.get("failures"), equalTo(List.of())); - - return response; - } } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedIntoStoredSourceChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedIntoStoredSourceChallengeRestIT.java new file mode 100644 index 0000000000000..776a6faf7fa07 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedIntoStoredSourceChallengeRestIT.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb.qa; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * This test compares behavior of a standard mode data stream and a logsdb data stream using stored source. + * There should be no differences between such two data streams. + */ +public class LogsDbVersusReindexedIntoStoredSourceChallengeRestIT extends ReindexChallengeRestIT { + public String getBaselineDataStreamName() { + return "logs-apache-baseline"; + } + + public String getContenderDataStreamName() { + return "logs-apache-reindexed"; + } + + @Override + public void baselineSettings(Settings.Builder builder) { + dataGenerationHelper.logsDbSettings(builder); + } + + @Override + public void contenderSettings(Settings.Builder builder) { + dataGenerationHelper.logsDbSettings(builder); + builder.put("index.mapping.source.mode", "stored"); + } + + @Override + public void baselineMappings(XContentBuilder builder) throws IOException { + dataGenerationHelper.logsDbMapping(builder); + } + + @Override + public void contenderMappings(XContentBuilder builder) throws IOException { + dataGenerationHelper.logsDbMapping(builder); + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedLogsDbChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedLogsDbChallengeRestIT.java index 1c425cf30907b..8b00c647b5dd0 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedLogsDbChallengeRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedLogsDbChallengeRestIT.java @@ -7,17 +7,10 @@ package org.elasticsearch.xpack.logsdb.qa; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.List; -import java.util.Locale; - -import static org.hamcrest.Matchers.equalTo; /** * This test compares behavior of a logsdb data stream and a data stream containing @@ -52,27 +45,4 @@ public void baselineMappings(XContentBuilder builder) throws IOException { public void contenderMappings(XContentBuilder builder) throws IOException { dataGenerationHelper.logsDbMapping(builder); } - - @Override - public Response indexContenderDocuments(CheckedSupplier, IOException> documentsSupplier) throws IOException { - var reindexRequest = new Request("POST", "/_reindex?refresh=true"); - reindexRequest.setJsonEntity(String.format(Locale.ROOT, """ - { - "source": { - "index": "%s" - }, - "dest": { - "index": "%s", - "op_type": "create" - } - } - """, getBaselineDataStreamName(), getContenderDataStreamName())); - var response = client.performRequest(reindexRequest); - assertOK(response); - - var body = entityAsMap(response); - assertThat("encountered failures when performing reindex:\n " + body, body.get("failures"), equalTo(List.of())); - - return response; - } } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsStoredSourceChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsStoredSourceChallengeRestIT.java new file mode 100644 index 0000000000000..2f018b7dc0b38 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsStoredSourceChallengeRestIT.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb.qa; + +import org.elasticsearch.common.settings.Settings; + +/** + * This test compares behavior of a standard mode data stream and a logsdb data stream using stored source. + * There should be no differences between such two data streams. + */ +public class StandardVersusLogsStoredSourceChallengeRestIT extends StandardVersusLogsIndexModeRandomDataChallengeRestIT { + @Override + public void contenderSettings(Settings.Builder builder) { + super.contenderSettings(builder); + builder.put("index.mapping.source.mode", "stored"); + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StoredSourceLogsDbVersusReindexedLogsDbChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StoredSourceLogsDbVersusReindexedLogsDbChallengeRestIT.java new file mode 100644 index 0000000000000..a0672daafb243 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StoredSourceLogsDbVersusReindexedLogsDbChallengeRestIT.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb.qa; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * This test compares behavior of a logsdb data stream using stored source and a logsdb data stream + * containing data reindexed from initial data stream. + * There should be no differences between such two data streams. + */ +public class StoredSourceLogsDbVersusReindexedLogsDbChallengeRestIT extends ReindexChallengeRestIT { + public String getBaselineDataStreamName() { + return "logs-apache-baseline"; + } + + public String getContenderDataStreamName() { + return "logs-apache-reindexed"; + } + + @Override + public void baselineSettings(Settings.Builder builder) { + dataGenerationHelper.logsDbSettings(builder); + builder.put("index.mapping.source.mode", "stored"); + } + + @Override + public void contenderSettings(Settings.Builder builder) { + dataGenerationHelper.logsDbSettings(builder); + } + + @Override + public void baselineMappings(XContentBuilder builder) throws IOException { + dataGenerationHelper.logsDbMapping(builder); + } + + @Override + public void contenderMappings(XContentBuilder builder) throws IOException { + dataGenerationHelper.logsDbMapping(builder); + } +} From 38a19bfa2e4918db8ee64e8ca9a2c2c11ef82051 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 07:32:02 +1100 Subject: [PATCH 128/324] Mute org.elasticsearch.index.get.GetResultTests testToAndFromXContentEmbedded #115657 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index be2c8d03c3931..7675bcc4f2a28 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -285,6 +285,9 @@ tests: - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testStalledShardMigrationProperlyDetected issue: https://github.com/elastic/elasticsearch/issues/115697 +- class: org.elasticsearch.index.get.GetResultTests + method: testToAndFromXContentEmbedded + issue: https://github.com/elastic/elasticsearch/issues/115657 # Examples: # From 83578872d949879e93675c9e6b037072b0209ba4 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Fri, 25 Oct 2024 15:33:54 -0500 Subject: [PATCH 129/324] Fixing DatabaseNodeServiceIT testNonGzippedDatabase and testGzippedDatabase race condition (#115463) Co-authored-by: Joe Gallo --- .../ingest/geoip/DatabaseNodeServiceIT.java | 28 +++++++++++++------ muted-tests.yml | 6 ---- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceIT.java index 786f091e0c024..7331afdbf585a 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceIT.java @@ -46,15 +46,21 @@ public class DatabaseNodeServiceIT extends AbstractGeoIpIT { public void testNonGzippedDatabase() throws Exception { String databaseType = "GeoLite2-Country"; String databaseFileName = databaseType + ".mmdb"; - // making the dabase name unique so we know we're not using another one: + // making the database name unique so we know we're not using another one: String databaseName = randomAlphaOfLength(20) + "-" + databaseFileName; byte[] mmdbBytes = getBytesForFile(databaseFileName); final DatabaseNodeService databaseNodeService = internalCluster().getInstance(DatabaseNodeService.class); assertNull(databaseNodeService.getDatabase(databaseName)); int numChunks = indexData(databaseName, mmdbBytes); - retrieveDatabase(databaseNodeService, databaseName, mmdbBytes, numChunks); - assertBusy(() -> assertNotNull(databaseNodeService.getDatabase(databaseName))); - assertValidDatabase(databaseNodeService, databaseName, databaseType); + /* + * If DatabaseNodeService::checkDatabases runs it will sometimes (rarely) remove the database we are using in this test while we + * are trying to assert things about it. So if it does then we 'just' try again. + */ + assertBusy(() -> { + retrieveDatabase(databaseNodeService, databaseName, mmdbBytes, numChunks); + assertNotNull(databaseNodeService.getDatabase(databaseName)); + assertValidDatabase(databaseNodeService, databaseName, databaseType); + }); } /* @@ -64,16 +70,22 @@ public void testNonGzippedDatabase() throws Exception { public void testGzippedDatabase() throws Exception { String databaseType = "GeoLite2-Country"; String databaseFileName = databaseType + ".mmdb"; - // making the dabase name unique so we know we're not using another one: + // making the database name unique so we know we're not using another one: String databaseName = randomAlphaOfLength(20) + "-" + databaseFileName; byte[] mmdbBytes = getBytesForFile(databaseFileName); byte[] gzipBytes = gzipFileBytes(databaseName, mmdbBytes); final DatabaseNodeService databaseNodeService = internalCluster().getInstance(DatabaseNodeService.class); assertNull(databaseNodeService.getDatabase(databaseName)); int numChunks = indexData(databaseName, gzipBytes); - retrieveDatabase(databaseNodeService, databaseName, gzipBytes, numChunks); - assertBusy(() -> assertNotNull(databaseNodeService.getDatabase(databaseName))); - assertValidDatabase(databaseNodeService, databaseName, databaseType); + /* + * If DatabaseNodeService::checkDatabases runs it will sometimes (rarely) remove the database we are using in this test while we + * are trying to assert things about it. So if it does then we 'just' try again. + */ + assertBusy(() -> { + retrieveDatabase(databaseNodeService, databaseName, gzipBytes, numChunks); + assertNotNull(databaseNodeService.getDatabase(databaseName)); + assertValidDatabase(databaseNodeService, databaseName, databaseType); + }); } /* diff --git a/muted-tests.yml b/muted-tests.yml index 7675bcc4f2a28..fad1304d73059 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -182,12 +182,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {categorize.Categorize SYNC} issue: https://github.com/elastic/elasticsearch/issues/113722 -- class: org.elasticsearch.ingest.geoip.DatabaseNodeServiceIT - method: testNonGzippedDatabase - issue: https://github.com/elastic/elasticsearch/issues/113821 -- class: org.elasticsearch.ingest.geoip.DatabaseNodeServiceIT - method: testGzippedDatabase - issue: https://github.com/elastic/elasticsearch/issues/113752 - class: org.elasticsearch.threadpool.SimpleThreadPoolIT method: testThreadPoolMetrics issue: https://github.com/elastic/elasticsearch/issues/108320 From d9c776468dca054aab44512def67e51a116e1a20 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 25 Oct 2024 14:28:35 -0700 Subject: [PATCH 130/324] Enable preview features on native modules during IntelliJ import (#115698) --- .../src/main/groovy/elasticsearch.ide.gradle | 35 ++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index d3209ff27ce06..67878181a005d 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -122,6 +122,36 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { .findAll { it != null } } + // force IntelliJ to generate *.iml files for each imported module + tasks.register("enableExternalConfiguration") { + group = 'ide' + description = 'Enable per-module *.iml files' + + doLast { + modifyXml('.idea/misc.xml') {xml -> + def externalStorageConfig = xml.component.find { it.'@name' == 'ExternalStorageConfigurationManager' } + if (externalStorageConfig) { + xml.remove(externalStorageConfig) + } + } + } + } + + // modifies the idea module config to enable preview features on 'elasticsearch-native' module + tasks.register("enablePreviewFeatures") { + group = 'ide' + description = 'Enables preview features on native library module' + dependsOn tasks.named("enableExternalConfiguration") + + doLast { + ['main', 'test'].each { sourceSet -> + modifyXml(".idea/modules/libs/native/elasticsearch.libs.elasticsearch-native.${sourceSet}.iml") { xml -> + xml.component.find { it.'@name' == 'NewModuleRootManager' }?.'@LANGUAGE_LEVEL' = 'JDK_21_PREVIEW' + } + } + } + } + tasks.register('buildDependencyArtifacts') { group = 'ide' description = 'Builds artifacts needed as dependency for IDE modules' @@ -149,7 +179,10 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { testRunner = 'choose_per_test' } taskTriggers { - afterSync tasks.named('configureIdeCheckstyle'), tasks.named('configureIdeaGradleJvm'), tasks.named('buildDependencyArtifacts') + afterSync tasks.named('configureIdeCheckstyle'), + tasks.named('configureIdeaGradleJvm'), + tasks.named('buildDependencyArtifacts'), + tasks.named('enablePreviewFeatures') } encodings { encoding = 'UTF-8' From d887d8e045bde0a5c8c1165dd05942c96c9c048e Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 08:52:15 +1100 Subject: [PATCH 131/324] Mute org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT testGeoShapeGeoHash #115664 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index fad1304d73059..c0c716f2e26cf 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.index.get.GetResultTests method: testToAndFromXContentEmbedded issue: https://github.com/elastic/elasticsearch/issues/115657 +- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT + method: testGeoShapeGeoHash + issue: https://github.com/elastic/elasticsearch/issues/115664 # Examples: # From 9b951cd92ed1fb7bb242d286bef6c5ba72dfc730 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 16:47:29 +1100 Subject: [PATCH 132/324] Mute org.elasticsearch.xpack.inference.InferenceCrudIT testSupportedStream #113430 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c0c716f2e26cf..97a4864e57f8a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -285,6 +285,9 @@ tests: - class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT method: testGeoShapeGeoHash issue: https://github.com/elastic/elasticsearch/issues/115664 +- class: org.elasticsearch.xpack.inference.InferenceCrudIT + method: testSupportedStream + issue: https://github.com/elastic/elasticsearch/issues/113430 # Examples: # From 2b3d41ac2771180e4c034bb5ecd565ea30fa1f87 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 27 Oct 2024 00:51:56 +1100 Subject: [PATCH 133/324] Mute org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT testGeoShapeGeoTile #115717 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 97a4864e57f8a..3a59af6234038 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -288,6 +288,9 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testSupportedStream issue: https://github.com/elastic/elasticsearch/issues/113430 +- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT + method: testGeoShapeGeoTile + issue: https://github.com/elastic/elasticsearch/issues/115717 # Examples: # From 2f2ddad00492fcac8fbfc272607a8db91d279385 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Sat, 26 Oct 2024 06:55:49 -0700 Subject: [PATCH 134/324] Improve error message for unparseable numeric settings (#115609) When a numeric setting is too large or too small such that it can't be parsed at all, the error message is the same as for garbage values. This commit improves the error message in these cases to be the same as for normal bounds checks. closes #115080 --- .../common/settings/Setting.java | 58 ++++++++++++++++--- .../common/settings/SettingTests.java | 34 +++++++++++ 2 files changed, 84 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index a0b6e665042d0..aec9c108d898d 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -34,6 +34,7 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.math.BigInteger; import java.time.Instant; import java.util.Arrays; import java.util.Collections; @@ -1485,27 +1486,68 @@ public static int parseInt(String s, int minValue, int maxValue, String key) { } public static int parseInt(String s, int minValue, int maxValue, String key, boolean isFiltered) { - int value = Integer.parseInt(s); + int value; + try { + value = Integer.parseInt(s); + } catch (NumberFormatException e) { + // check if value is a number or garbage + try { + var bi = new BigInteger(s); + // it's a number, so check which bound it is outside + if (bi.compareTo(BigInteger.valueOf(minValue)) < 0) { + throw newNumericBoundsException(s, key, isFiltered, ">=", minValue); + } else { + throw newNumericBoundsException(s, key, isFiltered, "<=", maxValue); + } + } catch (NumberFormatException e2) { + throw e; // it's garbage, use the original exception + } + } if (value < minValue) { - String err = "Failed to parse value" + (isFiltered ? "" : " [" + s + "]") + " for setting [" + key + "] must be >= " + minValue; - throw new IllegalArgumentException(err); + throw newNumericBoundsException(s, key, isFiltered, ">=", minValue); } if (value > maxValue) { - String err = "Failed to parse value" + (isFiltered ? "" : " [" + s + "]") + " for setting [" + key + "] must be <= " + maxValue; - throw new IllegalArgumentException(err); + throw newNumericBoundsException(s, key, isFiltered, "<=", maxValue); } return value; } static long parseLong(String s, long minValue, String key, boolean isFiltered) { - long value = Long.parseLong(s); + long value; + try { + value = Long.parseLong(s); + } catch (NumberFormatException e) { + // check if value is a number or garbage + try { + var bi = new BigInteger(s); + // it's a number, so check which bound it is outside + if (bi.compareTo(BigInteger.valueOf(minValue)) < 0) { + throw newNumericBoundsException(s, key, isFiltered, ">=", minValue); + } else { + throw newNumericBoundsException(s, key, isFiltered, "<=", Long.MAX_VALUE); + } + } catch (NumberFormatException e2) { + throw e; // it's garbage, use the original exception + } + } if (value < minValue) { - String err = "Failed to parse value" + (isFiltered ? "" : " [" + s + "]") + " for setting [" + key + "] must be >= " + minValue; - throw new IllegalArgumentException(err); + throw newNumericBoundsException(s, key, isFiltered, ">=", minValue); } return value; } + private static IllegalArgumentException newNumericBoundsException(String s, String key, boolean isFiltered, String type, long bound) { + String err = "Failed to parse value" + + (isFiltered ? "" : " [" + s + "]") + + " for setting [" + + key + + "] must be " + + type + + " " + + bound; + throw new IllegalArgumentException(err); + } + public static Setting intSetting(String key, int defaultValue, Property... properties) { return intSetting(key, defaultValue, Integer.MIN_VALUE, properties); } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index ba78ea5cf08a6..75f5045c5fbb6 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -1522,4 +1522,38 @@ public void testDeprecationPropertyValidation() { () -> Setting.boolSetting("a.bool.setting", true, Property.DeprecatedWarning, Property.IndexSettingDeprecatedInV7AndRemovedInV8) ); } + + public void testIntSettingBounds() { + Setting setting = Setting.intSetting("int.setting", 0, Integer.MIN_VALUE, Integer.MAX_VALUE); + var e = expectThrows( + IllegalArgumentException.class, + () -> setting.get(Settings.builder().put("int.setting", "2147483648").build()) + ); + assertThat(e.getMessage(), equalTo("Failed to parse value [2147483648] for setting [int.setting] must be <= 2147483647")); + var e2 = expectThrows( + IllegalArgumentException.class, + () -> setting.get(Settings.builder().put("int.setting", "-2147483649").build()) + ); + assertThat(e2.getMessage(), equalTo("Failed to parse value [-2147483649] for setting [int.setting] must be >= -2147483648")); + } + + public void testLongSettingBounds() { + Setting setting = Setting.longSetting("long.setting", 0, Long.MIN_VALUE); + var e = expectThrows( + IllegalArgumentException.class, + () -> setting.get(Settings.builder().put("long.setting", "9223372036854775808").build()) + ); + assertThat( + e.getMessage(), + equalTo("Failed to parse value [9223372036854775808] for setting [long.setting] must be <= 9223372036854775807") + ); + var e2 = expectThrows( + IllegalArgumentException.class, + () -> setting.get(Settings.builder().put("long.setting", "-9223372036854775809").build()) + ); + assertThat( + e2.getMessage(), + equalTo("Failed to parse value [-9223372036854775809] for setting [long.setting] must be >= -9223372036854775808") + ); + } } From 06cdd11193a5c551ce75edd9713a52d389144f4c Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 28 Oct 2024 10:50:11 +1100 Subject: [PATCH 135/324] Retry on 403 for S3 put in certain environments (#115486) This PR configures a new retry condition for s3 client so that it retries on 403 for operations such as PUT in certain environments. Note that 403 is already retried for GET due to S3RetryingInputStream. Resolves: ES-9321 --- .../s3/S3BlobStoreRepositoryMetricsTests.java | 48 +++++++++++++++++++ .../repositories/s3/S3Service.java | 32 ++++++++++++- .../s3/AwsS3ServiceImplTests.java | 4 +- .../s3/S3ClientSettingsTests.java | 14 ++++-- .../repositories/s3/S3ServiceTests.java | 33 ++++++++++--- 5 files changed, 118 insertions(+), 13 deletions(-) diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java index 21f42bf9eb99c..b1c5d707220af 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.PluginsService; @@ -53,11 +54,13 @@ import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_THROTTLES_TOTAL; import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL; import static org.elasticsearch.repositories.s3.S3RepositoriesMetrics.METRIC_DELETE_RETRIES_HISTOGRAM; +import static org.elasticsearch.rest.RestStatus.FORBIDDEN; import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.REQUESTED_RANGE_NOT_SATISFIED; import static org.elasticsearch.rest.RestStatus.SERVICE_UNAVAILABLE; import static org.elasticsearch.rest.RestStatus.TOO_MANY_REQUESTS; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -320,6 +323,51 @@ public void testRetrySnapshotDeleteMetricsWhenRetriesExhausted() { assertThat(longHistogramMeasurement.get(0).getLong(), equalTo(3L)); } + public void testPutDoesNotRetryOn403InStateful() { + final Settings settings = internalCluster().getInstance(Settings.class); + assertThat(DiscoveryNode.isStateless(settings), equalTo(false)); + + final String repository = createRepository(randomRepositoryName()); + final String dataNodeName = internalCluster().getNodeNameThat(DiscoveryNode::canContainData); + final TestTelemetryPlugin plugin = getPlugin(dataNodeName); + // Exclude snapshot related purpose to avoid trigger assertions for cross-checking purpose and blob names + final OperationPurpose purpose = randomFrom( + OperationPurpose.REPOSITORY_ANALYSIS, + OperationPurpose.CLUSTER_STATE, + OperationPurpose.INDICES, + OperationPurpose.TRANSLOG + ); + final BlobContainer blobContainer = getBlobContainer(dataNodeName, repository); + final String blobName = randomIdentifier(); + + plugin.resetMeter(); + addErrorStatus(new S3ErrorResponse(FORBIDDEN, Strings.format(""" + + + InvalidAccessKeyId + The AWS Access Key Id you provided does not exist in our records. + %s + """, randomUUID()))); + + final var exception = expectThrows(IOException.class, () -> { + if (randomBoolean()) { + blobContainer.writeBlob(purpose, blobName, new BytesArray("blob"), randomBoolean()); + } else { + blobContainer.writeMetadataBlob( + purpose, + blobName, + randomBoolean(), + randomBoolean(), + outputStream -> outputStream.write("blob".getBytes()) + ); + } + }); + assertThat(exception.getCause().getMessage(), containsString("InvalidAccessKeyId")); + + assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.PUT_OBJECT), equalTo(1L)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.PUT_OBJECT), equalTo(1L)); + } + private void addErrorStatus(RestStatus... statuses) { errorResponseQueue.addAll(Arrays.stream(statuses).map(S3ErrorResponse::new).toList()); } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 9042234de6f50..36eb1d61e21d7 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -9,6 +9,7 @@ package org.elasticsearch.repositories.s3; +import com.amazonaws.AmazonServiceException; import com.amazonaws.ClientConfiguration; import com.amazonaws.SDKGlobalConfiguration; import com.amazonaws.auth.AWSCredentials; @@ -20,6 +21,8 @@ import com.amazonaws.auth.STSAssumeRoleWithWebIdentitySessionCredentialsProvider; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.http.IdleConnectionReaper; +import com.amazonaws.retry.PredefinedRetryPolicies; +import com.amazonaws.retry.RetryPolicy; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.internal.Constants; @@ -27,6 +30,7 @@ import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient; import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder; +import org.apache.http.HttpStatus; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -193,7 +197,10 @@ AmazonS3 buildClient(final S3ClientSettings clientSettings) { protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettings) { final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); builder.withCredentials(buildCredentials(LOGGER, clientSettings, webIdentityTokenCredentialsProvider)); - builder.withClientConfiguration(buildConfiguration(clientSettings)); + final ClientConfiguration clientConfiguration = buildConfiguration(clientSettings, isStateless); + assert (isStateless == false && clientConfiguration.getRetryPolicy() == PredefinedRetryPolicies.DEFAULT) + || (isStateless && clientConfiguration.getRetryPolicy() == RETRYABLE_403_RETRY_POLICY) : "invalid retry policy configuration"; + builder.withClientConfiguration(clientConfiguration); String endpoint = Strings.hasLength(clientSettings.endpoint) ? clientSettings.endpoint : Constants.S3_HOSTNAME; if ((endpoint.startsWith("http://") || endpoint.startsWith("https://")) == false) { @@ -223,7 +230,7 @@ protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettin } // pkg private for tests - static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { + static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings, boolean isStateless) { final ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. @@ -248,6 +255,10 @@ static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { clientConfiguration.setUseThrottleRetries(clientSettings.throttleRetries); clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis); + if (isStateless) { + clientConfiguration.setRetryPolicy(RETRYABLE_403_RETRY_POLICY); + } + return clientConfiguration; } @@ -504,4 +515,21 @@ interface SystemEnvironment { interface JvmEnvironment { String getProperty(String key, String defaultValue); } + + static final RetryPolicy RETRYABLE_403_RETRY_POLICY = RetryPolicy.builder() + .withRetryCondition((originalRequest, exception, retriesAttempted) -> { + if (PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION.shouldRetry(originalRequest, exception, retriesAttempted)) { + return true; + } + if (exception instanceof AmazonServiceException ase) { + return ase.getStatusCode() == HttpStatus.SC_FORBIDDEN && "InvalidAccessKeyId".equals(ase.getErrorCode()); + } + return false; + }) + .withBackoffStrategy(PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY) + .withMaxErrorRetry(PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY) + .withHonorMaxErrorRetryInClientConfig(true) + .withHonorDefaultMaxErrorRetryInRetryMode(true) + .withHonorDefaultBackoffStrategyInRetryMode(true) + .build(); } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 0aac0ba898f97..43f606135291d 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -17,6 +17,7 @@ import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; +import com.amazonaws.retry.PredefinedRetryPolicies; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Supplier; @@ -211,7 +212,7 @@ private void launchAWSConfigurationTest( ) { final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); - final ClientConfiguration configuration = S3Service.buildConfiguration(clientSettings); + final ClientConfiguration configuration = S3Service.buildConfiguration(clientSettings, false); assertThat(configuration.getResponseMetadataCacheSize(), is(0)); assertThat(configuration.getProtocol(), is(expectedProtocol)); @@ -222,6 +223,7 @@ private void launchAWSConfigurationTest( assertThat(configuration.getMaxErrorRetry(), is(expectedMaxRetries)); assertThat(configuration.useThrottledRetries(), is(expectedUseThrottleRetries)); assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout)); + assertThat(configuration.getRetryPolicy(), is(PredefinedRetryPolicies.DEFAULT)); } public void testEndpointSetting() { diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java index ddc7a1851c663..288ac1bb3c534 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java @@ -194,9 +194,9 @@ public void testSignerOverrideCanBeSet() { ); assertThat(settings.get("default").region, is("")); assertThat(settings.get("other").signerOverride, is(signerOverride)); - ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default")); + ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default"), false); assertThat(defaultConfiguration.getSignerOverride(), nullValue()); - ClientConfiguration configuration = S3Service.buildConfiguration(settings.get("other")); + ClientConfiguration configuration = S3Service.buildConfiguration(settings.get("other"), false); assertThat(configuration.getSignerOverride(), is(signerOverride)); } @@ -207,12 +207,18 @@ public void testMaxConnectionsCanBeSet() { ); assertThat(settings.get("default").maxConnections, is(ClientConfiguration.DEFAULT_MAX_CONNECTIONS)); assertThat(settings.get("other").maxConnections, is(maxConnections)); - ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default")); + ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default"), false); assertThat(defaultConfiguration.getMaxConnections(), is(ClientConfiguration.DEFAULT_MAX_CONNECTIONS)); - ClientConfiguration configuration = S3Service.buildConfiguration(settings.get("other")); + ClientConfiguration configuration = S3Service.buildConfiguration(settings.get("other"), false); assertThat(configuration.getMaxConnections(), is(maxConnections)); // the default appears in the docs so let's make sure it doesn't change: assertEquals(50, ClientConfiguration.DEFAULT_MAX_CONNECTIONS); } + + public void testStatelessDefaultRetryPolicy() { + final var s3ClientSettings = S3ClientSettings.load(Settings.EMPTY).get("default"); + final var clientConfiguration = S3Service.buildConfiguration(s3ClientSettings, true); + assertThat(clientConfiguration.getRetryPolicy(), is(S3Service.RETRYABLE_403_RETRY_POLICY)); + } } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java index 7bfaf56127fc7..afe1bb1a03c76 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java @@ -8,23 +8,23 @@ */ package org.elasticsearch.repositories.s3; +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.services.s3.model.AmazonS3Exception; + import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.watcher.ResourceWatcherService; -import org.mockito.Mockito; import java.io.IOException; +import static org.mockito.Mockito.mock; + public class S3ServiceTests extends ESTestCase { public void testCachedClientsAreReleased() throws IOException { - final S3Service s3Service = new S3Service( - Mockito.mock(Environment.class), - Settings.EMPTY, - Mockito.mock(ResourceWatcherService.class) - ); + final S3Service s3Service = new S3Service(mock(Environment.class), Settings.EMPTY, mock(ResourceWatcherService.class)); final Settings settings = Settings.builder().put("endpoint", "http://first").build(); final RepositoryMetadata metadata1 = new RepositoryMetadata("first", "s3", settings); final RepositoryMetadata metadata2 = new RepositoryMetadata("second", "s3", settings); @@ -41,4 +41,25 @@ public void testCachedClientsAreReleased() throws IOException { final S3ClientSettings clientSettingsReloaded = s3Service.settings(metadata1); assertNotSame(clientSettings, clientSettingsReloaded); } + + public void testRetryOn403RetryPolicy() { + final AmazonS3Exception e = new AmazonS3Exception("error"); + e.setStatusCode(403); + e.setErrorCode("InvalidAccessKeyId"); + + // Retry on 403 invalid access key id + assertTrue( + S3Service.RETRYABLE_403_RETRY_POLICY.getRetryCondition().shouldRetry(mock(AmazonWebServiceRequest.class), e, between(0, 9)) + ); + + // Not retry if not 403 or not invalid access key id + if (randomBoolean()) { + e.setStatusCode(randomValueOtherThan(403, () -> between(0, 600))); + } else { + e.setErrorCode(randomAlphaOfLength(10)); + } + assertFalse( + S3Service.RETRYABLE_403_RETRY_POLICY.getRetryCondition().shouldRetry(mock(AmazonWebServiceRequest.class), e, between(0, 9)) + ); + } } From 5fb1e23f45f71bff1176f939b9f30f942f39cd96 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 28 Oct 2024 06:07:27 +0000 Subject: [PATCH 136/324] Clarify status of response to voting config API (#115714) These APIs return no body, just a status code. This commit clarifies that in the docs. Closes #115462 --- .../cluster/voting-exclusions.asciidoc | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/docs/reference/cluster/voting-exclusions.asciidoc b/docs/reference/cluster/voting-exclusions.asciidoc index e5b8544a16554..55587a7010f8f 100644 --- a/docs/reference/cluster/voting-exclusions.asciidoc +++ b/docs/reference/cluster/voting-exclusions.asciidoc @@ -7,7 +7,6 @@ Adds or removes master-eligible nodes from the <>. - [[voting-config-exclusions-api-request]] ==== {api-request-title} @@ -28,7 +27,7 @@ users can use this API. [[voting-config-exclusions-api-desc]] ==== {api-description-title} - + By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the <> automatically @@ -50,14 +49,19 @@ use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. -If the API fails, you can safely retry it. Only a successful response -guarantees that the node has been removed from the voting configuration and -will not be reinstated. +A response to `POST /_cluster/voting_config_exclusions` with an HTTP status +code of `200 OK` guarantees that the node has been removed from the voting +configuration and will not be reinstated until the voting configuration +exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. +If the call to `POST /_cluster/voting_config_exclusions` fails or returns a +response with an HTTP status code other than `200 OK` then the node may not +have been removed from the voting configuration. In that case, you may safely +retry the call. NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not -required when removing master-ineligible nodes or fewer than half of the -master-eligible nodes. +required when removing master-ineligible nodes or when removing fewer than half +of the master-eligible nodes. For more information, see <>. @@ -94,7 +98,7 @@ list. Defaults to `true`, meaning that all excluded nodes must be removed from the cluster before this API takes any action. If set to `false` then the voting configuration exclusions list is cleared even if some excluded nodes are still in the cluster. Only applies to the `DELETE` form of this API. - + [[voting-config-exclusions-api-example]] ==== {api-examples-title} @@ -102,7 +106,7 @@ Adds nodes named `nodeName1` and `nodeName2` to the voting configuration exclusions list: [source,console] --------------------------------------------------- +-------------------------------------------------- POST /_cluster/voting_config_exclusions?node_names=nodeName1,nodeName2 -------------------------------------------------- From 98cd34f3fde14cec9c5d5ac2507d4fdc55e89288 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 28 Oct 2024 08:34:48 +0100 Subject: [PATCH 137/324] Add more tsdb and logsdb rolling upgrade indexing tests. (#115639) The main difference between other rolling upgrade tests is that these tests index more data while performing the rolling upgrade and no rollover is performed during rolling upgrade. For example this makes it more likely for merging to happen, which could uncover bwc bugs. Note that currently both test suites start trial license so that synthetic source gets used. --- .../resources/checkstyle_suppressions.xml | 2 + .../LogsIndexModeRollingUpgradeIT.java | 4 +- .../LogsdbIndexingRollingUpgradeIT.java | 253 ++++++++++++++++++ .../org/elasticsearch/upgrades/TsdbIT.java | 4 +- .../TsdbIndexingRollingUpgradeIT.java | 187 +++++++++++++ 5 files changed, 446 insertions(+), 4 deletions(-) create mode 100644 qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java create mode 100644 qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java diff --git a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml index fd01993951959..5fdfebf6849e7 100644 --- a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml +++ b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml @@ -35,6 +35,8 @@ + + diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java index ba79de4ab6cd1..8c369ebc9950d 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java @@ -171,7 +171,7 @@ public void testLogsIndexing() throws IOException { } } - private static void enableLogsdbByDefault() throws IOException { + static void enableLogsdbByDefault() throws IOException { var request = new Request("PUT", "/_cluster/settings"); request.setJsonEntity(""" { @@ -214,7 +214,7 @@ private static Request rolloverDataStream(final RestClient client, final String } @SuppressWarnings("unchecked") - private static String getWriteBackingIndex(final RestClient client, final String dataStreamName, int backingIndex) throws IOException { + static String getWriteBackingIndex(final RestClient client, final String dataStreamName, int backingIndex) throws IOException { final Request request = new Request("GET", "_data_stream/" + dataStreamName); final List dataStreams = (List) entityAsMap(client.performRequest(request)).get("data_streams"); final Map dataStream = (Map) dataStreams.get(0); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java new file mode 100644 index 0000000000000..9bdc43543e331 --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.io.InputStream; +import java.time.Instant; +import java.util.Map; + +import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.enableLogsdbByDefault; +import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.getWriteBackingIndex; +import static org.elasticsearch.upgrades.TsdbIT.formatInstant; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; + +public class LogsdbIndexingRollingUpgradeIT extends AbstractRollingUpgradeTestCase { + + static String BULK_ITEM_TEMPLATE = + """ + {"@timestamp": "$now", "host.name": "$host", "method": "$method", "ip": "$ip", "message": "$message", "length": $length, "factor": $factor} + """; + + private static final String TEMPLATE = """ + { + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, + "method": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "ip": { + "type": "ip" + }, + "length": { + "type": "long" + }, + "factor": { + "type": "double" + } + } + } + }"""; + + public LogsdbIndexingRollingUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { + super(upgradedNodes); + } + + public void testIndexing() throws Exception { + String dataStreamName = "logs-bwc-test"; + if (isOldCluster()) { + startTrial(); + enableLogsdbByDefault(); + createTemplate(dataStreamName, "3", TEMPLATE); + + Instant startTime = Instant.now().minusSeconds(60 * 60); + bulkIndex(dataStreamName, 4, 1024, startTime); + + String firstBackingIndex = getWriteBackingIndex(client(), dataStreamName, 0); + var settings = (Map) getIndexSettingsWithDefaults(firstBackingIndex).get(firstBackingIndex); + assertThat(((Map) settings.get("settings")).get("index.mode"), equalTo("logsdb")); + assertThat(((Map) settings.get("defaults")).get("index.mapping.source.mode"), equalTo("SYNTHETIC")); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } else if (isMixedCluster()) { + Instant startTime = Instant.now().minusSeconds(60 * 30); + bulkIndex(dataStreamName, 4, 1024, startTime); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } else if (isUpgradedCluster()) { + ensureGreen(dataStreamName); + Instant startTime = Instant.now(); + bulkIndex(dataStreamName, 4, 1024, startTime); + search(dataStreamName); + query(dataStreamName); + + var forceMergeRequest = new Request("POST", "/" + dataStreamName + "/_forcemerge"); + forceMergeRequest.addParameter("max_num_segments", "1"); + assertOK(client().performRequest(forceMergeRequest)); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } + } + + static void createTemplate(String dataStreamName, String id, String template) throws IOException { + final String INDEX_TEMPLATE = """ + { + "index_patterns": ["$DATASTREAM"], + "template": $TEMPLATE, + "data_stream": { + } + }"""; + var putIndexTemplateRequest = new Request("POST", "/_index_template/" + id); + putIndexTemplateRequest.setJsonEntity(INDEX_TEMPLATE.replace("$TEMPLATE", template).replace("$DATASTREAM", dataStreamName)); + assertOK(client().performRequest(putIndexTemplateRequest)); + } + + static void bulkIndex(String dataStreamName, int numRequest, int numDocs, Instant startTime) throws Exception { + for (int i = 0; i < numRequest; i++) { + var bulkRequest = new Request("POST", "/" + dataStreamName + "/_bulk"); + StringBuilder requestBody = new StringBuilder(); + for (int j = 0; j < numDocs; j++) { + String hostName = "host" + j % 50; // Not realistic, but makes asserting search / query response easier. + String methodName = "method" + j % 5; + String ip = NetworkAddress.format(randomIp(true)); + String message = randomAlphaOfLength(128); + long length = randomLong(); + double factor = randomDouble(); + + requestBody.append("{\"create\": {}}"); + requestBody.append('\n'); + requestBody.append( + BULK_ITEM_TEMPLATE.replace("$now", formatInstant(startTime)) + .replace("$host", hostName) + .replace("$method", methodName) + .replace("$ip", ip) + .replace("$message", message) + .replace("$length", Long.toString(length)) + .replace("$factor", Double.toString(factor)) + ); + requestBody.append('\n'); + + startTime = startTime.plusMillis(1); + } + bulkRequest.setJsonEntity(requestBody.toString()); + bulkRequest.addParameter("refresh", "true"); + var response = client().performRequest(bulkRequest); + assertOK(response); + var responseBody = entityAsMap(response); + assertThat("errors in response:\n " + responseBody, responseBody.get("errors"), equalTo(false)); + } + } + + void search(String dataStreamName) throws Exception { + var searchRequest = new Request("POST", "/" + dataStreamName + "/_search"); + searchRequest.addParameter("pretty", "true"); + searchRequest.setJsonEntity(""" + { + "size": 0, + "aggs": { + "host_name": { + "terms": { + "field": "host.name", + "order": { "_key": "asc" } + }, + "aggs": { + "max_length": { + "max": { + "field": "length" + } + }, + "max_factor": { + "max": { + "field": "factor" + } + } + } + } + } + } + """); + var response = client().performRequest(searchRequest); + assertOK(response); + var responseBody = entityAsMap(response); + + Integer totalCount = ObjectPath.evaluate(responseBody, "hits.total.value"); + assertThat(totalCount, greaterThanOrEqualTo(4096)); + String key = ObjectPath.evaluate(responseBody, "aggregations.host_name.buckets.0.key"); + assertThat(key, equalTo("host0")); + Integer docCount = ObjectPath.evaluate(responseBody, "aggregations.host_name.buckets.0.doc_count"); + assertThat(docCount, greaterThan(0)); + Double maxTx = ObjectPath.evaluate(responseBody, "aggregations.host_name.buckets.0.max_length.value"); + assertThat(maxTx, notNullValue()); + Double maxRx = ObjectPath.evaluate(responseBody, "aggregations.host_name.buckets.0.max_factor.value"); + assertThat(maxRx, notNullValue()); + } + + void query(String dataStreamName) throws Exception { + var queryRequest = new Request("POST", "/_query"); + queryRequest.addParameter("pretty", "true"); + queryRequest.setJsonEntity(""" + { + "query": "FROM $ds | STATS max(length), max(factor) BY host.name | SORT host.name | LIMIT 5" + } + """.replace("$ds", dataStreamName)); + var response = client().performRequest(queryRequest); + assertOK(response); + var responseBody = entityAsMap(response); + + String column1 = ObjectPath.evaluate(responseBody, "columns.0.name"); + String column2 = ObjectPath.evaluate(responseBody, "columns.1.name"); + String column3 = ObjectPath.evaluate(responseBody, "columns.2.name"); + assertThat(column1, equalTo("max(length)")); + assertThat(column2, equalTo("max(factor)")); + assertThat(column3, equalTo("host.name")); + + String key = ObjectPath.evaluate(responseBody, "values.0.2"); + assertThat(key, equalTo("host0")); + Long maxRx = ObjectPath.evaluate(responseBody, "values.0.0"); + assertThat(maxRx, notNullValue()); + Double maxTx = ObjectPath.evaluate(responseBody, "values.0.1"); + assertThat(maxTx, notNullValue()); + } + + protected static void startTrial() throws IOException { + Request startTrial = new Request("POST", "/_license/start_trial"); + startTrial.addParameter("acknowledge", "true"); + assertOK(client().performRequest(startTrial)); + } + + static Map getIndexSettingsWithDefaults(String index) throws IOException { + Request request = new Request("GET", "/" + index + "/_settings"); + request.addParameter("flat_settings", "true"); + request.addParameter("include_defaults", "true"); + Response response = client().performRequest(request); + try (InputStream is = response.getEntity().getContent()) { + return XContentHelper.convertToMap( + XContentType.fromMediaType(response.getEntity().getContentType().getValue()).xContent(), + is, + true + ); + } + } + +} diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java index 9e3030d510266..6744c84f29d0f 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java @@ -33,7 +33,7 @@ public TsdbIT(@Name("upgradedNodes") int upgradedNodes) { super(upgradedNodes); } - private static final String TEMPLATE = """ + static final String TEMPLATE = """ { "settings":{ "index": { @@ -289,7 +289,7 @@ private static void assertSearch(String dataStreamName, int expectedHitCount) th assertThat(ObjectPath.evaluate(responseBody, "hits.total.value"), equalTo(expectedHitCount)); } - private static String formatInstant(Instant instant) { + static String formatInstant(Instant instant) { return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant); } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java new file mode 100644 index 0000000000000..1ac919ea57001 --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java @@ -0,0 +1,187 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.test.rest.ObjectPath; + +import java.time.Instant; +import java.util.Map; + +import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.getWriteBackingIndex; +import static org.elasticsearch.upgrades.LogsdbIndexingRollingUpgradeIT.*; +import static org.elasticsearch.upgrades.TsdbIT.TEMPLATE; +import static org.elasticsearch.upgrades.TsdbIT.formatInstant; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; + +public class TsdbIndexingRollingUpgradeIT extends AbstractRollingUpgradeTestCase { + + static String BULK_ITEM_TEMPLATE = + """ + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "$name", "uid":"$uid", "ip": "$ip", "network": {"tx": $tx, "rx": $rx}}}} + """; + + public TsdbIndexingRollingUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { + super(upgradedNodes); + } + + public void testIndexing() throws Exception { + String dataStreamName = "k9s"; + if (isOldCluster()) { + startTrial(); + createTemplate(dataStreamName, "2", TEMPLATE); + + Instant startTime = Instant.now().minusSeconds(60 * 60); + bulkIndex(dataStreamName, 4, 1024, startTime); + + String firstBackingIndex = getWriteBackingIndex(client(), dataStreamName, 0); + var settings = (Map) getIndexSettingsWithDefaults(firstBackingIndex).get(firstBackingIndex); + assertThat(((Map) settings.get("settings")).get("index.mode"), equalTo("time_series")); + assertThat(((Map) settings.get("defaults")).get("index.mapping.source.mode"), equalTo("SYNTHETIC")); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } else if (isMixedCluster()) { + Instant startTime = Instant.now().minusSeconds(60 * 30); + bulkIndex(dataStreamName, 4, 1024, startTime); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } else if (isUpgradedCluster()) { + ensureGreen(dataStreamName); + Instant startTime = Instant.now(); + bulkIndex(dataStreamName, 4, 1024, startTime); + search(dataStreamName); + query(dataStreamName); + + var forceMergeRequest = new Request("POST", "/" + dataStreamName + "/_forcemerge"); + forceMergeRequest.addParameter("max_num_segments", "1"); + assertOK(client().performRequest(forceMergeRequest)); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } + } + + static void bulkIndex(String dataStreamName, int numRequest, int numDocs, Instant startTime) throws Exception { + for (int i = 0; i < numRequest; i++) { + var bulkRequest = new Request("POST", "/" + dataStreamName + "/_bulk"); + StringBuilder requestBody = new StringBuilder(); + for (int j = 0; j < numDocs; j++) { + String podName = "pod" + j % 5; // Not realistic, but makes asserting search / query response easier. + String podUid = randomUUID(); + String podIp = NetworkAddress.format(randomIp(true)); + long podTx = randomLong(); + long podRx = randomLong(); + + requestBody.append("{\"create\": {}}"); + requestBody.append('\n'); + requestBody.append( + BULK_ITEM_TEMPLATE.replace("$now", formatInstant(startTime)) + .replace("$name", podName) + .replace("$uid", podUid) + .replace("$ip", podIp) + .replace("$tx", Long.toString(podTx)) + .replace("$rx", Long.toString(podRx)) + ); + requestBody.append('\n'); + + startTime = startTime.plusMillis(1); + } + bulkRequest.setJsonEntity(requestBody.toString()); + bulkRequest.addParameter("refresh", "true"); + var response = client().performRequest(bulkRequest); + assertOK(response); + var responseBody = entityAsMap(response); + assertThat("errors in response:\n " + responseBody, responseBody.get("errors"), equalTo(false)); + } + } + + void search(String dataStreamName) throws Exception { + var searchRequest = new Request("POST", "/" + dataStreamName + "/_search"); + searchRequest.addParameter("pretty", "true"); + searchRequest.setJsonEntity(""" + { + "size": 0, + "aggs": { + "pod_name": { + "terms": { + "field": "k8s.pod.name", + "order": { "_key": "asc" } + }, + "aggs": { + "max_tx": { + "max": { + "field": "k8s.pod.network.tx" + } + }, + "max_rx": { + "max": { + "field": "k8s.pod.network.rx" + } + } + } + } + } + } + """); + var response = client().performRequest(searchRequest); + assertOK(response); + var responseBody = entityAsMap(response); + + Integer totalCount = ObjectPath.evaluate(responseBody, "hits.total.value"); + assertThat(totalCount, greaterThanOrEqualTo(4096)); + String key = ObjectPath.evaluate(responseBody, "aggregations.pod_name.buckets.0.key"); + assertThat(key, equalTo("pod0")); + Integer docCount = ObjectPath.evaluate(responseBody, "aggregations.pod_name.buckets.0.doc_count"); + assertThat(docCount, greaterThan(0)); + Double maxTx = ObjectPath.evaluate(responseBody, "aggregations.pod_name.buckets.0.max_tx.value"); + assertThat(maxTx, notNullValue()); + Double maxRx = ObjectPath.evaluate(responseBody, "aggregations.pod_name.buckets.0.max_rx.value"); + assertThat(maxRx, notNullValue()); + } + + void query(String dataStreamName) throws Exception { + var queryRequest = new Request("POST", "/_query"); + queryRequest.addParameter("pretty", "true"); + queryRequest.setJsonEntity(""" + { + "query": "FROM $ds | STATS max(k8s.pod.network.rx), max(k8s.pod.network.tx) BY k8s.pod.name | SORT k8s.pod.name | LIMIT 5" + } + """.replace("$ds", dataStreamName)); + var response = client().performRequest(queryRequest); + assertOK(response); + var responseBody = entityAsMap(response); + + String column1 = ObjectPath.evaluate(responseBody, "columns.0.name"); + String column2 = ObjectPath.evaluate(responseBody, "columns.1.name"); + String column3 = ObjectPath.evaluate(responseBody, "columns.2.name"); + assertThat(column1, equalTo("max(k8s.pod.network.rx)")); + assertThat(column2, equalTo("max(k8s.pod.network.tx)")); + assertThat(column3, equalTo("k8s.pod.name")); + + String key = ObjectPath.evaluate(responseBody, "values.0.2"); + assertThat(key, equalTo("pod0")); + Long maxRx = ObjectPath.evaluate(responseBody, "values.0.0"); + assertThat(maxRx, notNullValue()); + Long maxTx = ObjectPath.evaluate(responseBody, "values.0.1"); + assertThat(maxTx, notNullValue()); + } + +} From ef85d0a53f1f58a63359b63933fc1e147167d42f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 28 Oct 2024 09:31:19 +0100 Subject: [PATCH 138/324] Avoid double instrumentation via class annotation (#115398) --- .../impl/InstrumenterImpl.java | 96 ++++++++++++--- .../impl/InstrumenterTests.java | 112 +++++++++++++++--- 2 files changed, 177 insertions(+), 31 deletions(-) diff --git a/distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java b/distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java index 81c120ddcd6d1..7c2e1645ada83 100644 --- a/distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java +++ b/distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java @@ -15,8 +15,10 @@ import org.objectweb.asm.ClassReader; import org.objectweb.asm.ClassVisitor; import org.objectweb.asm.ClassWriter; +import org.objectweb.asm.FieldVisitor; import org.objectweb.asm.MethodVisitor; import org.objectweb.asm.Opcodes; +import org.objectweb.asm.RecordComponentVisitor; import org.objectweb.asm.Type; import java.io.IOException; @@ -73,7 +75,13 @@ public byte[] instrumentClass(String className, byte[] classfileBuffer) { } class EntitlementClassVisitor extends ClassVisitor { - final String className; + + private static final String ENTITLEMENT_ANNOTATION = "EntitlementInstrumented"; + + private final String className; + + private boolean isAnnotationPresent; + private boolean annotationNeeded = true; EntitlementClassVisitor(int api, ClassVisitor classVisitor, String className) { super(api, classVisitor); @@ -85,25 +93,85 @@ public void visit(int version, int access, String name, String signature, String super.visit(version, access, name + classNameSuffix, signature, superName, interfaces); } + @Override + public AnnotationVisitor visitAnnotation(String descriptor, boolean visible) { + if (visible && descriptor.equals(ENTITLEMENT_ANNOTATION)) { + isAnnotationPresent = true; + annotationNeeded = false; + } + return cv.visitAnnotation(descriptor, visible); + } + + @Override + public void visitNestMember(String nestMember) { + addClassAnnotationIfNeeded(); + super.visitNestMember(nestMember); + } + + @Override + public void visitPermittedSubclass(String permittedSubclass) { + addClassAnnotationIfNeeded(); + super.visitPermittedSubclass(permittedSubclass); + } + + @Override + public void visitInnerClass(String name, String outerName, String innerName, int access) { + addClassAnnotationIfNeeded(); + super.visitInnerClass(name, outerName, innerName, access); + } + + @Override + public FieldVisitor visitField(int access, String name, String descriptor, String signature, Object value) { + addClassAnnotationIfNeeded(); + return super.visitField(access, name, descriptor, signature, value); + } + + @Override + public RecordComponentVisitor visitRecordComponent(String name, String descriptor, String signature) { + addClassAnnotationIfNeeded(); + return super.visitRecordComponent(name, descriptor, signature); + } + @Override public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) { + addClassAnnotationIfNeeded(); var mv = super.visitMethod(access, name, descriptor, signature, exceptions); - boolean isStatic = (access & ACC_STATIC) != 0; - var key = new MethodKey( - className, - name, - Stream.of(Type.getArgumentTypes(descriptor)).map(Type::getInternalName).toList(), - isStatic - ); - var instrumentationMethod = instrumentationMethods.get(key); - if (instrumentationMethod != null) { - // LOGGER.debug("Will instrument method {}", key); - return new EntitlementMethodVisitor(Opcodes.ASM9, mv, isStatic, descriptor, instrumentationMethod); - } else { - // LOGGER.trace("Will not instrument method {}", key); + if (isAnnotationPresent == false) { + boolean isStatic = (access & ACC_STATIC) != 0; + var key = new MethodKey( + className, + name, + Stream.of(Type.getArgumentTypes(descriptor)).map(Type::getInternalName).toList(), + isStatic + ); + var instrumentationMethod = instrumentationMethods.get(key); + if (instrumentationMethod != null) { + // LOGGER.debug("Will instrument method {}", key); + return new EntitlementMethodVisitor(Opcodes.ASM9, mv, isStatic, descriptor, instrumentationMethod); + } else { + // LOGGER.trace("Will not instrument method {}", key); + } } return mv; } + + /** + * A class annotation can be added via visitAnnotation; we need to call visitAnnotation after all other visitAnnotation + * calls (in case one of them detects our annotation is already present), but before any other subsequent visit* method is called + * (up to visitMethod -- if no visitMethod is called, there is nothing to instrument). + * This includes visitNestMember, visitPermittedSubclass, visitInnerClass, visitField, visitRecordComponent and, of course, + * visitMethod (see {@link ClassVisitor} javadoc). + */ + private void addClassAnnotationIfNeeded() { + if (annotationNeeded) { + // logger.debug("Adding {} annotation", ENTITLEMENT_ANNOTATION); + AnnotationVisitor av = cv.visitAnnotation(ENTITLEMENT_ANNOTATION, true); + if (av != null) { + av.visitEnd(); + } + annotationNeeded = false; + } + } } static class EntitlementMethodVisitor extends MethodVisitor { diff --git a/distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java b/distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java index e807ecee4f103..f05c7ccae62e6 100644 --- a/distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java +++ b/distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java @@ -9,20 +9,24 @@ package org.elasticsearch.entitlement.instrumentation.impl; +import org.elasticsearch.common.Strings; import org.elasticsearch.entitlement.api.EntitlementChecks; import org.elasticsearch.entitlement.api.EntitlementProvider; import org.elasticsearch.entitlement.instrumentation.InstrumentationService; -import org.elasticsearch.entitlement.instrumentation.MethodKey; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import org.objectweb.asm.Type; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; -import java.util.Map; +import java.util.Arrays; +import java.util.stream.Collectors; import static org.elasticsearch.entitlement.instrumentation.impl.ASMUtils.bytecode2text; +import static org.elasticsearch.entitlement.instrumentation.impl.InstrumenterImpl.getClassFileInfo; +import static org.hamcrest.Matchers.is; /** * This tests {@link InstrumenterImpl} in isolation, without a java agent. @@ -60,6 +64,10 @@ public static class ClassToInstrument implements Testable { public static void systemExit(int status) { assertEquals(123, status); } + + public static void anotherSystemExit(int status) { + assertEquals(123, status); + } } static final class TestException extends RuntimeException {} @@ -76,8 +84,11 @@ public static class TestEntitlementManager implements EntitlementChecks { */ volatile boolean isActive; + int checkSystemExitCallCount = 0; + @Override public void checkSystemExit(Class callerClass, int status) { + checkSystemExitCallCount++; assertSame(InstrumenterTests.class, callerClass); assertEquals(123, status); throwIfActive(); @@ -90,18 +101,11 @@ private void throwIfActive() { } } - public void test() throws Exception { - // This test doesn't replace ClassToInstrument in-place but instead loads a separate - // class ClassToInstrument_NEW that contains the instrumentation. Because of this, - // we need to configure the Transformer to use a MethodKey and instrumentationMethod - // with slightly different signatures (using the common interface Testable) which - // is not what would happen when it's run by the agent. - - MethodKey k1 = instrumentationService.methodKeyForTarget(ClassToInstrument.class.getMethod("systemExit", int.class)); - Method v1 = EntitlementChecks.class.getMethod("checkSystemExit", Class.class, int.class); - var instrumenter = new InstrumenterImpl("_NEW", Map.of(k1, v1)); + public void testClassIsInstrumented() throws Exception { + var classToInstrument = ClassToInstrument.class; + var instrumenter = createInstrumenter(classToInstrument, "systemExit"); - byte[] newBytecode = instrumenter.instrumentClassFile(ClassToInstrument.class).bytecodes(); + byte[] newBytecode = instrumenter.instrumentClassFile(classToInstrument).bytecodes(); if (logger.isTraceEnabled()) { logger.trace("Bytecode after instrumentation:\n{}", bytecode2text(newBytecode)); @@ -112,22 +116,96 @@ public void test() throws Exception { newBytecode ); + getTestChecks().isActive = false; + // Before checking is active, nothing should throw - callStaticSystemExit(newClass, 123); + callStaticMethod(newClass, "systemExit", 123); getTestChecks().isActive = true; // After checking is activated, everything should throw - assertThrows(TestException.class, () -> callStaticSystemExit(newClass, 123)); + assertThrows(TestException.class, () -> callStaticMethod(newClass, "systemExit", 123)); + } + + public void testClassIsNotInstrumentedTwice() throws Exception { + var classToInstrument = ClassToInstrument.class; + var instrumenter = createInstrumenter(classToInstrument, "systemExit"); + + InstrumenterImpl.ClassFileInfo initial = getClassFileInfo(classToInstrument); + var internalClassName = Type.getInternalName(classToInstrument); + + byte[] instrumentedBytecode = instrumenter.instrumentClass(internalClassName, initial.bytecodes()); + byte[] instrumentedTwiceBytecode = instrumenter.instrumentClass(internalClassName, instrumentedBytecode); + + logger.trace(() -> Strings.format("Bytecode after 1st instrumentation:\n%s", bytecode2text(instrumentedBytecode))); + logger.trace(() -> Strings.format("Bytecode after 2nd instrumentation:\n%s", bytecode2text(instrumentedTwiceBytecode))); + + Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( + ClassToInstrument.class.getName() + "_NEW_NEW", + instrumentedTwiceBytecode + ); + + getTestChecks().isActive = true; + getTestChecks().checkSystemExitCallCount = 0; + + assertThrows(TestException.class, () -> callStaticMethod(newClass, "systemExit", 123)); + assertThat(getTestChecks().checkSystemExitCallCount, is(1)); + } + + public void testClassAllMethodsAreInstrumentedFirstPass() throws Exception { + var classToInstrument = ClassToInstrument.class; + var instrumenter = createInstrumenter(classToInstrument, "systemExit", "anotherSystemExit"); + + InstrumenterImpl.ClassFileInfo initial = getClassFileInfo(classToInstrument); + var internalClassName = Type.getInternalName(classToInstrument); + + byte[] instrumentedBytecode = instrumenter.instrumentClass(internalClassName, initial.bytecodes()); + byte[] instrumentedTwiceBytecode = instrumenter.instrumentClass(internalClassName, instrumentedBytecode); + + logger.trace(() -> Strings.format("Bytecode after 1st instrumentation:\n%s", bytecode2text(instrumentedBytecode))); + logger.trace(() -> Strings.format("Bytecode after 2nd instrumentation:\n%s", bytecode2text(instrumentedTwiceBytecode))); + + Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( + ClassToInstrument.class.getName() + "_NEW_NEW", + instrumentedTwiceBytecode + ); + + getTestChecks().isActive = true; + getTestChecks().checkSystemExitCallCount = 0; + + assertThrows(TestException.class, () -> callStaticMethod(newClass, "systemExit", 123)); + assertThat(getTestChecks().checkSystemExitCallCount, is(1)); + + assertThrows(TestException.class, () -> callStaticMethod(newClass, "anotherSystemExit", 123)); + assertThat(getTestChecks().checkSystemExitCallCount, is(2)); + } + + /** This test doesn't replace ClassToInstrument in-place but instead loads a separate + * class ClassToInstrument_NEW that contains the instrumentation. Because of this, + * we need to configure the Transformer to use a MethodKey and instrumentationMethod + * with slightly different signatures (using the common interface Testable) which + * is not what would happen when it's run by the agent. + */ + private InstrumenterImpl createInstrumenter(Class classToInstrument, String... methodNames) throws NoSuchMethodException { + Method v1 = EntitlementChecks.class.getMethod("checkSystemExit", Class.class, int.class); + var methods = Arrays.stream(methodNames).map(name -> { + try { + return instrumentationService.methodKeyForTarget(classToInstrument.getMethod(name, int.class)); + } catch (NoSuchMethodException e) { + throw new RuntimeException(e); + } + }).collect(Collectors.toUnmodifiableMap(name -> name, name -> v1)); + + return new InstrumenterImpl("_NEW", methods); } /** * Calling a static method of a dynamically loaded class is significantly more cumbersome * than calling a virtual method. */ - private static void callStaticSystemExit(Class c, int status) throws NoSuchMethodException, IllegalAccessException { + private static void callStaticMethod(Class c, String methodName, int status) throws NoSuchMethodException, IllegalAccessException { try { - c.getMethod("systemExit", int.class).invoke(null, status); + c.getMethod(methodName, int.class).invoke(null, status); } catch (InvocationTargetException e) { Throwable cause = e.getCause(); if (cause instanceof TestException n) { From c4c33ff359b99e855306542d6cc077661e21383d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 28 Oct 2024 09:33:14 +0100 Subject: [PATCH 139/324] Fix NPE on plugin sync (#115640) --- .../plugins/cli/SyncPluginsAction.java | 8 ++++---- .../plugins/cli/SyncPluginsActionTests.java | 16 ++++++++++++++++ docs/changelog/115640.yaml | 6 ++++++ 3 files changed, 26 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/115640.yaml diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java index 5394cb8f3d79b..d6d0619422770 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java @@ -25,6 +25,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Comparator; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Locale; @@ -60,7 +61,7 @@ public SyncPluginsAction(Terminal terminal, Environment env) { * @throws UserException if a plugins config file is found. */ public static void ensureNoConfigFile(Environment env) throws UserException { - final Path pluginsConfig = env.configFile().resolve("elasticsearch-plugins.yml"); + final Path pluginsConfig = env.configFile().resolve(ELASTICSEARCH_PLUGINS_YML); if (Files.exists(pluginsConfig)) { throw new UserException( ExitCodes.USAGE, @@ -207,9 +208,8 @@ private List getPluginsToUpgrade( Optional cachedPluginsConfig, List existingPlugins ) { - final Map cachedPluginIdToLocation = cachedPluginsConfig.map( - config -> config.getPlugins().stream().collect(Collectors.toMap(InstallablePlugin::getId, InstallablePlugin::getLocation)) - ).orElse(Map.of()); + final Map cachedPluginIdToLocation = new HashMap<>(); + cachedPluginsConfig.ifPresent(config -> config.getPlugins().forEach(p -> cachedPluginIdToLocation.put(p.getId(), p.getLocation()))); return pluginsToMaybeUpgrade.stream().filter(eachPlugin -> { final String eachPluginId = eachPlugin.getId(); diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java index 8ef44c8862e84..2d2336428a0a5 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java @@ -157,6 +157,22 @@ public void test_getPluginChanges_withOfficialPluginToUpgrade_returnsPluginToUpg assertThat(pluginChanges.upgrade.get(0).getId(), equalTo("analysis-icu")); } + /** + * Check that when there is an official plugin in the config file and in the cached config, then we + * calculate that the plugin does not need to be upgraded. + */ + public void test_getPluginChanges_withOfficialPluginCachedConfigAndNoChanges_returnsNoChanges() throws Exception { + createPlugin("analysis-icu"); + config.setPlugins(List.of(new InstallablePlugin("analysis-icu"))); + + final PluginsConfig cachedConfig = new PluginsConfig(); + cachedConfig.setPlugins(List.of(new InstallablePlugin("analysis-icu"))); + + final PluginChanges pluginChanges = action.getPluginChanges(config, Optional.of(cachedConfig)); + + assertThat(pluginChanges.isEmpty(), is(true)); + } + /** * Check that if an unofficial plugins' location has not changed in the cached config, then we * calculate that the plugin does not need to be upgraded. diff --git a/docs/changelog/115640.yaml b/docs/changelog/115640.yaml new file mode 100644 index 0000000000000..5c4a943a9697d --- /dev/null +++ b/docs/changelog/115640.yaml @@ -0,0 +1,6 @@ +pr: 115640 +summary: Fix NPE on plugin sync +area: Infra/CLI +type: bug +issues: + - 114818 From 918a9cc35ada3a348f0bd4ed24e7ab6f836d468e Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Mon, 28 Oct 2024 09:47:48 +0000 Subject: [PATCH 140/324] Make some chunked xcontent more efficient (#115512) --- .../org/elasticsearch/action/bulk/BulkResponse.java | 12 ++++++------ .../common/xcontent/ChunkedToXContentBuilder.java | 8 ++------ 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index 88a9fb56b8edb..ec7a08007de93 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -158,13 +158,13 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Iterator toXContentChunked(ToXContent.Params params) { - return ChunkedToXContent.builder(params).object(ob -> { - ob.field(ERRORS, hasFailures()); - ob.field(TOOK, tookInMillis); + return ChunkedToXContent.builder(params).object(ob -> ob.append((b, p) -> { + b.field(ERRORS, hasFailures()); + b.field(TOOK, tookInMillis); if (ingestTookInMillis != BulkResponse.NO_INGEST_TOOK) { - ob.field(INGEST_TOOK, ingestTookInMillis); + b.field(INGEST_TOOK, ingestTookInMillis); } - ob.array(ITEMS, Iterators.forArray(responses)); - }); + return b; + }).array(ITEMS, Iterators.forArray(responses))); } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentBuilder.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentBuilder.java index a3141bff7c6e2..a3243ef3865a7 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentBuilder.java @@ -58,9 +58,7 @@ private void endObject() { * Creates an object, with the specified {@code contents} */ public ChunkedToXContentBuilder xContentObject(ToXContent contents) { - startObject(); - append(contents); - endObject(); + addChunk((b, p) -> contents.toXContent(b.startObject(), p).endObject()); return this; } @@ -68,9 +66,7 @@ public ChunkedToXContentBuilder xContentObject(ToXContent contents) { * Creates an object named {@code name}, with the specified {@code contents} */ public ChunkedToXContentBuilder xContentObject(String name, ToXContent contents) { - startObject(name); - append(contents); - endObject(); + addChunk((b, p) -> contents.toXContent(b.startObject(name), p).endObject()); return this; } From 0d8d8bd39282dd146ead0e8000a96d31587127de Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Mon, 28 Oct 2024 11:05:44 +0100 Subject: [PATCH 141/324] [DOCS] Add search and filtering tutorial/quickstart, edit filtering page (#114353) --- .../query-dsl/query_filter_context.asciidoc | 47 +- .../full-text-filtering-tutorial.asciidoc | 626 ++++++++++++++++++ docs/reference/quickstart/index.asciidoc | 4 +- 3 files changed, 662 insertions(+), 15 deletions(-) create mode 100644 docs/reference/quickstart/full-text-filtering-tutorial.asciidoc diff --git a/docs/reference/query-dsl/query_filter_context.asciidoc b/docs/reference/query-dsl/query_filter_context.asciidoc index 78e1549644aa6..1fd75ef0e841d 100644 --- a/docs/reference/query-dsl/query_filter_context.asciidoc +++ b/docs/reference/query-dsl/query_filter_context.asciidoc @@ -29,26 +29,45 @@ parameter, such as the `query` parameter in the [discrete] [[filter-context]] === Filter context -In a filter context, a query clause answers the question ``__Does this -document match this query clause?__'' The answer is a simple Yes or No -- no -scores are calculated. Filter context is mostly used for filtering structured -data, e.g. -* __Does this +timestamp+ fall into the range 2015 to 2016?__ -* __Is the +status+ field set to ++"published"++__? +A filter answers the binary question “Does this document match this query clause?”. The answer is simply "yes" or "no". +Filtering has several benefits: -Frequently used filters will be cached automatically by Elasticsearch, to -speed up performance. +. *Simple binary logic*: In a filter context, a query clause determines document matches based on a yes/no criterion, without score calculation. +. *Performance*: Because they don't compute relevance scores, filters execute faster than queries. +. *Caching*: {es} automatically caches frequently used filters, speeding up subsequent search performance. +. *Resource efficiency*: Filters consume less CPU resources compared to full-text queries. +. *Query combination*: Filters can be combined with scored queries to refine result sets efficiently. -Filter context is in effect whenever a query clause is passed to a `filter` -parameter, such as the `filter` or `must_not` parameters in the -<> query, the `filter` parameter in the -<> query, or the -<> aggregation. +Filters are particularly effective for querying structured data and implementing "must have" criteria in complex searches. + +Structured data refers to information that is highly organized and formatted in a predefined manner. In the context of Elasticsearch, this typically includes: + +* Numeric fields (integers, floating-point numbers) +* Dates and timestamps +* Boolean values +* Keyword fields (exact match strings) +* Geo-points and geo-shapes + +Unlike full-text fields, structured data has a consistent, predictable format, making it ideal for precise filtering operations. + +Common filter applications include: + +* Date range checks: for example is the `timestamp` field between 2015 and 2016 +* Specific field value checks: for example is the `status` field equal to "published" or is the `author` field equal to "John Doe" + +Filter context applies when a query clause is passed to a `filter` parameter, such as: + +* `filter` or `must_not` parameters in <> queries +* `filter` parameter in <> queries +* <> aggregations + +Filters optimize query performance and efficiency, especially for structured data queries and when combined with full-text searches. [discrete] [[query-filter-context-ex]] === Example of query and filter contexts + Below is an example of query clauses being used in query and filter context in the `search` API. This query will match documents where all of the following conditions are met: @@ -93,4 +112,4 @@ significand's precision will be converted to floats with loss of precision. TIP: Use query clauses in query context for conditions which should affect the score of matching documents (i.e. how well does the document match), and use -all other query clauses in filter context. \ No newline at end of file +all other query clauses in filter context. diff --git a/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc new file mode 100644 index 0000000000000..46cadc19f2547 --- /dev/null +++ b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc @@ -0,0 +1,626 @@ +[[full-text-filter-tutorial]] +== Basic full-text search and filtering in {es} +++++ +Basics: Full-text search and filtering +++++ + +This is a hands-on introduction to the basics of full-text search with {es}, also known as _lexical search_, using the <> and <>. +You'll also learn how to filter data, to narrow down search results based on exact criteria. + +In this scenario, we're implementing a search function for a cooking blog. +The blog contains recipes with various attributes including textual content, categorical data, and numerical ratings. + +The goal is to create search queries that enable users to: + +* Find recipes based on ingredients they want to use or avoid +* Discover dishes suitable for their dietary needs +* Find highly-rated recipes in specific categories +* Find recent recipes from their favorite authors + +To achieve these goals we'll use different Elasticsearch queries to perform full-text search, apply filters, and combine multiple search criteria. + +[discrete] +[[full-text-filter-tutorial-create-index]] +=== Step 1: Create an index + +Create the `cooking_blog` index to get started: + +[source,console] +---- +PUT /cooking_blog +---- +// TESTSETUP + +Now define the mappings for the index: + +[source,console] +---- +PUT /cooking_blog/_mapping +{ + "properties": { + "title": { + "type": "text", + "analyzer": "standard", <1> + "fields": { <2> + "keyword": { + "type": "keyword", + "ignore_above": 256 <3> + } + } + }, + "description": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "author": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "date": { + "type": "date", + "format": "yyyy-MM-dd" + }, + "category": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "tags": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "rating": { + "type": "float" + } + } +} +---- +// TEST +<1> The `standard` analyzer is used by default for `text` fields if an `analyzer` isn't specified. It's included here for demonstration purposes. +<2> <> are used here to index `text` fields as both `text` and `keyword` <>. This enables both full-text search and exact matching/filtering on the same field. +Note that if you used <>, these multi-fields would be created automatically. +<3> The <> prevents indexing values longer than 256 characters in the `keyword` field. Again this is the default value, but it's included here for for demonstration purposes. +It helps to save disk space and avoid potential issues with Lucene's term byte-length limit. + +[TIP] +==== +Full-text search is powered by <>. +Text analysis normalizes and standardizes text data so it can be efficiently stored in an inverted index and searched in near real-time. +Analysis happens at both <>. +This tutorial won't cover analysis in detail, but it's important to understand how text is processed to create effective search queries. +==== + +[discrete] +[[full-text-filter-tutorial-index-data]] +=== Step 2: Add sample blog posts to your index + +Now you'll need to index some example blog posts using the <>. +Note that `text` fields are analyzed and multi-fields are generated at index time. + +[source,console] +---- +POST /cooking_blog/_bulk?refresh=wait_for +{"index":{"_id":"1"}} +{"title":"Perfect Pancakes: A Fluffy Breakfast Delight","description":"Learn the secrets to making the fluffiest pancakes, so amazing you won't believe your tastebuds. This recipe uses buttermilk and a special folding technique to create light, airy pancakes that are perfect for lazy Sunday mornings.","author":"Maria Rodriguez","date":"2023-05-01","category":"Breakfast","tags":["pancakes","breakfast","easy recipes"],"rating":4.8} +{"index":{"_id":"2"}} +{"title":"Spicy Thai Green Curry: A Vegetarian Adventure","description":"Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. Don't worry about the heat - you can easily adjust the spice level to your liking.","author":"Liam Chen","date":"2023-05-05","category":"Main Course","tags":["thai","vegetarian","curry","spicy"],"rating":4.6} +{"index":{"_id":"3"}} +{"title":"Classic Beef Stroganoff: A Creamy Comfort Food","description":"Indulge in this rich and creamy beef stroganoff. Tender strips of beef in a savory mushroom sauce, served over a bed of egg noodles. It's the ultimate comfort food for chilly evenings.","author":"Emma Watson","date":"2023-05-10","category":"Main Course","tags":["beef","pasta","comfort food"],"rating":4.7} +{"index":{"_id":"4"}} +{"title":"Vegan Chocolate Avocado Mousse","description":"Discover the magic of avocado in this rich, vegan chocolate mousse. Creamy, indulgent, and secretly healthy, it's the perfect guilt-free dessert for chocolate lovers.","author":"Alex Green","date":"2023-05-15","category":"Dessert","tags":["vegan","chocolate","avocado","healthy dessert"],"rating":4.5} +{"index":{"_id":"5"}} +{"title":"Crispy Oven-Fried Chicken","description":"Get that perfect crunch without the deep fryer! This oven-fried chicken recipe delivers crispy, juicy results every time. A healthier take on the classic comfort food.","author":"Maria Rodriguez","date":"2023-05-20","category":"Main Course","tags":["chicken","oven-fried","healthy"],"rating":4.9} +---- +// TEST[continued] + +[discrete] +[[full-text-filter-tutorial-match-query]] +=== Step 3: Perform basic full-text searches + +Full-text search involves executing text-based queries across one or more document fields. +These queries calculate a relevance score for each matching document, based on how closely the document's content aligns with the search terms. +{es} offers various query types, each with its own method for matching text and <>. + +[discrete] +==== `match` query + +The <> query is the standard query for full-text, or "lexical", search. +The query text will be analyzed according to the analyzer configuration specified on each field (or at query time). + +First, search the `description` field for "fluffy pancakes": + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "match": { + "description": { + "query": "fluffy pancakes" <1> + } + } + } +} +---- +// TEST[continued] +<1> By default, the `match` query uses `OR` logic between the resulting tokens. This means it will match documents that contain either "fluffy" or "pancakes", or both, in the description field. + +At search time, {es} defaults to the analyzer defined in the field mapping. In this example, we're using the `standard` analyzer. Using a different analyzer at search time is an <>. + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 0, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { <1> + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 1.8378843, <2> + "hits": [ + { + "_index": "cooking_blog", + "_id": "1", + "_score": 1.8378843, <3> + "_source": { + "title": "Perfect Pancakes: A Fluffy Breakfast Delight", <4> + "description": "Learn the secrets to making the fluffiest pancakes, so amazing you won't believe your tastebuds. This recipe uses buttermilk and a special folding technique to create light, airy pancakes that are perfect for lazy Sunday mornings.", <5> + "author": "Maria Rodriguez", + "date": "2023-05-01", + "category": "Breakfast", + "tags": [ + "pancakes", + "breakfast", + "easy recipes" + ], + "rating": 4.8 + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 0/"took": "$body.took"/] +// TESTRESPONSE[s/"total": 1/"total": $body._shards.total/] +// TESTRESPONSE[s/"successful": 1/"successful": $body._shards.successful/] +// TESTRESPONSE[s/"value": 1/"value": $body.hits.total.value/] +// TESTRESPONSE[s/"max_score": 1.8378843/"max_score": $body.hits.max_score/] +// TESTRESPONSE[s/"_score": 1.8378843/"_score": $body.hits.hits.0._score/] +<1> The `hits` object contains the total number of matching documents and their relation to the total. Refer to <> for more details about the `hits` object. +<2> `max_score` is the highest relevance score among all matching documents. In this example, we only have one matching document. +<3> `_score` is the relevance score for a specific document, indicating how well it matches the query. Higher scores indicate better matches. In this example the `max_score` is the same as the `_score`, as there is only one matching document. +<4> The title contains both "Fluffy" and "Pancakes", matching our search terms exactly. +<5> The description includes "fluffiest" and "pancakes", further contributing to the document's relevance due to the analysis process. +============== + +[discrete] +==== Require all terms in a match query + +Specify the `and` operator to require both terms in the `description` field. +This stricter search returns _zero hits_ on our sample data, as no document contains both "fluffy" and "pancakes" in the description. + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "match": { + "description": { + "query": "fluffy pancakes", + "operator": "and" + } + } + } +} +---- +// TEST[continued] + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 0, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 0, + "relation": "eq" + }, + "max_score": null, + "hits": [] + } +} +---- +// TESTRESPONSE[s/"took": 0/"took": "$body.took"/] +============== + +[discrete] +==== Specify a minimum number of terms to match + +Use the <> parameter to specify the minimum number of terms a document should have to be included in the search results. + +Search the title field to match at least 2 of the 3 terms: "fluffy", "pancakes", or "breakfast". +This is useful for improving relevance while allowing some flexibility. + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "match": { + "title": { + "query": "fluffy pancakes breakfast", + "minimum_should_match": 2 + } + } + } +} +---- +// TEST[continued] + +[discrete] +[[full-text-filter-tutorial-multi-match]] +=== Step 4: Search across multiple fields at once + +When users enter a search query, they often don't know (or care) whether their search terms appear in a specific field. +A <> query allows searching across multiple fields simultaneously. + +Let's start with a basic `multi_match` query: + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "multi_match": { + "query": "vegetarian curry", + "fields": ["title", "description", "tags"] + } + } +} +---- +// TEST[continued] + +This query searches for "vegetarian curry" across the title, description, and tags fields. Each field is treated with equal importance. + +However, in many cases, matches in certain fields (like the title) might be more relevant than others. We can adjust the importance of each field using field boosting: + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "multi_match": { + "query": "vegetarian curry", + "fields": ["title^3", "description^2", "tags"] <1> + } + } +} +---- +// TEST[continued] +<1> The `^` syntax applies a boost to specific fields: ++ +* `title^3`: The title field is 3 times more important than an unboosted field +* `description^2`: The description is 2 times more important +* `tags`: No boost applied (equivalent to `^1`) ++ +These boosts help tune relevance, prioritizing matches in the title over the description, and matches in the description over tags. + +Learn more about fields and per-field boosting in the <> reference. + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 0, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 7.546015, + "hits": [ + { + "_index": "cooking_blog", + "_id": "2", + "_score": 7.546015, + "_source": { + "title": "Spicy Thai Green Curry: A Vegetarian Adventure", <1> + "description": "Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. Don't worry about the heat - you can easily adjust the spice level to your liking.", <2> + "author": "Liam Chen", + "date": "2023-05-05", + "category": "Main Course", + "tags": [ + "thai", + "vegetarian", + "curry", + "spicy" + ], <3> + "rating": 4.6 + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 0/"took": "$body.took"/] +// TESTRESPONSE[s/"_score": 7.546015/"_score": $body.hits.hits.0._score/] +// TESTRESPONSE[s/"max_score": 7.546015/"max_score": $body.hits.max_score/] +<1> The title contains "Vegetarian" and "Curry", which matches our search terms. The title field has the highest boost (^3), contributing significantly to this document's relevance score. +<2> The description contains "curry" and related terms like "vegetables", further increasing the document's relevance. +<3> The tags include both "vegetarian" and "curry", providing an exact match for our search terms, albeit with no boost. + +This result demonstrates how the `multi_match` query with field boosts helps users find relevant recipes across multiple fields. +Even though the exact phrase "vegetarian curry" doesn't appear in any single field, the combination of matches across fields produces a highly relevant result. +============== + +[TIP] +==== +The `multi_match` query is often recommended over a single `match` query for most text search use cases, as it provides more flexibility and better matches user expectations. +==== + +[discrete] +[[full-text-filter-tutorial-filtering]] +=== Step 5: Filter and find exact matches + +<> allows you to narrow down your search results based on exact criteria. +Unlike full-text searches, filters are binary (yes/no) and do not affect the relevance score. +Filters execute faster than queries because excluded results don't need to be scored. + +This <> query will return only blog posts in the "Breakfast" category. + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "bool": { + "filter": [ + { "term": { "category.keyword": "Breakfast" } } <1> + ] + } + } +} +---- +// TEST[continued] +<1> Note the use of `category.keyword` here. This refers to the <> multi-field of the `category` field, ensuring an exact, case-sensitive match. + +[TIP] +==== +The `.keyword` suffix accesses the unanalyzed version of a field, enabling exact, case-sensitive matching. This works in two scenarios: + +1. *When using dynamic mapping for text fields*. Elasticsearch automatically creates a `.keyword` sub-field. +2. *When text fields are explicitly mapped with a `.keyword` sub-field*. For example, we explicitly mapped the `category` field in <> of this tutorial. +==== + +[discrete] +[[full-text-filter-tutorial-range-query]] +==== Search for posts within a date range + +Often users want to find content published within a specific time frame. +A <> query finds documents that fall within numeric or date ranges. + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "range": { + "date": { + "gte": "2023-05-01", <1> + "lte": "2023-05-31" <2> + } + } + } +} +---- +// TEST[continued] +<1> Greater than or equal to May 1, 2023. +<2> Less than or equal to May 31, 2023. + +[discrete] +[[full-text-filter-tutorial-term-query]] +==== Find exact matches + +Sometimes users want to search for exact terms to eliminate ambiguity in their search results. +A <> query searches for an exact term in a field without analyzing it. +Exact, case-sensitive matches on specific terms are often referred to as "keyword" searches. + +Here you'll search for the author "Maria Rodriguez" in the `author.keyword` field. + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "term": { + "author.keyword": "Maria Rodriguez" <1> + } + } +} +---- +// TEST[continued] +<1> The `term` query has zero flexibility. For example, here the queries `maria` or `maria rodriguez` would have zero hits, due to case sensitivity. + +[TIP] +==== +Avoid using the `term` query for <> because they are transformed by the analysis process. +==== + +[discrete] +[[full-text-filter-tutorial-complex-bool]] +=== Step 6: Combine multiple search criteria + +A <> query allows you to combine multiple query clauses to create sophisticated searches. +In this tutorial scenario it's useful for when users have complex requirements for finding recipes. + +Let's create a query that addresses the following user needs: + +* Must be a vegetarian main course +* Should contain "curry" or "spicy" in the title or description +* Must not be a dessert +* Must have a rating of at least 4.5 +* Should prefer recipes published in the last month + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "bool": { + "must": [ + { + "term": { + "category.keyword": "Main Course" + } + }, + { + "term": { + "tags": "vegetarian" + } + }, + { + "range": { + "rating": { + "gte": 4.5 + } + } + } + ], + "should": [ + { + "multi_match": { + "query": "curry spicy", + "fields": ["title^2", "description"] + } + }, + { + "range": { + "date": { + "gte": "now-1M/d" + } + } + } + ], + "must_not": [ <1> + { + "term": { + "category.keyword": "Dessert" + } + } + ] + } + } +} +---- +// TEST[continued] +<1> The `must_not` clause excludes documents that match the specified criteria. This is a powerful tool for filtering out unwanted results. + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 1, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 7.9835095, + "hits": [ + { + "_index": "cooking_blog", + "_id": "2", + "_score": 7.9835095, + "_source": { + "title": "Spicy Thai Green Curry: A Vegetarian Adventure", <1> + "description": "Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. Don't worry about the heat - you can easily adjust the spice level to your liking.", <2> + "author": "Liam Chen", + "date": "2023-05-05", + "category": "Main Course", <3> + "tags": [ <4> + "thai", + "vegetarian", <5> + "curry", + "spicy" + ], + "rating": 4.6 <6> + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 1/"took": "$body.took"/] +<1> The title contains "Spicy" and "Curry", matching our should condition. With the default <> behavior, this field contributes most to the relevance score. +<2> While the description also contains matching terms, only the best matching field's score is used by default. +<3> The recipe was published within the last month, satisfying our recency preference. +<4> The "Main Course" category matches our `must` condition. +<5> The "vegetarian" tag satisfies another `must` condition, while "curry" and "spicy" tags align with our `should` preferences. +<6> The rating of 4.6 meets our minimum rating requirement of 4.5. +============== + +[discrete] +[[full-text-filter-tutorial-learn-more]] +=== Learn more + +This tutorial introduced the basics of full-text search and filtering in {es}. +Building a real-world search experience requires understanding many more advanced concepts and techniques. +Here are some resources once you're ready to dive deeper: + +* <>: Understand all your options for searching and analyzing data in {es}. +* <>: Understand how text is processed for full-text search. +* <>: Learn about more advanced search techniques using the `_search` API, including semantic search. + + diff --git a/docs/reference/quickstart/index.asciidoc b/docs/reference/quickstart/index.asciidoc index 2d9114882254f..ed4c128392994 100644 --- a/docs/reference/quickstart/index.asciidoc +++ b/docs/reference/quickstart/index.asciidoc @@ -15,7 +15,8 @@ Get started <> , or see our <>. Learn about indices, documents, and mappings, and perform a basic search. +* <>. Learn about indices, documents, and mappings, and perform a basic search using the Query DSL. +* <>. Learn about different options for querying data, including full-text search and filtering, using the Query DSL. [discrete] [[quickstart-python-links]] @@ -27,3 +28,4 @@ If you're interested in using {es} with Python, check out Elastic Search Labs: * https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[Tutorial]: This walks you through building a complete search solution with {es} from the ground up using Flask. include::getting-started.asciidoc[] +include::full-text-filtering-tutorial.asciidoc[] From 78ccd2a4a216c25c6bf75833295a9f2d423fc19d Mon Sep 17 00:00:00 2001 From: Souradip Poddar <49103513+SouradipPoddar@users.noreply.github.com> Date: Mon, 28 Oct 2024 15:48:34 +0530 Subject: [PATCH 142/324] 112274 converted cpu stats to support unsigned 64 bit number (#114681) --- docs/changelog/114681.yaml | 6 ++ .../org/elasticsearch/TransportVersions.java | 1 + .../common/io/stream/StreamOutput.java | 7 +++ .../org/elasticsearch/monitor/os/OsProbe.java | 48 ++++++++------- .../org/elasticsearch/monitor/os/OsStats.java | 61 +++++++++++++------ .../cluster/node/stats/NodeStatsTests.java | 9 ++- .../monitor/os/OsProbeTests.java | 24 ++++---- .../monitor/os/OsStatsTests.java | 11 +++- .../node/NodeStatsMonitoringDocTests.java | 11 +++- 9 files changed, 119 insertions(+), 59 deletions(-) create mode 100644 docs/changelog/114681.yaml diff --git a/docs/changelog/114681.yaml b/docs/changelog/114681.yaml new file mode 100644 index 0000000000000..2a9901114e56f --- /dev/null +++ b/docs/changelog/114681.yaml @@ -0,0 +1,6 @@ +pr: 114681 +summary: "Support for unsigned 64 bit numbers in Cpu stats" +area: Infra/Core +type: enhancement +issues: + - 112274 diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 3986ea4b97254..9454c27dd787c 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -183,6 +183,7 @@ static TransportVersion def(int id) { public static final TransportVersion INTRODUCE_ALL_APPLICABLE_SELECTOR = def(8_778_00_0); public static final TransportVersion INDEX_MODE_LOOKUP = def(8_779_00_0); public static final TransportVersion INDEX_REQUEST_REMOVE_METERING = def(8_780_00_0); + public static final TransportVersion CPU_STAT_STRING_PARSING = def(8_781_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index c449065a953e2..d724e5ea25ca6 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -1234,4 +1234,11 @@ public void writeMissingWriteable(Class ignored) throws public void writeMissingString() throws IOException { writeBoolean(false); } + + /** + * Write a {@link BigInteger} to the stream + */ + public void writeBigInteger(BigInteger bigInteger) throws IOException { + writeString(bigInteger.toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java index 799264d8392b1..06ab6a6eee410 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java @@ -22,6 +22,7 @@ import java.lang.management.OperatingSystemMXBean; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; +import java.math.BigInteger; import java.nio.file.Files; import java.nio.file.Path; import java.util.Collections; @@ -341,8 +342,8 @@ List readProcSelfCgroup() throws IOException { * @return the total CPU time in nanoseconds * @throws IOException if an I/O exception occurs reading {@code cpuacct.usage} for the control group */ - private long getCgroupCpuAcctUsageNanos(final String controlGroup) throws IOException { - return Long.parseLong(readSysFsCgroupCpuAcctCpuAcctUsage(controlGroup)); + private BigInteger getCgroupCpuAcctUsageNanos(final String controlGroup) throws IOException { + return new BigInteger(readSysFsCgroupCpuAcctCpuAcctUsage(controlGroup)); } /** @@ -435,21 +436,22 @@ String readSysFsCgroupCpuAcctCpuAcctCfsQuota(final String controlGroup) throws I * @throws IOException if an I/O exception occurs reading {@code cpu.stat} for the control group */ private OsStats.Cgroup.CpuStat getCgroupCpuAcctCpuStat(final String controlGroup) throws IOException { + final var SENTINEL_VALUE = BigInteger.valueOf(-1); final List lines = readSysFsCgroupCpuAcctCpuStat(controlGroup); - long numberOfPeriods = -1; - long numberOfTimesThrottled = -1; - long timeThrottledNanos = -1; + var numberOfPeriods = SENTINEL_VALUE; + var numberOfTimesThrottled = SENTINEL_VALUE; + var timeThrottledNanos = SENTINEL_VALUE; for (final String line : lines) { final String[] fields = line.split("\\s+"); switch (fields[0]) { - case "nr_periods" -> numberOfPeriods = Long.parseLong(fields[1]); - case "nr_throttled" -> numberOfTimesThrottled = Long.parseLong(fields[1]); - case "throttled_time" -> timeThrottledNanos = Long.parseLong(fields[1]); + case "nr_periods" -> numberOfPeriods = new BigInteger(fields[1]); + case "nr_throttled" -> numberOfTimesThrottled = new BigInteger(fields[1]); + case "throttled_time" -> timeThrottledNanos = new BigInteger(fields[1]); } } - assert numberOfPeriods != -1; - assert numberOfTimesThrottled != -1; - assert timeThrottledNanos != -1; + assert numberOfPeriods.equals(SENTINEL_VALUE) == false; + assert numberOfTimesThrottled.equals(SENTINEL_VALUE) == false; + assert timeThrottledNanos.equals(SENTINEL_VALUE) == false; return new OsStats.Cgroup.CpuStat(numberOfPeriods, numberOfTimesThrottled, timeThrottledNanos); } @@ -635,28 +637,30 @@ boolean areCgroupStatsAvailable() throws IOException { * @throws IOException if an I/O exception occurs reading {@code cpu.stat} for the control group */ @SuppressForbidden(reason = "Uses PathUtils.get to generate meaningful assertion messages") - private Map getCgroupV2CpuStats(String controlGroup) throws IOException { + private Map getCgroupV2CpuStats(String controlGroup) throws IOException { final List lines = readCgroupV2CpuStats(controlGroup); - final Map stats = new HashMap<>(); + final Map stats = new HashMap<>(); + final BigInteger SENTINEL_VALUE = BigInteger.valueOf(-1); for (String line : lines) { String[] parts = line.split("\\s+"); assert parts.length == 2 : "Corrupt cpu.stat line: [" + line + "]"; - stats.put(parts[0], Long.parseLong(parts[1])); + stats.put(parts[0], new BigInteger(parts[1])); } final List expectedKeys = List.of("system_usec", "usage_usec", "user_usec"); expectedKeys.forEach(key -> { assert stats.containsKey(key) : "[" + key + "] missing from " + PathUtils.get("/sys/fs/cgroup", controlGroup, "cpu.stat"); - assert stats.get(key) != -1 : stats.get(key); + assert stats.get(key).compareTo(SENTINEL_VALUE) != 0 : stats.get(key).toString(); }); final List optionalKeys = List.of("nr_periods", "nr_throttled", "throttled_usec"); optionalKeys.forEach(key -> { if (stats.containsKey(key) == false) { - stats.put(key, 0L); + stats.put(key, BigInteger.ZERO); } - assert stats.get(key) != -1L : "[" + key + "] in " + PathUtils.get("/sys/fs/cgroup", controlGroup, "cpu.stat") + " is -1"; + assert stats.get(key).compareTo(SENTINEL_VALUE) != 0 + : "[" + key + "] in " + PathUtils.get("/sys/fs/cgroup", controlGroup, "cpu.stat") + " is -1"; }); return stats; @@ -682,7 +686,7 @@ private OsStats.Cgroup getCgroup() { assert controllerMap.isEmpty() == false; final String cpuAcctControlGroup; - final long cgroupCpuAcctUsageNanos; + final BigInteger cgroupCpuAcctUsageNanos; final long cgroupCpuAcctCpuCfsPeriodMicros; final long cgroupCpuAcctCpuCfsQuotaMicros; final String cpuControlGroup; @@ -696,9 +700,11 @@ private OsStats.Cgroup getCgroup() { cpuControlGroup = cpuAcctControlGroup = memoryControlGroup = controllerMap.get(""); // `cpuacct` was merged with `cpu` in v2 - final Map cpuStatsMap = getCgroupV2CpuStats(cpuControlGroup); + final Map cpuStatsMap = getCgroupV2CpuStats(cpuControlGroup); - cgroupCpuAcctUsageNanos = cpuStatsMap.get("usage_usec") * 1000; // convert from micros to nanos + final BigInteger THOUSAND = BigInteger.valueOf(1000); + + cgroupCpuAcctUsageNanos = cpuStatsMap.get("usage_usec").multiply(THOUSAND); // convert from micros to nanos long[] cpuLimits = getCgroupV2CpuLimit(cpuControlGroup); cgroupCpuAcctCpuCfsQuotaMicros = cpuLimits[0]; @@ -707,7 +713,7 @@ private OsStats.Cgroup getCgroup() { cpuStat = new OsStats.Cgroup.CpuStat( cpuStatsMap.get("nr_periods"), cpuStatsMap.get("nr_throttled"), - cpuStatsMap.get("throttled_usec") * 1000 + cpuStatsMap.get("throttled_usec").multiply(THOUSAND) ); cgroupMemoryLimitInBytes = getCgroupV2MemoryLimitInBytes(memoryControlGroup); diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java index 7a2f46668f610..6c1ba2dfbe63a 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -20,6 +20,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.math.BigInteger; import java.util.Arrays; import java.util.Objects; @@ -362,7 +363,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static class Cgroup implements Writeable, ToXContentFragment { private final String cpuAcctControlGroup; - private final long cpuAcctUsageNanos; + private final BigInteger cpuAcctUsageNanos; private final String cpuControlGroup; private final long cpuCfsPeriodMicros; private final long cpuCfsQuotaMicros; @@ -387,7 +388,7 @@ public String getCpuAcctControlGroup() { * * @return the total CPU time in nanoseconds */ - public long getCpuAcctUsageNanos() { + public BigInteger getCpuAcctUsageNanos() { return cpuAcctUsageNanos; } @@ -465,7 +466,7 @@ public String getMemoryUsageInBytes() { public Cgroup( final String cpuAcctControlGroup, - final long cpuAcctUsageNanos, + final BigInteger cpuAcctUsageNanos, final String cpuControlGroup, final long cpuCfsPeriodMicros, final long cpuCfsQuotaMicros, @@ -487,7 +488,11 @@ public Cgroup( Cgroup(final StreamInput in) throws IOException { cpuAcctControlGroup = in.readString(); - cpuAcctUsageNanos = in.readLong(); + if (in.getTransportVersion().onOrAfter(TransportVersions.CPU_STAT_STRING_PARSING)) { + cpuAcctUsageNanos = in.readBigInteger(); + } else { + cpuAcctUsageNanos = BigInteger.valueOf(in.readLong()); + } cpuControlGroup = in.readString(); cpuCfsPeriodMicros = in.readLong(); cpuCfsQuotaMicros = in.readLong(); @@ -500,7 +505,11 @@ public Cgroup( @Override public void writeTo(final StreamOutput out) throws IOException { out.writeString(cpuAcctControlGroup); - out.writeLong(cpuAcctUsageNanos); + if (out.getTransportVersion().onOrAfter(TransportVersions.CPU_STAT_STRING_PARSING)) { + out.writeBigInteger(cpuAcctUsageNanos); + } else { + out.writeLong(cpuAcctUsageNanos.longValue()); + } out.writeString(cpuControlGroup); out.writeLong(cpuCfsPeriodMicros); out.writeLong(cpuCfsQuotaMicros); @@ -551,9 +560,9 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa */ public static class CpuStat implements Writeable, ToXContentFragment { - private final long numberOfElapsedPeriods; - private final long numberOfTimesThrottled; - private final long timeThrottledNanos; + private final BigInteger numberOfElapsedPeriods; + private final BigInteger numberOfTimesThrottled; + private final BigInteger timeThrottledNanos; /** * The number of elapsed periods. @@ -561,7 +570,7 @@ public static class CpuStat implements Writeable, ToXContentFragment { * @return the number of elapsed periods as measured by * {@code cpu.cfs_period_us} */ - public long getNumberOfElapsedPeriods() { + public BigInteger getNumberOfElapsedPeriods() { return numberOfElapsedPeriods; } @@ -571,7 +580,7 @@ public long getNumberOfElapsedPeriods() { * * @return the number of times */ - public long getNumberOfTimesThrottled() { + public BigInteger getNumberOfTimesThrottled() { return numberOfTimesThrottled; } @@ -581,27 +590,43 @@ public long getNumberOfTimesThrottled() { * * @return the total time in nanoseconds */ - public long getTimeThrottledNanos() { + public BigInteger getTimeThrottledNanos() { return timeThrottledNanos; } - public CpuStat(final long numberOfElapsedPeriods, final long numberOfTimesThrottled, final long timeThrottledNanos) { + public CpuStat( + final BigInteger numberOfElapsedPeriods, + final BigInteger numberOfTimesThrottled, + final BigInteger timeThrottledNanos + ) { this.numberOfElapsedPeriods = numberOfElapsedPeriods; this.numberOfTimesThrottled = numberOfTimesThrottled; this.timeThrottledNanos = timeThrottledNanos; } CpuStat(final StreamInput in) throws IOException { - numberOfElapsedPeriods = in.readLong(); - numberOfTimesThrottled = in.readLong(); - timeThrottledNanos = in.readLong(); + if (in.getTransportVersion().onOrAfter(TransportVersions.CPU_STAT_STRING_PARSING)) { + numberOfElapsedPeriods = in.readBigInteger(); + numberOfTimesThrottled = in.readBigInteger(); + timeThrottledNanos = in.readBigInteger(); + } else { + numberOfElapsedPeriods = BigInteger.valueOf(in.readLong()); + numberOfTimesThrottled = BigInteger.valueOf(in.readLong()); + timeThrottledNanos = BigInteger.valueOf(in.readLong()); + } } @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeLong(numberOfElapsedPeriods); - out.writeLong(numberOfTimesThrottled); - out.writeLong(timeThrottledNanos); + if (out.getTransportVersion().onOrAfter(TransportVersions.CPU_STAT_STRING_PARSING)) { + out.writeBigInteger(numberOfElapsedPeriods); + out.writeBigInteger(numberOfTimesThrottled); + out.writeBigInteger(timeThrottledNanos); + } else { + out.writeLong(numberOfElapsedPeriods.longValue()); + out.writeLong(numberOfTimesThrottled.longValue()); + out.writeLong(timeThrottledNanos.longValue()); + } } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 77d00f0e5a068..b5f61d5b798fa 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -85,6 +85,7 @@ import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.math.BigInteger; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; @@ -709,11 +710,15 @@ public static NodeStats createNodeStats() { new OsStats.Swap(swapTotal, randomLongBetween(0, swapTotal)), new OsStats.Cgroup( randomAlphaOfLength(8), - randomNonNegativeLong(), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)), randomAlphaOfLength(8), randomNonNegativeLong(), randomNonNegativeLong(), - new OsStats.Cgroup.CpuStat(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()), + new OsStats.Cgroup.CpuStat( + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)) + ), randomAlphaOfLength(8), Long.toString(randomNonNegativeLong()), Long.toString(randomNonNegativeLong()) diff --git a/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java b/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java index 220f4336fc444..aad78881a8a13 100644 --- a/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java @@ -136,12 +136,12 @@ public void testOsStats() { if (Constants.LINUX) { if (stats.getCgroup() != null) { assertThat(stats.getCgroup().getCpuAcctControlGroup(), notNullValue()); - assertThat(stats.getCgroup().getCpuAcctUsageNanos(), greaterThan(0L)); + assertThat(stats.getCgroup().getCpuAcctUsageNanos(), greaterThan(BigInteger.ZERO)); assertThat(stats.getCgroup().getCpuCfsQuotaMicros(), anyOf(equalTo(-1L), greaterThanOrEqualTo(0L))); assertThat(stats.getCgroup().getCpuCfsPeriodMicros(), greaterThanOrEqualTo(0L)); - assertThat(stats.getCgroup().getCpuStat().getNumberOfElapsedPeriods(), greaterThanOrEqualTo(0L)); - assertThat(stats.getCgroup().getCpuStat().getNumberOfTimesThrottled(), greaterThanOrEqualTo(0L)); - assertThat(stats.getCgroup().getCpuStat().getTimeThrottledNanos(), greaterThanOrEqualTo(0L)); + assertThat(stats.getCgroup().getCpuStat().getNumberOfElapsedPeriods(), greaterThanOrEqualTo(BigInteger.ZERO)); + assertThat(stats.getCgroup().getCpuStat().getNumberOfTimesThrottled(), greaterThanOrEqualTo(BigInteger.ZERO)); + assertThat(stats.getCgroup().getCpuStat().getTimeThrottledNanos(), greaterThanOrEqualTo(BigInteger.ZERO)); // These could be null if transported from a node running an older version, but shouldn't be null on the current node assertThat(stats.getCgroup().getMemoryControlGroup(), notNullValue()); String memoryLimitInBytes = stats.getCgroup().getMemoryLimitInBytes(); @@ -191,26 +191,26 @@ public void testCgroupProbe() { case 1 -> { assertNotNull(cgroup); assertThat(cgroup.getCpuAcctControlGroup(), equalTo("/" + hierarchy)); - assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(364869866063112L)); + assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(new BigInteger("364869866063112"))); assertThat(cgroup.getCpuControlGroup(), equalTo("/" + hierarchy)); assertThat(cgroup.getCpuCfsPeriodMicros(), equalTo(100000L)); assertThat(cgroup.getCpuCfsQuotaMicros(), equalTo(50000L)); - assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(17992L)); - assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(1311L)); - assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(139298645489L)); + assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(BigInteger.valueOf(17992))); + assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(BigInteger.valueOf(1311))); + assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(new BigInteger("139298645489"))); assertThat(cgroup.getMemoryLimitInBytes(), equalTo("18446744073709551615")); assertThat(cgroup.getMemoryUsageInBytes(), equalTo("4796416")); } case 2 -> { assertNotNull(cgroup); assertThat(cgroup.getCpuAcctControlGroup(), equalTo("/" + hierarchy)); - assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(364869866063000L)); + assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(new BigInteger("364869866063000"))); assertThat(cgroup.getCpuControlGroup(), equalTo("/" + hierarchy)); assertThat(cgroup.getCpuCfsPeriodMicros(), equalTo(100000L)); assertThat(cgroup.getCpuCfsQuotaMicros(), equalTo(50000L)); - assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(17992L)); - assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(1311L)); - assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(139298645000L)); + assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(BigInteger.valueOf(17992))); + assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(BigInteger.valueOf(1311))); + assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(new BigInteger("139298645000"))); assertThat(cgroup.getMemoryLimitInBytes(), equalTo("18446744073709551615")); assertThat(cgroup.getMemoryUsageInBytes(), equalTo("4796416")); } diff --git a/server/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java b/server/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java index 4c53067ca123a..2146e47febe9c 100644 --- a/server/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.math.BigInteger; import static org.hamcrest.Matchers.equalTo; @@ -21,7 +22,7 @@ public class OsStatsTests extends ESTestCase { public void testSerialization() throws IOException { int numLoadAverages = randomIntBetween(1, 5); - double loadAverages[] = new double[numLoadAverages]; + double[] loadAverages = new double[numLoadAverages]; for (int i = 0; i < loadAverages.length; i++) { loadAverages[i] = randomDouble(); } @@ -32,11 +33,15 @@ public void testSerialization() throws IOException { OsStats.Swap swap = new OsStats.Swap(swapTotal, randomLongBetween(0, swapTotal)); OsStats.Cgroup cgroup = new OsStats.Cgroup( randomAlphaOfLength(8), - randomNonNegativeLong(), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE)), randomAlphaOfLength(8), randomNonNegativeLong(), randomNonNegativeLong(), - new OsStats.Cgroup.CpuStat(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()), + new OsStats.Cgroup.CpuStat( + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)) + ), randomAlphaOfLength(8), Long.toString(randomNonNegativeLong()), Long.toString(randomNonNegativeLong()) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java index da23f27e1357e..3d7f843358646 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java @@ -37,6 +37,7 @@ import org.junit.Before; import java.io.IOException; +import java.math.BigInteger; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -227,7 +228,7 @@ public void testToXContent() throws IOException { "stat": { "number_of_elapsed_periods": 39, "number_of_times_throttled": 40, - "time_throttled_nanos": 41 + "time_throttled_nanos": 9223372036854775848 } }, "memory": { @@ -393,10 +394,14 @@ private static NodeStats mockNodeStats() { // Os final OsStats.Cpu osCpu = new OsStats.Cpu((short) no, new double[] { ++iota, ++iota, ++iota }); - final OsStats.Cgroup.CpuStat osCpuStat = new OsStats.Cgroup.CpuStat(++iota, ++iota, ++iota); + final OsStats.Cgroup.CpuStat osCpuStat = new OsStats.Cgroup.CpuStat( + BigInteger.valueOf(++iota), + BigInteger.valueOf(++iota), + BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.valueOf(++iota)) + ); final OsStats.Cgroup osCgroup = new OsStats.Cgroup( "_cpu_acct_ctrl_group", - ++iota, + BigInteger.valueOf(++iota), "_cpu_ctrl_group", ++iota, ++iota, From ddad15676eae431329d74000e16ab311c09d9cf9 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 28 Oct 2024 08:19:29 -0400 Subject: [PATCH 143/324] Updating knn tuning guide and size estimates (#115691) --- docs/reference/how-to/knn-search.asciidoc | 30 ++++++++++++++++------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/docs/reference/how-to/knn-search.asciidoc b/docs/reference/how-to/knn-search.asciidoc index 1d9c988f7b6c9..83614b0d99024 100644 --- a/docs/reference/how-to/knn-search.asciidoc +++ b/docs/reference/how-to/knn-search.asciidoc @@ -16,10 +16,11 @@ structures. So these same recommendations also help with indexing speed. The default <> is `float`. But this can be automatically quantized during index time through <>. Quantization will reduce the -required memory by 4x, but it will also reduce the precision of the vectors and -increase disk usage for the field (by up to 25%). Increased disk usage is a +required memory by 4x, 8x, or as much as 32x, but it will also reduce the precision of the vectors and +increase disk usage for the field (by up to 25%, 12.5%, or 3.125%, respectively). Increased disk usage is a result of {es} storing both the quantized and the unquantized vectors. -For example, when quantizing 40GB of floating point vectors an extra 10GB of data will be stored for the quantized vectors. The total disk usage amounts to 50GB, but the memory usage for fast search will be reduced to 10GB. +For example, when int8 quantizing 40GB of floating point vectors an extra 10GB of data will be stored for the quantized vectors. +The total disk usage amounts to 50GB, but the memory usage for fast search will be reduced to 10GB. For `float` vectors with `dim` greater than or equal to `384`, using a <> index is highly recommended. @@ -68,12 +69,23 @@ Another option is to use <>. kNN search. HNSW is a graph-based algorithm which only works efficiently when most vector data is held in memory. You should ensure that data nodes have at least enough RAM to hold the vector data and index structures. To check the -size of the vector data, you can use the <> API. As a -loose rule of thumb, and assuming the default HNSW options, the bytes used will -be `num_vectors * 4 * (num_dimensions + 12)`. When using the `byte` <> -the space required will be closer to `num_vectors * (num_dimensions + 12)`. Note that -the required RAM is for the filesystem cache, which is separate from the Java -heap. +size of the vector data, you can use the <> API. + +Here are estimates for different element types and quantization levels: ++ +-- +`element_type: float`: `num_vectors * num_dimensions * 4` +`element_type: float` with `quantization: int8`: `num_vectors * (num_dimensions + 4)` +`element_type: float` with `quantization: int4`: `num_vectors * (num_dimensions/2 + 4)` +`element_type: float` with `quantization: bbq`: `num_vectors * (num_dimensions/8 + 12)` +`element_type: byte`: `num_vectors * num_dimensions` +`element_type: bit`: `num_vectors * (num_dimensions/8)` +-- + +If utilizing HNSW, the graph must also be in memory, to estimate the required bytes use `num_vectors * 4 * HNSW.m`. The +default value for `HNSW.m` is 16, so by default `num_vectors * 4 * 16`. + +Note that the required RAM is for the filesystem cache, which is separate from the Java heap. The data nodes should also leave a buffer for other ways that RAM is needed. For example your index might also include text fields and numerics, which also From ab558e663c4e5e086d16e862cff11bc6cdb82486 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Mon, 28 Oct 2024 14:13:14 +0100 Subject: [PATCH 144/324] Update quickstart overview, add local install instructions (#115746) --- .../full-text-filtering-tutorial.asciidoc | 13 +++++++++++++ .../quickstart/getting-started.asciidoc | 15 ++++++++++----- docs/reference/quickstart/index.asciidoc | 19 +++++++++++++++---- .../run-elasticsearch-locally.asciidoc | 19 ++----------------- 4 files changed, 40 insertions(+), 26 deletions(-) diff --git a/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc index 46cadc19f2547..fee4b797da724 100644 --- a/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc +++ b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc @@ -19,6 +19,19 @@ The goal is to create search queries that enable users to: To achieve these goals we'll use different Elasticsearch queries to perform full-text search, apply filters, and combine multiple search criteria. +[discrete] +[[full-text-filter-tutorial-requirements]] +=== Requirements + +You'll need a running {es} cluster, together with {kib} to use the Dev Tools API Console. +Run the following command in your terminal to set up a <>: + +[source,sh] +---- +curl -fsSL https://elastic.co/start-local | sh +---- +// NOTCONSOLE + [discrete] [[full-text-filter-tutorial-create-index]] === Step 1: Create an index diff --git a/docs/reference/quickstart/getting-started.asciidoc b/docs/reference/quickstart/getting-started.asciidoc index a6d233d8b8abc..03bfb62548b25 100644 --- a/docs/reference/quickstart/getting-started.asciidoc +++ b/docs/reference/quickstart/getting-started.asciidoc @@ -15,12 +15,17 @@ You can {kibana-ref}/console-kibana.html#import-export-console-requests[convert ==== [discrete] -[[getting-started-prerequisites]] -=== Prerequisites +[[getting-started-requirements]] +=== Requirements -Before you begin, you need to have a running {es} cluster. -The fastest way to get started is with a <>. -Refer to <> for other deployment options. +You'll need a running {es} cluster, together with {kib} to use the Dev Tools API Console. +Run the following command in your terminal to set up a <>: + +[source,sh] +---- +curl -fsSL https://elastic.co/start-local | sh +---- +// NOTCONSOLE //// [source,console] diff --git a/docs/reference/quickstart/index.asciidoc b/docs/reference/quickstart/index.asciidoc index ed4c128392994..3fa6d53e6345d 100644 --- a/docs/reference/quickstart/index.asciidoc +++ b/docs/reference/quickstart/index.asciidoc @@ -9,7 +9,15 @@ Unless otherwise noted, these examples will use queries written in <> , or see our <>. +Run the following command in your terminal to set up a <>: + +[source,sh] +---- +curl -fsSL https://elastic.co/start-local | sh +---- +// NOTCONSOLE + +Alternatively, refer to our <>. [discrete] [[quickstart-list]] @@ -17,15 +25,18 @@ Get started <> , or see our <>. Learn about indices, documents, and mappings, and perform a basic search using the Query DSL. * <>. Learn about different options for querying data, including full-text search and filtering, using the Query DSL. +* <>: Learn how to create embeddings for your data with `semantic_text` and query using the `semantic` query. +** <>: Learn how to combine semantic search with full-text search. +* <>: Learn how to ingest dense vector embeddings into {es}. -[discrete] -[[quickstart-python-links]] -== Working in Python +.Working in Python +****************** If you're interested in using {es} with Python, check out Elastic Search Labs: * https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs` repository]: Contains a range of Python https://github.com/elastic/elasticsearch-labs/tree/main/notebooks[notebooks] and https://github.com/elastic/elasticsearch-labs/tree/main/example-apps[example apps]. * https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[Tutorial]: This walks you through building a complete search solution with {es} from the ground up using Flask. +****************** include::getting-started.asciidoc[] include::full-text-filtering-tutorial.asciidoc[] diff --git a/docs/reference/run-elasticsearch-locally.asciidoc b/docs/reference/run-elasticsearch-locally.asciidoc index 03885132e4050..371660f2da7c9 100644 --- a/docs/reference/run-elasticsearch-locally.asciidoc +++ b/docs/reference/run-elasticsearch-locally.asciidoc @@ -42,6 +42,7 @@ To set up {es} and {kib} locally, run the `start-local` script: curl -fsSL https://elastic.co/start-local | sh ---- // NOTCONSOLE +// REVIEWED[OCT.28.2024] This script creates an `elastic-start-local` folder containing configuration files and starts both {es} and {kib} using Docker. @@ -50,29 +51,13 @@ After running the script, you can access Elastic services at the following endpo * *{es}*: http://localhost:9200 * *{kib}*: http://localhost:5601 -The script generates a random password for the `elastic` user, which is displayed at the end of the installation and stored in the `.env` file. +The script generates a random password for the `elastic` user, and an API key, stored in the `.env` file. [CAUTION] ==== This setup is for local testing only. HTTPS is disabled, and Basic authentication is used for {es}. For security, {es} and {kib} are accessible only through `localhost`. ==== -[discrete] -[[api-access]] -=== API access - -An API key for {es} is generated and stored in the `.env` file as `ES_LOCAL_API_KEY`. -Use this key to connect to {es} with a https://www.elastic.co/guide/en/elasticsearch/client/index.html[programming language client] or the <>. - -From the `elastic-start-local` folder, check the connection to Elasticsearch using `curl`: - -[source,sh] ----- -source .env -curl $ES_LOCAL_URL -H "Authorization: ApiKey ${ES_LOCAL_API_KEY}" ----- -// NOTCONSOLE - [discrete] [[local-dev-additional-info]] === Learn more From 0e184afad8b8894803c70bccf2ce7fbeb6acf829 Mon Sep 17 00:00:00 2001 From: Ankita Kumar Date: Mon, 28 Oct 2024 09:18:24 -0400 Subject: [PATCH 145/324] Change Reindexing metrics unit from millis to seconds (#115721) Fix the units for reindexing took time metrics from milliseconds to seconds. --- docs/changelog/115721.yaml | 5 +++++ .../main/java/org/elasticsearch/reindex/ReindexMetrics.java | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115721.yaml diff --git a/docs/changelog/115721.yaml b/docs/changelog/115721.yaml new file mode 100644 index 0000000000000..53703dcca872a --- /dev/null +++ b/docs/changelog/115721.yaml @@ -0,0 +1,5 @@ +pr: 115721 +summary: Change Reindexing metrics unit from millis to seconds +area: Reindex +type: enhancement +issues: [] diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java index d985a21815103..f7975120d9fc8 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java @@ -19,7 +19,7 @@ public class ReindexMetrics { private final LongHistogram reindexTimeSecsHistogram; public ReindexMetrics(MeterRegistry meterRegistry) { - this(meterRegistry.registerLongHistogram(REINDEX_TIME_HISTOGRAM, "Time to reindex by search", "millis")); + this(meterRegistry.registerLongHistogram(REINDEX_TIME_HISTOGRAM, "Time to reindex by search", "seconds")); } private ReindexMetrics(LongHistogram reindexTimeSecsHistogram) { From f0adbc6d50255268b4ebe327ceb2e70994525f8a Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 28 Oct 2024 14:18:51 +0100 Subject: [PATCH 146/324] Simplify result handling in FetchSearchPhase (#115723) The results instance does not need to be a field. It's state handling is fairly straight forward, it needs to be released once all fetches have been procesed. No need to even create it on any other path so I split up the method slightly to clearly isolate when and how we need the results instance. Part of #115722 --- .../action/search/FetchSearchPhase.java | 92 ++++++++++--------- 1 file changed, 48 insertions(+), 44 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 7ad61f60c0088..99b24bd483fb4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -33,7 +33,6 @@ * Then it reaches out to all relevant shards to fetch the topN hits. */ final class FetchSearchPhase extends SearchPhase { - private final ArraySearchPhaseResults fetchResults; private final AtomicArray searchPhaseShardResults; private final BiFunction, SearchPhase> nextPhaseFactory; private final SearchPhaseContext context; @@ -79,8 +78,6 @@ final class FetchSearchPhase extends SearchPhase { + resultConsumer.getNumShards() ); } - this.fetchResults = new ArraySearchPhaseResults<>(resultConsumer.getNumShards()); - context.addReleasable(fetchResults); this.searchPhaseShardResults = resultConsumer.getAtomicArray(); this.aggregatedDfs = aggregatedDfs; this.nextPhaseFactory = nextPhaseFactory; @@ -129,48 +126,56 @@ private void innerRun() throws Exception { // we have to release contexts here to free up resources searchPhaseShardResults.asList() .forEach(searchPhaseShardResult -> releaseIrrelevantSearchContext(searchPhaseShardResult, context)); - moveToNextPhase(fetchResults.getAtomicArray(), reducedQueryPhase); + moveToNextPhase(new AtomicArray<>(numShards), reducedQueryPhase); } else { - final boolean shouldExplainRank = shouldExplainRankScores(context.getRequest()); - final List> rankDocsPerShard = false == shouldExplainRank - ? null - : splitRankDocsPerShard(scoreDocs, numShards); - final ScoreDoc[] lastEmittedDocPerShard = context.getRequest().scroll() != null - ? SearchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards) - : null; - final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); - final CountedCollector counter = new CountedCollector<>( - fetchResults, - docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not - () -> moveToNextPhase(fetchResults.getAtomicArray(), reducedQueryPhase), - context - ); - for (int i = 0; i < docIdsToLoad.length; i++) { - List entry = docIdsToLoad[i]; - RankDocShardInfo rankDocs = rankDocsPerShard == null || rankDocsPerShard.get(i).isEmpty() - ? null - : new RankDocShardInfo(rankDocsPerShard.get(i)); - SearchPhaseResult shardPhaseResult = searchPhaseShardResults.get(i); - if (entry == null) { // no results for this shard ID - if (shardPhaseResult != null) { - // if we got some hits from this shard we have to release the context there - // we do this as we go since it will free up resources and passing on the request on the - // transport layer is cheap. - releaseIrrelevantSearchContext(shardPhaseResult, context); - progressListener.notifyFetchResult(i); - } - // in any case we count down this result since we don't talk to this shard anymore - counter.countDown(); - } else { - executeFetch( - shardPhaseResult, - counter, - entry, - rankDocs, - (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[i] : null - ); - } + innerRunFetch(scoreDocs, numShards, reducedQueryPhase); + } + } + } + + private void innerRunFetch(ScoreDoc[] scoreDocs, int numShards, SearchPhaseController.ReducedQueryPhase reducedQueryPhase) { + ArraySearchPhaseResults fetchResults = new ArraySearchPhaseResults<>(numShards); + final List> rankDocsPerShard = false == shouldExplainRankScores(context.getRequest()) + ? null + : splitRankDocsPerShard(scoreDocs, numShards); + final ScoreDoc[] lastEmittedDocPerShard = context.getRequest().scroll() != null + ? SearchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards) + : null; + final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); + final CountedCollector counter = new CountedCollector<>( + fetchResults, + docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not + () -> { + try (fetchResults) { + moveToNextPhase(fetchResults.getAtomicArray(), reducedQueryPhase); + } + }, + context + ); + for (int i = 0; i < docIdsToLoad.length; i++) { + List entry = docIdsToLoad[i]; + RankDocShardInfo rankDocs = rankDocsPerShard == null || rankDocsPerShard.get(i).isEmpty() + ? null + : new RankDocShardInfo(rankDocsPerShard.get(i)); + SearchPhaseResult shardPhaseResult = searchPhaseShardResults.get(i); + if (entry == null) { // no results for this shard ID + if (shardPhaseResult != null) { + // if we got some hits from this shard we have to release the context there + // we do this as we go since it will free up resources and passing on the request on the + // transport layer is cheap. + releaseIrrelevantSearchContext(shardPhaseResult, context); + progressListener.notifyFetchResult(i); } + // in any case we count down this result since we don't talk to this shard anymore + counter.countDown(); + } else { + executeFetch( + shardPhaseResult, + counter, + entry, + rankDocs, + (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[i] : null + ); } } } @@ -257,7 +262,6 @@ private void moveToNextPhase( ) { var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); context.addReleasable(resp::decRef); - fetchResults.close(); context.executeNextPhase(this, nextPhaseFactory.apply(resp, searchPhaseShardResults)); } From 4058daf8b291f486ce7c75ac17b0ccdd31e3cac9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Mon, 28 Oct 2024 14:31:42 +0100 Subject: [PATCH 147/324] =?UTF-8?q?Revert=20"[DOCS]=20Documents=20that=20E?= =?UTF-8?q?LSER=20is=20the=20default=20service=20for=20`semantic=5Ftext?= =?UTF-8?q?=E2=80=A6"=20(#115748)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 541bcf30e5d03944cace8deec24559fc63c8bcb5. --- .../mapping/types/semantic-text.asciidoc | 26 +-------- .../semantic-search-semantic-text.asciidoc | 57 ++++++++++++++++--- 2 files changed, 50 insertions(+), 33 deletions(-) diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index 893e2c6cff8ed..ac23c153e01a3 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -13,47 +13,25 @@ Long passages are <> to smaller secti The `semantic_text` field type specifies an inference endpoint identifier that will be used to generate embeddings. You can create the inference endpoint by using the <>. This field type and the <> type make it simpler to perform semantic search on your data. -If you don't specify an inference endpoint, the <> is used by default. Using `semantic_text`, you won't need to specify how to generate embeddings for your data, or how to index it. The {infer} endpoint automatically determines the embedding generation, indexing, and query to use. -If you use the ELSER service, you can set up `semantic_text` with the following API request: - [source,console] ------------------------------------------------------------ PUT my-index-000001 -{ - "mappings": { - "properties": { - "inference_field": { - "type": "semantic_text" - } - } - } -} ------------------------------------------------------------- - -NOTE: In Serverless, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` even if you use the ELSER service. - -If you use a service other than ELSER, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` as the following example demonstrates: - -[source,console] ------------------------------------------------------------- -PUT my-index-000002 { "mappings": { "properties": { "inference_field": { "type": "semantic_text", - "inference_id": "my-openai-endpoint" <1> + "inference_id": "my-elser-endpoint" } } } } ------------------------------------------------------------ // TEST[skip:Requires inference endpoint] -<1> The `inference_id` of the {infer} endpoint to use to generate embeddings. The recommended way to use semantic_text is by having dedicated {infer} endpoints for ingestion and search. @@ -62,7 +40,7 @@ After creating dedicated {infer} endpoints for both, you can reference them usin [source,console] ------------------------------------------------------------ -PUT my-index-000003 +PUT my-index-000002 { "mappings": { "properties": { diff --git a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc index f881ca87a92e6..60692c19c184a 100644 --- a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc @@ -21,11 +21,45 @@ This tutorial uses the <> for demonstra [[semantic-text-requirements]] ==== Requirements -This tutorial uses the <> for demonstration, which is created automatically as needed. -To use the `semantic_text` field type with an {infer} service other than ELSER, you must create an inference endpoint using the <>. +To use the `semantic_text` field type, you must have an {infer} endpoint deployed in +your cluster using the <>. -NOTE: In Serverless, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` even if you use the ELSER service. +[discrete] +[[semantic-text-infer-endpoint]] +==== Create the {infer} endpoint + +Create an inference endpoint by using the <>: +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/my-elser-endpoint <1> +{ + "service": "elser", <2> + "service_settings": { + "adaptive_allocations": { <3> + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + }, + "num_threads": 1 + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The task type is `sparse_embedding` in the path as the `elser` service will +be used and ELSER creates sparse vectors. The `inference_id` is +`my-elser-endpoint`. +<2> The `elser` service is used in this example. +<3> This setting enables and configures {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations]. +Adaptive allocations make it possible for ELSER to automatically scale up or down resources based on the current load on the process. + +[NOTE] +==== +You might see a 502 bad gateway error in the response when using the {kib} Console. +This error usually just reflects a timeout, while the model downloads in the background. +You can check the download progress in the {ml-app} UI. +If using the Python client, you can set the `timeout` parameter to a higher value. +==== [discrete] [[semantic-text-index-mapping]] @@ -41,7 +75,8 @@ PUT semantic-embeddings "mappings": { "properties": { "content": { <1> - "type": "semantic_text" <2> + "type": "semantic_text", <2> + "inference_id": "my-elser-endpoint" <3> } } } @@ -50,14 +85,18 @@ PUT semantic-embeddings // TEST[skip:TBD] <1> The name of the field to contain the generated embeddings. <2> The field to contain the embeddings is a `semantic_text` field. -Since no `inference_id` is provided, the <> is used by default. -To use a different {infer} service, you must create an {infer} endpoint first using the <> and then specify it in the `semantic_text` field mapping using the `inference_id` parameter. +<3> The `inference_id` is the inference endpoint you created in the previous step. +It will be used to generate the embeddings based on the input text. +Every time you ingest data into the related `semantic_text` field, this endpoint will be used for creating the vector representation of the text. [NOTE] ==== -If you're using web crawlers or connectors to generate indices, you have to <> for these indices to include the `semantic_text` field. -Once the mapping is updated, you'll need to run a full web crawl or a full connector sync. -This ensures that all existing documents are reprocessed and updated with the new semantic embeddings, enabling semantic search on the updated data. +If you're using web crawlers or connectors to generate indices, you have to +<> for these indices to +include the `semantic_text` field. Once the mapping is updated, you'll need to run +a full web crawl or a full connector sync. This ensures that all existing +documents are reprocessed and updated with the new semantic embeddings, +enabling semantic search on the updated data. ==== From d67d8eacfecd89a55dd4647e19382e5f65b63844 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Mon, 28 Oct 2024 14:32:02 +0100 Subject: [PATCH 148/324] [DOCS] Comments out default inference config docs. (#115742) --- .../inference/inference-apis.asciidoc | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 38afc7c416f18..037d7abeb2a36 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -35,21 +35,21 @@ Elastic –, then create an {infer} endpoint by the <>. Now use <> to perform <> on your data. -[discrete] -[[default-enpoints]] -=== Default {infer} endpoints +//[discrete] +//[[default-enpoints]] +//=== Default {infer} endpoints -Your {es} deployment contains some preconfigured {infer} endpoints that makes it easier for you to use them when defining `semantic_text` fields or {infer} processors. -The following list contains the default {infer} endpoints listed by `inference_id`: +//Your {es} deployment contains some preconfigured {infer} endpoints that makes it easier for you to use them when defining `semantic_text` fields or {infer} processors. +//The following list contains the default {infer} endpoints listed by `inference_id`: -* `.elser-2-elasticsearch`: uses the {ml-docs}/ml-nlp-elser.html[ELSER] built-in trained model for `sparse_embedding` tasks (recommended for English language texts) -* `.multilingual-e5-small-elasticsearch`: uses the {ml-docs}/ml-nlp-e5.html[E5] built-in trained model for `text_embedding` tasks (recommended for non-English language texts) +//* `.elser-2-elasticsearch`: uses the {ml-docs}/ml-nlp-elser.html[ELSER] built-in trained model for `sparse_embedding` tasks (recommended for English language texts) +//* `.multilingual-e5-small-elasticsearch`: uses the {ml-docs}/ml-nlp-e5.html[E5] built-in trained model for `text_embedding` tasks (recommended for non-English language texts) -Use the `inference_id` of the endpoint in a <> field definition or when creating an <>. -The API call will automatically download and deploy the model which might take a couple of minutes. -Default {infer} enpoints have {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations] enabled. -For these models, the minimum number of allocations is `0`. -If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes. +//Use the `inference_id` of the endpoint in a <> field definition or when creating an <>. +//The API call will automatically download and deploy the model which might take a couple of minutes. +//Default {infer} enpoints have {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations] enabled. +//For these models, the minimum number of allocations is `0`. +//If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes. [discrete] From 2b6828ddcdc962001ab46c4ab07d8277c740deb2 Mon Sep 17 00:00:00 2001 From: Marci W <333176+marciw@users.noreply.github.com> Date: Mon, 28 Oct 2024 10:14:40 -0400 Subject: [PATCH 149/324] Document ?_tstart and ?_tend in Kibana (#114965) * Document ?_tstart and ?_tend in Kibana * Edits: restructure, be clearer --- docs/reference/esql/esql-kibana.asciidoc | 40 +++++++++++++++++++++--- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/docs/reference/esql/esql-kibana.asciidoc b/docs/reference/esql/esql-kibana.asciidoc index 5da8b9323cc20..9850e012fc049 100644 --- a/docs/reference/esql/esql-kibana.asciidoc +++ b/docs/reference/esql/esql-kibana.asciidoc @@ -171,14 +171,44 @@ FROM kibana_sample_data_logs [[esql-kibana-time-filter]] === Time filtering -To display data within a specified time range, use the -{kibana-ref}/set-time-filter.html[time filter]. The time filter is only enabled -when the indices you're querying have a field called `@timestamp`. +To display data within a specified time range, you can use the standard time filter, +custom time parameters, or a WHERE command. -If your indices do not have a timestamp field called `@timestamp`, you can limit -the time range using the <> command and the <> function. +[discrete] +==== Standard time filter +The standard {kibana-ref}/set-time-filter.html[time filter] is enabled +when the indices you're querying have a field named `@timestamp`. + +[discrete] +==== Custom time parameters +If your indices do not have a field named `@timestamp`, you can use +the `?_tstart` and `?_tend` parameters to specify a time range. These parameters +work with any timestamp field and automatically sync with the {kibana-ref}/set-time-filter.html[time filter]. + +[source,esql] +---- +FROM my_index +| WHERE custom_timestamp >= ?_tstart AND custom_timestamp < ?_tend +---- + +You can also use the `?_tstart` and `?_tend` parameters with the <> function +to create auto-incrementing time buckets in {esql} <>. +For example: + +[source,esql] +---- +FROM kibana_sample_data_logs +| STATS average_bytes = AVG(bytes) BY BUCKET(@timestamp, 50, ?_tstart, ?_tend) +---- + +This example uses `50` buckets, which is the maximum number of buckets. + +[discrete] +==== WHERE command +You can also limit the time range using the <> command and the <> function. For example, if the timestamp field is called `timestamp`, to query the last 15 minutes of data: + [source,esql] ---- FROM kibana_sample_data_logs From 35808be7d810b26e35d6889d3024d22b0dd7a471 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 29 Oct 2024 01:16:25 +1100 Subject: [PATCH 150/324] Mute org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT testGeoShapeGeoHex #115705 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 3a59af6234038..b69455a68790b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -291,6 +291,9 @@ tests: - class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT method: testGeoShapeGeoTile issue: https://github.com/elastic/elasticsearch/issues/115717 +- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT + method: testGeoShapeGeoHex + issue: https://github.com/elastic/elasticsearch/issues/115705 # Examples: # From 7efa5a36c91913178e3fc5813cd4f39ef44e4123 Mon Sep 17 00:00:00 2001 From: Dan Rubinstein Date: Mon, 28 Oct 2024 10:29:54 -0400 Subject: [PATCH 151/324] Unmute TestFeatureLicenseTrackingIT testFeatureTrackingInferenceModelPipeline (#115340) Co-authored-by: Elastic Machine --- .../xpack/ml/integration/TestFeatureLicenseTrackingIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java index ce270c570c8cd..5ccc3d64daf1d 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java @@ -107,7 +107,6 @@ public void testFeatureTrackingAnomalyJob() throws Exception { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102381") public void testFeatureTrackingInferenceModelPipeline() throws Exception { String modelId = "test-load-models-classification-license-tracking"; Map oneHotEncoding = new HashMap<>(); From 15a3a4e353ff136c9f543c5b9fc6fca0631acb2a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 28 Oct 2024 07:34:57 -0700 Subject: [PATCH 152/324] Remove XPackFeatureSet interface (#115679) XPackFeatureSet hasn't been used for many years. But the inner "Usage" class is still used. This commit moves the Usage class up to its own file as XPackFeatureUsage, and removes the defunct XPackFeatureSet interface. closes #29736 --- .../AnalyticsInfoTransportActionTests.java | 10 +- .../xpack/ccr/CCRInfoTransportAction.java | 4 +- .../java/org/elasticsearch/xpack/ccr/Ccr.java | 4 +- .../action/XPackUsageRestCancellationIT.java | 4 +- .../xpack/core/HealthApiFeatureSetUsage.java | 2 +- .../core/RemoteClusterFeatureSetUsage.java | 2 +- .../xpack/core/XPackClientPlugin.java | 62 ++++++------ .../xpack/core/XPackFeatureSet.java | 83 ---------------- .../xpack/core/XPackFeatureUsage.java | 74 ++++++++++++++ .../action/TransportXPackUsageAction.java | 4 +- .../action/XPackUsageFeatureResponse.java | 10 +- .../xpack/core/action/XPackUsageResponse.java | 14 +-- .../AggregateMetricFeatureSetUsage.java | 4 +- .../analytics/AnalyticsFeatureSetUsage.java | 4 +- .../EnterpriseSearchFeatureSetUsage.java | 4 +- .../core/application/ProfilingUsage.java | 4 +- .../core/archive/ArchiveFeatureSetUsage.java | 4 +- .../DataStreamFeatureSetUsage.java | 4 +- .../DataStreamLifecycleFeatureSetUsage.java | 4 +- .../datatiers/DataTiersFeatureSetUsage.java | 4 +- .../core/enrich/EnrichFeatureSetUsage.java | 4 +- .../xpack/core/eql/EqlFeatureSetUsage.java | 4 +- .../xpack/core/esql/EsqlFeatureSetUsage.java | 4 +- .../frozen/FrozenIndicesFeatureSetUsage.java | 4 +- .../core/graph/GraphFeatureSetUsage.java | 4 +- .../ilm/IndexLifecycleFeatureSetUsage.java | 4 +- .../inference/InferenceFeatureSetUsage.java | 4 +- .../logstash/LogstashFeatureSetUsage.java | 4 +- .../ml/MachineLearningFeatureSetUsage.java | 4 +- .../monitoring/MonitoringFeatureSetUsage.java | 4 +- .../rest/action/RestXPackUsageAction.java | 4 +- .../core/rollup/RollupFeatureSetUsage.java | 4 +- .../SearchableSnapshotFeatureSetUsage.java | 4 +- .../security/SecurityFeatureSetUsage.java | 4 +- .../xpack/core/slm/SLMFeatureSetUsage.java | 4 +- .../core/spatial/SpatialFeatureSetUsage.java | 4 +- .../xpack/core/sql/SqlFeatureSetUsage.java | 4 +- .../transform/TransformFeatureSetUsage.java | 4 +- .../VotingOnlyNodeFeatureSetUsage.java | 4 +- .../core/watcher/WatcherFeatureSetUsage.java | 4 +- .../core/action/XPackUsageResponseTests.java | 14 +-- .../graph/GraphInfoTransportActionTests.java | 10 +- .../TransportInferenceUsageActionTests.java | 4 +- .../LogstashInfoTransportActionTests.java | 10 +- ...chineLearningInfoTransportActionTests.java | 42 ++++---- .../cluster/ClusterStatsCollector.java | 4 +- .../cluster/ClusterStatsMonitoringDoc.java | 10 +- .../MonitoringInfoTransportActionTests.java | 6 +- .../ClusterStatsMonitoringDocTests.java | 6 +- .../authc/esnative/NativeRealmIntegTests.java | 4 +- .../SecurityInfoTransportActionTests.java | 8 +- .../SpatialInfoTransportActionTests.java | 10 +- .../VotingOnlyInfoTransportAction.java | 38 +++++++ .../votingonly/VotingOnlyNodeFeatureSet.java | 99 ------------------- .../votingonly/VotingOnlyNodePlugin.java | 4 +- .../VotingOnlyUsageTransportAction.java | 55 +++++++++++ .../WatcherInfoTransportActionTests.java | 6 +- 57 files changed, 348 insertions(+), 367 deletions(-) delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureUsage.java create mode 100644 x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyInfoTransportAction.java delete mode 100644 x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodeFeatureSet.java create mode 100644 x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyUsageTransportAction.java diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java index eb7cc9c51c62e..77d3d5a5577b0 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.test.MockUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.analytics.AnalyticsFeatureSetUsage; import org.elasticsearch.xpack.core.analytics.action.AnalyticsStatsAction; @@ -75,12 +75,12 @@ public void testAvailable() throws Exception { ); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(task, null, clusterState, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(true)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new AnalyticsFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new AnalyticsFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.available(), is(true)); verify(client, times(1)).execute(any(), any(), any()); verifyNoMoreInteractions(client); @@ -103,12 +103,12 @@ public void testEnabled() throws Exception { ); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(task, null, clusterState, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertTrue(usage.enabled()); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new AnalyticsFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new AnalyticsFeatureSetUsage(out.bytes().streamInput()); assertTrue(serializedUsage.enabled()); verify(client, times(1)).execute(any(), any(), any()); verifyNoMoreInteractions(client); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java index d4e31f25a0a91..6e6b54af4d0e1 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; @@ -58,7 +58,7 @@ public boolean enabled() { return enabled; } - public static class Usage extends XPackFeatureSet.Usage { + public static class Usage extends XPackFeatureUsage { private final int numberOfFollowerIndices; private final int numberOfAutoFollowPatterns; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 395af34c59b3a..87a4c2c7d4826 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -92,7 +92,7 @@ import org.elasticsearch.xpack.ccr.rest.RestResumeAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.rest.RestResumeFollowAction; import org.elasticsearch.xpack.ccr.rest.RestUnfollowAction; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; @@ -306,7 +306,7 @@ public List getNamedWriteables() { ), // usage api - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.CCR, CCRInfoTransportAction.Usage::new) + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.CCR, CCRInfoTransportAction.Usage::new) ); } diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/XPackUsageRestCancellationIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/XPackUsageRestCancellationIT.java index f17a89774f71d..a9c780ccd468c 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/XPackUsageRestCancellationIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/XPackUsageRestCancellationIT.java @@ -34,7 +34,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.netty4.Netty4Plugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.TransportXPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; @@ -168,7 +168,7 @@ protected void masterOperation( ) throws Exception { blockingXPackUsageActionExecuting.countDown(); blockActionLatch.await(); - listener.onResponse(new XPackUsageFeatureResponse(new XPackFeatureSet.Usage("test", false, false) { + listener.onResponse(new XPackUsageFeatureResponse(new XPackFeatureUsage("test", false, false) { @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersion.current(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java index ac261270db6d6..6ec0f3f1c0d95 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java @@ -71,7 +71,7 @@ * "enabled": true * } */ -public class HealthApiFeatureSetUsage extends XPackFeatureSet.Usage { +public class HealthApiFeatureSetUsage extends XPackFeatureUsage { private final Map usageStats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java index 3a75ce34e22bf..de657e78c4302 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java @@ -17,7 +17,7 @@ import java.io.IOException; import java.util.List; -public class RemoteClusterFeatureSetUsage extends XPackFeatureSet.Usage { +public class RemoteClusterFeatureSetUsage extends XPackFeatureUsage { private final List remoteConnectionInfos; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 2e806a24ad469..9004239478bdf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -136,19 +136,19 @@ public List> getSettings() { public List getNamedWriteables() { return Stream.of( // graph - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.GRAPH, GraphFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.GRAPH, GraphFeatureSetUsage::new), // logstash - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.LOGSTASH, LogstashFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.LOGSTASH, LogstashFeatureSetUsage::new), // ML - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MACHINE_LEARNING, MachineLearningFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.MACHINE_LEARNING, MachineLearningFeatureSetUsage::new), // inference - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INFERENCE, InferenceFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.INFERENCE, InferenceFeatureSetUsage::new), // monitoring - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MONITORING, MonitoringFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.MONITORING, MonitoringFeatureSetUsage::new), // security new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetadata.TYPE, TokenMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, TokenMetadata.TYPE, TokenMetadata::readDiffFrom), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SECURITY, SecurityFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.SECURITY, SecurityFeatureSetUsage::new), // security : configurable cluster privileges new NamedWriteableRegistry.Entry( ConfigurableClusterPrivilege.class, @@ -180,20 +180,20 @@ public List getNamedWriteables() { RemoteClusterPermissionGroup::new ), // eql - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.EQL, EqlFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.EQL, EqlFeatureSetUsage::new), // esql - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ESQL, EsqlFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ESQL, EsqlFeatureSetUsage::new), // sql - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SQL, SqlFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.SQL, SqlFeatureSetUsage::new), // watcher new NamedWriteableRegistry.Entry(Metadata.Custom.class, WatcherMetadata.TYPE, WatcherMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, WatcherMetadata.TYPE, WatcherMetadata::readDiffFrom), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.WATCHER, WatcherFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.WATCHER, WatcherFeatureSetUsage::new), // licensing new NamedWriteableRegistry.Entry(Metadata.Custom.class, LicensesMetadata.TYPE, LicensesMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, LicensesMetadata.TYPE, LicensesMetadata::readDiffFrom), // rollup - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ROLLUP, RollupFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ROLLUP, RollupFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, RollupJob.NAME, RollupJob::new), new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new), new NamedWriteableRegistry.Entry(PersistentTaskState.class, RollupJobStatus.NAME, RollupJobStatus::new), @@ -207,9 +207,9 @@ public List getNamedWriteables() { in -> AutoFollowMetadata.readDiffFrom(Metadata.Custom.class, AutoFollowMetadata.TYPE, in) ), // ILM - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX_LIFECYCLE, IndexLifecycleFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.INDEX_LIFECYCLE, IndexLifecycleFeatureSetUsage::new), // SLM - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SNAPSHOT_LIFECYCLE, SLMFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.SNAPSHOT_LIFECYCLE, SLMFeatureSetUsage::new), // ILM - Custom Metadata new NamedWriteableRegistry.Entry(Metadata.Custom.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata::new), new NamedWriteableRegistry.Entry( @@ -247,7 +247,7 @@ public List getNamedWriteables() { // Transforms new NamedWriteableRegistry.Entry(Metadata.Custom.class, TransformMetadata.TYPE, TransformMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, TransformMetadata.TYPE, TransformMetadata.TransformMetadataDiff::new), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.TRANSFORM, TransformFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.TRANSFORM, TransformFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TransformField.TASK_NAME, TransformTaskParams::new), new NamedWriteableRegistry.Entry(Task.Status.class, TransformField.TASK_NAME, TransformState::new), new NamedWriteableRegistry.Entry(PersistentTaskState.class, TransformField.TASK_NAME, TransformState::new), @@ -263,48 +263,44 @@ public List getNamedWriteables() { i -> NullRetentionPolicyConfig.INSTANCE ), // Voting Only Node - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.VOTING_ONLY, VotingOnlyNodeFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.VOTING_ONLY, VotingOnlyNodeFeatureSetUsage::new), // Frozen indices - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.FROZEN_INDICES, FrozenIndicesFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.FROZEN_INDICES, FrozenIndicesFeatureSetUsage::new), // Spatial - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SPATIAL, SpatialFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.SPATIAL, SpatialFeatureSetUsage::new), // Analytics - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new), // Aggregate metric field type - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.AGGREGATE_METRIC, AggregateMetricFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.AGGREGATE_METRIC, AggregateMetricFeatureSetUsage::new), // Enrich - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ENRICH, EnrichFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ENRICH, EnrichFeatureSetUsage::new), new NamedWriteableRegistry.Entry(Task.Status.class, ExecuteEnrichPolicyStatus.NAME, ExecuteEnrichPolicyStatus::new), // Searchable snapshots new NamedWriteableRegistry.Entry( - XPackFeatureSet.Usage.class, + XPackFeatureUsage.class, XPackField.SEARCHABLE_SNAPSHOTS, SearchableSnapshotFeatureSetUsage::new ), // Data Streams - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_STREAMS, DataStreamFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.DATA_STREAMS, DataStreamFeatureSetUsage::new), new NamedWriteableRegistry.Entry( - XPackFeatureSet.Usage.class, + XPackFeatureUsage.class, XPackField.DATA_STREAM_LIFECYCLE, DataStreamLifecycleFeatureSetUsage::new ), // Data Tiers - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_TIERS, DataTiersFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.DATA_TIERS, DataTiersFeatureSetUsage::new), // Archive - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ARCHIVE, ArchiveFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ARCHIVE, ArchiveFeatureSetUsage::new), // TSDB Downsampling new NamedWriteableRegistry.Entry(LifecycleAction.class, DownsampleAction.NAME, DownsampleAction::new), // Health API usage - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.HEALTH_API, HealthApiFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.HEALTH_API, HealthApiFeatureSetUsage::new), // Remote cluster usage - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.REMOTE_CLUSTERS, RemoteClusterFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.REMOTE_CLUSTERS, RemoteClusterFeatureSetUsage::new), // Enterprise Search - new NamedWriteableRegistry.Entry( - XPackFeatureSet.Usage.class, - XPackField.ENTERPRISE_SEARCH, - EnterpriseSearchFeatureSetUsage::new - ), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.UNIVERSAL_PROFILING, ProfilingUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ENTERPRISE_SEARCH, EnterpriseSearchFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.UNIVERSAL_PROFILING, ProfilingUsage::new), new NamedWriteableRegistry.Entry( PersistentTaskParams.class, SecurityMigrationTaskParams.TASK_NAME, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java deleted file mode 100644 index 7593bc9af2902..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.core; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.VersionedNamedWriteable; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Objects; - -public interface XPackFeatureSet { - - String name(); - - boolean available(); - - boolean enabled(); - - abstract class Usage implements ToXContentObject, VersionedNamedWriteable { - - private static final String AVAILABLE_XFIELD = "available"; - private static final String ENABLED_XFIELD = "enabled"; - - protected final String name; - protected final boolean available; - protected final boolean enabled; - - public Usage(StreamInput input) throws IOException { - this(input.readString(), input.readBoolean(), input.readBoolean()); - } - - public Usage(String name, boolean available, boolean enabled) { - Objects.requireNonNull(name); - this.name = name; - this.available = available; - this.enabled = enabled; - } - - public String name() { - return name; - } - - public boolean available() { - return available; - } - - public boolean enabled() { - return enabled; - } - - @Override - public String getWriteableName() { - return name; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - out.writeBoolean(available); - out.writeBoolean(enabled); - } - - @Override - public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - innerXContent(builder, params); - return builder.endObject(); - } - - protected void innerXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(AVAILABLE_XFIELD, available); - builder.field(ENABLED_XFIELD, enabled); - } - } - -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureUsage.java new file mode 100644 index 0000000000000..0cbafd7972ebe --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureUsage.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public abstract class XPackFeatureUsage implements ToXContentObject, VersionedNamedWriteable { + + private static final String AVAILABLE_XFIELD = "available"; + private static final String ENABLED_XFIELD = "enabled"; + + protected final String name; + protected final boolean available; + protected final boolean enabled; + + public XPackFeatureUsage(StreamInput input) throws IOException { + this(input.readString(), input.readBoolean(), input.readBoolean()); + } + + public XPackFeatureUsage(String name, boolean available, boolean enabled) { + Objects.requireNonNull(name); + this.name = name; + this.available = available; + this.enabled = enabled; + } + + public String name() { + return name; + } + + public boolean available() { + return available; + } + + public boolean enabled() { + return enabled; + } + + @Override + public String getWriteableName() { + return name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeBoolean(available); + out.writeBoolean(enabled); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerXContent(builder, params); + return builder.endObject(); + } + + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(AVAILABLE_XFIELD, available); + builder.field(ENABLED_XFIELD, enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java index 3f26dfdd78ca8..e78b8786e92e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import java.util.ArrayList; import java.util.List; @@ -64,7 +64,7 @@ protected List> usageActions() { @Override protected void masterOperation(Task task, XPackUsageRequest request, ClusterState state, ActionListener listener) { new ActionRunnable<>(listener) { - final List responses = new ArrayList<>(usageActions.size()); + final List responses = new ArrayList<>(usageActions.size()); @Override protected void doRun() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java index 71bb9993f3a29..0cacf67e7b309 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java @@ -9,24 +9,24 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import java.io.IOException; public class XPackUsageFeatureResponse extends ActionResponse { - private final XPackFeatureSet.Usage usage; + private final XPackFeatureUsage usage; public XPackUsageFeatureResponse(StreamInput in) throws IOException { super(in); - usage = in.readNamedWriteable(XPackFeatureSet.Usage.class); + usage = in.readNamedWriteable(XPackFeatureUsage.class); } - public XPackUsageFeatureResponse(XPackFeatureSet.Usage usage) { + public XPackUsageFeatureResponse(XPackFeatureUsage usage) { this.usage = usage; } - public XPackFeatureSet.Usage getUsage() { + public XPackFeatureUsage getUsage() { return usage; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java index e6ed59539d161..6301d29316f25 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import java.io.IOException; import java.util.List; @@ -17,30 +17,30 @@ public class XPackUsageResponse extends ActionResponse { - private final List usages; + private final List usages; - public XPackUsageResponse(final List usages) { + public XPackUsageResponse(final List usages) { this.usages = Objects.requireNonNull(usages); } public XPackUsageResponse(final StreamInput in) throws IOException { - usages = in.readNamedWriteableCollectionAsList(XPackFeatureSet.Usage.class); + usages = in.readNamedWriteableCollectionAsList(XPackFeatureUsage.class); } - public List getUsages() { + public List getUsages() { return usages; } @Override public void writeTo(final StreamOutput out) throws IOException { // we can only write the usages with version the coordinating node is compatible with otherwise it will not know the named writeable - final List usagesToWrite = usages.stream() + final List usagesToWrite = usages.stream() .filter(usage -> out.getTransportVersion().onOrAfter(usage.getMinimalSupportedVersion())) .toList(); writeTo(out, usagesToWrite); } - private static void writeTo(final StreamOutput out, final List usages) throws IOException { + private static void writeTo(final StreamOutput out, final List usages) throws IOException { out.writeNamedWriteableCollection(usages); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java index 56a2fad47cf2f..5505cf3271b8b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java @@ -10,13 +10,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class AggregateMetricFeatureSetUsage extends XPackFeatureSet.Usage { +public class AggregateMetricFeatureSetUsage extends XPackFeatureUsage { public AggregateMetricFeatureSetUsage(StreamInput input) throws IOException { super(input); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java index 89bd749f2ea1d..8d2fd2ecc0870 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java @@ -12,14 +12,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.analytics.action.AnalyticsStatsAction; import java.io.IOException; import java.util.Objects; -public class AnalyticsFeatureSetUsage extends XPackFeatureSet.Usage { +public class AnalyticsFeatureSetUsage extends XPackFeatureUsage { private final AnalyticsStatsAction.Response response; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java index 45b9d557b72b3..b1dac4898945d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; @@ -20,7 +20,7 @@ import java.util.Map; import java.util.Objects; -public class EnterpriseSearchFeatureSetUsage extends XPackFeatureSet.Usage { +public class EnterpriseSearchFeatureSetUsage extends XPackFeatureUsage { static final TransportVersion BEHAVIORAL_ANALYTICS_TRANSPORT_VERSION = TransportVersions.V_8_8_1; static final TransportVersion QUERY_RULES_TRANSPORT_VERSION = TransportVersions.V_8_10_X; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/ProfilingUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/ProfilingUsage.java index a487bbb4e27df..5f46e5aa2d4c2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/ProfilingUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/ProfilingUsage.java @@ -10,12 +10,12 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class ProfilingUsage extends XPackFeatureSet.Usage { +public class ProfilingUsage extends XPackFeatureUsage { public ProfilingUsage(StreamInput input) throws IOException { super(input); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java index 4201a30034786..5f7c7554f78f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java @@ -13,13 +13,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class ArchiveFeatureSetUsage extends XPackFeatureSet.Usage { +public class ArchiveFeatureSetUsage extends XPackFeatureUsage { private final int numberOfArchiveIndices; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java index e267c94e06892..1a964f3c57dbb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java @@ -15,13 +15,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class DataStreamFeatureSetUsage extends XPackFeatureSet.Usage { +public class DataStreamFeatureSetUsage extends XPackFeatureUsage { private final DataStreamStats streamStats; public DataStreamFeatureSetUsage(StreamInput input) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java index 4c550c69e4c09..7a31888a440c3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java @@ -16,7 +16,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; @@ -24,7 +24,7 @@ import java.util.Map; import java.util.Objects; -public class DataStreamLifecycleFeatureSetUsage extends XPackFeatureSet.Usage { +public class DataStreamLifecycleFeatureSetUsage extends XPackFeatureUsage { public static final DataStreamLifecycleFeatureSetUsage DISABLED = new DataStreamLifecycleFeatureSetUsage(); final LifecycleStats lifecycleStats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java index f990118763bad..a33dd7dff3469 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java @@ -16,7 +16,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; @@ -30,7 +30,7 @@ * See {@link TierSpecificStats} for the stats that are tracked on a per-tier * basis. */ -public class DataTiersFeatureSetUsage extends XPackFeatureSet.Usage { +public class DataTiersFeatureSetUsage extends XPackFeatureUsage { private final Map tierStats; public DataTiersFeatureSetUsage(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java index ab058909761d7..819b3d86b68c8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java @@ -10,12 +10,12 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class EnrichFeatureSetUsage extends XPackFeatureSet.Usage { +public class EnrichFeatureSetUsage extends XPackFeatureUsage { public EnrichFeatureSetUsage() { super(XPackField.ENRICH, true, true); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java index 6285840b66039..0edbda79ed975 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java @@ -12,13 +12,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Map; -public class EqlFeatureSetUsage extends XPackFeatureSet.Usage { +public class EqlFeatureSetUsage extends XPackFeatureUsage { private final Map stats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java index 5707bc054e58f..665f8e7952363 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java @@ -12,13 +12,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Map; -public class EsqlFeatureSetUsage extends XPackFeatureSet.Usage { +public class EsqlFeatureSetUsage extends XPackFeatureUsage { private final Map stats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java index a8702560e4804..b8b6877e877fa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java @@ -11,13 +11,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class FrozenIndicesFeatureSetUsage extends XPackFeatureSet.Usage { +public class FrozenIndicesFeatureSetUsage extends XPackFeatureUsage { private final int numberOfFrozenIndices; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java index 2ac1c11ce9147..b046efaa30082 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java @@ -9,12 +9,12 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class GraphFeatureSetUsage extends XPackFeatureSet.Usage { +public class GraphFeatureSetUsage extends XPackFeatureUsage { public GraphFeatureSetUsage(StreamInput input) throws IOException { super(input); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java index cc2e54e5be247..822f15d1ed74a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java @@ -18,7 +18,7 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; @@ -27,7 +27,7 @@ import java.util.Map; import java.util.Objects; -public class IndexLifecycleFeatureSetUsage extends XPackFeatureSet.Usage { +public class IndexLifecycleFeatureSetUsage extends XPackFeatureUsage { private List policyStats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/InferenceFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/InferenceFeatureSetUsage.java index 61409f59f9d85..00bb8087c9fb3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/InferenceFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/InferenceFeatureSetUsage.java @@ -15,14 +15,14 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Collection; import java.util.Objects; -public class InferenceFeatureSetUsage extends XPackFeatureSet.Usage { +public class InferenceFeatureSetUsage extends XPackFeatureUsage { public static class ModelStats implements ToXContentObject, Writeable { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java index a83b8439aa612..f3f0214b89c04 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java @@ -9,12 +9,12 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class LogstashFeatureSetUsage extends XPackFeatureSet.Usage { +public class LogstashFeatureSetUsage extends XPackFeatureUsage { public LogstashFeatureSetUsage(StreamInput in) throws IOException { super(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java index 60484675ec90b..0645299dfc30e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java @@ -11,14 +11,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Map; import java.util.Objects; -public class MachineLearningFeatureSetUsage extends XPackFeatureSet.Usage { +public class MachineLearningFeatureSetUsage extends XPackFeatureUsage { public static final String ALL = "_all"; public static final String JOBS_FIELD = "jobs"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java index 4c0f347a3ffed..b181bd78dfe41 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java @@ -12,14 +12,14 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Collections; import java.util.Map; -public class MonitoringFeatureSetUsage extends XPackFeatureSet.Usage { +public class MonitoringFeatureSetUsage extends XPackFeatureUsage { @Nullable private Boolean collectionEnabled; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java index ca57dbec5bef5..d373a9e0bf3ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageResponse; @@ -52,7 +52,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client @Override public RestResponse buildResponse(XPackUsageResponse response, XContentBuilder builder) throws Exception { builder.startObject(); - for (XPackFeatureSet.Usage usage : response.getUsages()) { + for (XPackFeatureUsage usage : response.getUsages()) { builder.field(usage.name(), usage); } builder.endObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java index 6b4ee3c1fdd16..82253ba08165a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java @@ -11,12 +11,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class RollupFeatureSetUsage extends XPackFeatureSet.Usage { +public class RollupFeatureSetUsage extends XPackFeatureUsage { private final int numberOfRollupJobs; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java index e1644e5113a27..d7d0320b602b4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java @@ -13,13 +13,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class SearchableSnapshotFeatureSetUsage extends XPackFeatureSet.Usage { +public class SearchableSnapshotFeatureSetUsage extends XPackFeatureUsage { private final int numberOfSearchableSnapshotIndices; private final int numberOfFullCopySearchableSnapshotIndices; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java index c88e13f80ba01..2793ddea3bd06 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -11,14 +11,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Collections; import java.util.Map; -public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { +public class SecurityFeatureSetUsage extends XPackFeatureUsage { private static final String REALMS_XFIELD = "realms"; private static final String ROLES_XFIELD = "roles"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java index 289c76714b731..099eaa2468e1c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java @@ -13,13 +13,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class SLMFeatureSetUsage extends XPackFeatureSet.Usage { +public class SLMFeatureSetUsage extends XPackFeatureUsage { @Nullable private final SnapshotLifecycleStats slmStats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java index 380c0e97d30dd..ac495ddebab3c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java @@ -11,14 +11,14 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.spatial.action.SpatialStatsAction; import java.io.IOException; import java.util.Objects; -public class SpatialFeatureSetUsage extends XPackFeatureSet.Usage { +public class SpatialFeatureSetUsage extends XPackFeatureUsage { private final SpatialStatsAction.Response statsResponse; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java index a431dab0b34d5..2f41c8d2e2bb6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java @@ -12,13 +12,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Map; -public class SqlFeatureSetUsage extends XPackFeatureSet.Usage { +public class SqlFeatureSetUsage extends XPackFeatureUsage { private final Map stats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java index 66c97876c1f6f..e4c15a3b9007c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; @@ -22,7 +22,7 @@ import java.util.Map.Entry; import java.util.Objects; -public class TransformFeatureSetUsage extends Usage { +public class TransformFeatureSetUsage extends XPackFeatureUsage { private static final String FEATURE_COUNTS = "feature_counts"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java index cf9d2499b60b7..6d8ed0e33d7d8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java @@ -9,12 +9,12 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class VotingOnlyNodeFeatureSetUsage extends XPackFeatureSet.Usage { +public class VotingOnlyNodeFeatureSetUsage extends XPackFeatureUsage { public VotingOnlyNodeFeatureSetUsage(StreamInput input) throws IOException { super(input); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java index fce9399a9bf01..77280c727be3b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java @@ -11,13 +11,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Map; -public class WatcherFeatureSetUsage extends XPackFeatureSet.Usage { +public class WatcherFeatureSetUsage extends XPackFeatureUsage { private final Map stats; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/XPackUsageResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/XPackUsageResponseTests.java index 184a4fc064441..fee3a091a9f4e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/XPackUsageResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/XPackUsageResponseTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.junit.BeforeClass; import java.io.IOException; @@ -42,7 +42,7 @@ public static void setVersion() { ); } - public static class OldUsage extends XPackFeatureSet.Usage { + public static class OldUsage extends XPackFeatureUsage { public OldUsage() { super("old", randomBoolean(), randomBoolean()); @@ -59,7 +59,7 @@ public TransportVersion getMinimalSupportedVersion() { } - public static class NewUsage extends XPackFeatureSet.Usage { + public static class NewUsage extends XPackFeatureUsage { public NewUsage() { super("new", randomBoolean(), randomBoolean()); @@ -84,8 +84,8 @@ public void testVersionDependentSerializationWriteToOldStream() throws IOExcepti final NamedWriteableRegistry registry = new NamedWriteableRegistry( List.of( - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, "old", OldUsage::new), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, "new", NewUsage::new) + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, "old", OldUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, "new", NewUsage::new) ) ); @@ -103,8 +103,8 @@ public void testVersionDependentSerializationWriteToNewStream() throws IOExcepti final NamedWriteableRegistry registry = new NamedWriteableRegistry( List.of( - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, "old", OldUsage::new), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, "new", NewUsage::new) + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, "old", OldUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, "new", NewUsage::new) ) ); diff --git a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/GraphInfoTransportActionTests.java b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/GraphInfoTransportActionTests.java index 90034c9620923..db6bfe798568d 100644 --- a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/GraphInfoTransportActionTests.java +++ b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/GraphInfoTransportActionTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.test.MockUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage; import org.junit.Before; @@ -56,12 +56,12 @@ public void testAvailable() throws Exception { ); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, null, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(available)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new GraphFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new GraphFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.available(), is(available)); } @@ -96,12 +96,12 @@ public void testEnabled() throws Exception { ); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, null, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.enabled(), is(enabled)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new GraphFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new GraphFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.enabled(), is(enabled)); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageActionTests.java index b0c59fe160be3..bf173432f3d91 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageActionTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.inference.InferenceFeatureSetUsage; @@ -98,7 +98,7 @@ public void test() throws Exception { BytesStreamOutput out = new BytesStreamOutput(); future.get().getUsage().writeTo(out); - XPackFeatureSet.Usage usage = new InferenceFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage usage = new InferenceFeatureSetUsage(out.bytes().streamInput()); assertThat(usage.name(), is(XPackField.INFERENCE)); assertTrue(usage.enabled()); diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/LogstashInfoTransportActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/LogstashInfoTransportActionTests.java index 428d3bfeaa5ee..cfcefbaee461a 100644 --- a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/LogstashInfoTransportActionTests.java +++ b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/LogstashInfoTransportActionTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.logstash.LogstashFeatureSetUsage; @@ -36,11 +36,11 @@ public void testEnabledDefault() throws Exception { LogstashUsageTransportAction usageAction = newUsageAction(false); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, null, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new LogstashFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new LogstashFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.enabled(), is(true)); } @@ -54,12 +54,12 @@ public void testAvailable() throws Exception { var usageAction = newUsageAction(available); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, null, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(available)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new LogstashFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new LogstashFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.available(), is(available)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java index 4fdb7d2e5e46c..9bb5259b0b30e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java @@ -36,7 +36,7 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.action.util.QueryPage; @@ -203,12 +203,12 @@ public void testAvailable() throws Exception { var usageAction = newUsageAction(commonSettings, randomBoolean(), randomBoolean(), randomBoolean()); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(available)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.available(), is(available)); } @@ -233,12 +233,12 @@ public void testEnabled() throws Exception { var usageAction = newUsageAction(settings.build(), randomBoolean(), randomBoolean(), randomBoolean()); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.enabled(), is(expected)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.enabled(), is(expected)); } @@ -254,13 +254,13 @@ public void testUsage() throws Exception { var usageAction = newUsageAction(settings.build(), true, true, true); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage mlUsage = future.get().getUsage(); + XPackFeatureUsage mlUsage = future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); mlUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(mlUsage, serializedUsage)) { + for (XPackFeatureUsage usage : Arrays.asList(mlUsage, serializedUsage)) { assertThat(usage, is(notNullValue())); assertThat(usage.name(), is(XPackField.MACHINE_LEARNING)); assertThat(usage.enabled(), is(true)); @@ -412,13 +412,13 @@ public void testAnomalyDetectionDisabled() throws Exception { var usageAction = newUsageAction(settings.build(), false, true, true); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage mlUsage = future.get().getUsage(); + XPackFeatureUsage mlUsage = future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); mlUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(mlUsage, serializedUsage)) { + for (XPackFeatureUsage usage : Arrays.asList(mlUsage, serializedUsage)) { assertThat(usage, is(notNullValue())); assertThat(usage.name(), is(XPackField.MACHINE_LEARNING)); assertThat(usage.enabled(), is(true)); @@ -508,13 +508,13 @@ public void testUsageWithTrainedModelsDisabled() throws Exception { var usageAction = newUsageAction(settings.build(), true, false, false); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage mlUsage = future.get().getUsage(); + XPackFeatureUsage mlUsage = future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); mlUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(mlUsage, serializedUsage)) { + for (XPackFeatureUsage usage : Arrays.asList(mlUsage, serializedUsage)) { assertThat(usage, is(notNullValue())); assertThat(usage.name(), is(XPackField.MACHINE_LEARNING)); assertThat(usage.enabled(), is(true)); @@ -605,7 +605,7 @@ public void testUsageWithOrphanedTask() throws Exception { var usageAction = newUsageAction(settings.build(), true, true, true); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); XContentSource source; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { @@ -640,12 +640,12 @@ public void testUsageDisabledML() throws Exception { var usageAction = newUsageAction(settings.build(), randomBoolean(), randomBoolean(), randomBoolean()); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage mlUsage = future.get().getUsage(); + XPackFeatureUsage mlUsage = future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); mlUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(mlUsage, serializedUsage)) { + for (XPackFeatureUsage usage : Arrays.asList(mlUsage, serializedUsage)) { assertThat(usage, is(notNullValue())); assertThat(usage.name(), is(XPackField.MACHINE_LEARNING)); assertThat(usage.enabled(), is(false)); @@ -662,14 +662,14 @@ public void testNodeCount() throws Exception { var usageAction = newUsageAction(settings.build(), randomBoolean(), randomBoolean(), randomBoolean()); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, clusterState, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(true)); assertThat(usage.enabled(), is(true)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); XContentSource source; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { @@ -688,7 +688,7 @@ public void testUsageGivenMlMetadataNotInstalled() throws Exception { var usageAction = newUsageAction(settings.build(), true, true, true); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(true)); assertThat(usage.enabled(), is(true)); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java index a5f3792d6df8d..add6efe8d1cf1 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java @@ -26,7 +26,7 @@ import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.protocol.xpack.XPackUsageRequest; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.collector.Collector; @@ -93,7 +93,7 @@ protected Collection doCollect(final MonitoringDoc.Node node, fin final String clusterUuid = clusterUuid(clusterState); final String version = Build.current().version(); final License license = licenseService.getLicense(); - final List xpackUsage = collect( + final List xpackUsage = collect( () -> client.execute(XPackUsageAction.INSTANCE, new XPackUsageRequest(getCollectionTimeout())) .actionGet(getCollectionTimeout()) .getUsages() diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java index e25c11304a7b2..d056164ba610c 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java @@ -17,7 +17,7 @@ import org.elasticsearch.license.License; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -53,7 +53,7 @@ public class ClusterStatsMonitoringDoc extends MonitoringDoc { private final String version; private final License license; private final boolean apmIndicesExist; - private final List usages; + private final List usages; private final ClusterStatsResponse clusterStats; private final ClusterState clusterState; private final ClusterHealthStatus status; @@ -69,7 +69,7 @@ public class ClusterStatsMonitoringDoc extends MonitoringDoc { final ClusterHealthStatus status, @Nullable final License license, final boolean apmIndicesExist, - @Nullable final List usages, + @Nullable final List usages, @Nullable final ClusterStatsResponse clusterStats, @Nullable final ClusterState clusterState, final boolean clusterNeedsTLSEnabled @@ -103,7 +103,7 @@ boolean getAPMIndicesExist() { return apmIndicesExist; } - List getUsages() { + List getUsages() { return usages; } @@ -198,7 +198,7 @@ protected void innerToXContent(XContentBuilder builder, Params params) throws IO if (usages != null) { builder.startObject("xpack"); - for (final XPackFeatureSet.Usage usage : usages) { + for (final XPackFeatureUsage usage : usages) { builder.field(usage.name(), usage); } builder.endObject(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringInfoTransportActionTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringInfoTransportActionTests.java index 4d242db394d10..0b2cbe03e6431 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringInfoTransportActionTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringInfoTransportActionTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; import org.elasticsearch.xpack.monitoring.exporter.Exporter; @@ -109,8 +109,8 @@ public void testUsage() throws Exception { monitoringUsage.writeTo(out); StreamInput in = out.bytes().streamInput(); in.setTransportVersion(serializedVersion); - XPackFeatureSet.Usage serializedUsage = new MonitoringFeatureSetUsage(in); - for (XPackFeatureSet.Usage usage : Arrays.asList(monitoringUsage, serializedUsage)) { + XPackFeatureUsage serializedUsage = new MonitoringFeatureSetUsage(in); + for (XPackFeatureUsage usage : Arrays.asList(monitoringUsage, serializedUsage)) { ObjectPath source; try (XContentBuilder builder = jsonBuilder()) { usage.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index 3a9069dee064d..9458442557694 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -60,7 +60,7 @@ import org.elasticsearch.test.BuildUtils; import org.elasticsearch.transport.TransportInfo; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -91,7 +91,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase usages; + private List usages; private ClusterStatsResponse clusterStats; private ClusterState clusterState; private License license; @@ -312,7 +312,7 @@ public void testToXContent() throws IOException { .maxNodes(2) .build(); - final List usageList = singletonList(new MonitoringFeatureSetUsage(false, null)); + final List usageList = singletonList(new MonitoringFeatureSetUsage(false, null)); final NodeInfo mockNodeInfo = mock(NodeInfo.class); var mockNodeVersion = randomAlphaOfLengthBetween(6, 32); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index e178f4bf3eb6c..7ddeb3f9695ef 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; @@ -945,7 +945,7 @@ public void testRealmUsageStats() { } XPackUsageResponse response = safeGet(client().execute(XPackUsageAction.INSTANCE, new XPackUsageRequest(SAFE_AWAIT_TIMEOUT))); - Optional securityUsage = response.getUsages() + Optional securityUsage = response.getUsages() .stream() .filter(usage -> usage instanceof SecurityFeatureSetUsage) .findFirst(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java index 88f233087e1dd..688e12fe727c9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; @@ -216,8 +216,8 @@ public void testUsage() throws Exception { SecurityFeatureSetUsage securityUsage = (SecurityFeatureSetUsage) future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); securityUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new SecurityFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(securityUsage, serializedUsage)) { + XPackFeatureUsage serializedUsage = new SecurityFeatureSetUsage(out.bytes().streamInput()); + for (XPackFeatureUsage usage : Arrays.asList(securityUsage, serializedUsage)) { assertThat(usage, is(notNullValue())); assertThat(usage.name(), is(XPackField.SECURITY)); assertThat(usage.enabled(), is(enabled)); @@ -318,7 +318,7 @@ public void testUsage() throws Exception { } } - private XContentSource getXContentSource(XPackFeatureSet.Usage usage) throws IOException { + private XContentSource getXContentSource(XPackFeatureUsage usage) throws IOException { XContentSource source; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { usage.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/action/SpatialInfoTransportActionTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/action/SpatialInfoTransportActionTests.java index ede64decbca33..683dcea48e46e 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/action/SpatialInfoTransportActionTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/action/SpatialInfoTransportActionTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.test.MockUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.spatial.SpatialFeatureSetUsage; import org.elasticsearch.xpack.core.spatial.action.SpatialStatsAction; @@ -73,12 +73,12 @@ public void testAvailable() throws Exception { PlainActionFuture future = new PlainActionFuture<>(); Task task = new Task(1L, "_type", "_action", "_description", null, Collections.emptyMap()); usageAction.masterOperation(task, null, clusterService.state(), future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(true)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new SpatialFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new SpatialFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.available(), is(true)); } @@ -99,12 +99,12 @@ public void testEnabled() throws Exception { ); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(mock(Task.class), null, clusterService.state(), future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertTrue(usage.enabled()); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new SpatialFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new SpatialFeatureSetUsage(out.bytes().streamInput()); assertTrue(serializedUsage.enabled()); } diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyInfoTransportAction.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyInfoTransportAction.java new file mode 100644 index 0000000000000..9e758ee155644 --- /dev/null +++ b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyInfoTransportAction.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.cluster.coordination.votingonly; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; + +public class VotingOnlyInfoTransportAction extends XPackInfoFeatureTransportAction { + + @Inject + public VotingOnlyInfoTransportAction(TransportService transportService, ActionFilters actionFilters) { + super(XPackInfoFeatureAction.VOTING_ONLY.name(), transportService, actionFilters); + } + + @Override + protected String name() { + return XPackField.VOTING_ONLY; + } + + @Override + protected boolean available() { + return true; + } + + @Override + protected boolean enabled() { + return true; + } +} diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodeFeatureSet.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodeFeatureSet.java deleted file mode 100644 index 54bb265321799..0000000000000 --- a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodeFeatureSet.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.cluster.coordination.votingonly; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.protocol.xpack.XPackUsageRequest; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; -import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; -import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; -import org.elasticsearch.xpack.core.votingonly.VotingOnlyNodeFeatureSetUsage; - -public class VotingOnlyNodeFeatureSet implements XPackFeatureSet { - - @Override - public String name() { - return XPackField.VOTING_ONLY; - } - - @Override - public boolean available() { - return true; - } - - @Override - public boolean enabled() { - return true; - } - - public static class UsageTransportAction extends XPackUsageFeatureTransportAction { - - @Inject - public UsageTransportAction( - TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver - ) { - super( - XPackUsageFeatureAction.VOTING_ONLY.name(), - transportService, - clusterService, - threadPool, - actionFilters, - indexNameExpressionResolver - ); - } - - @Override - protected void masterOperation( - Task task, - XPackUsageRequest request, - ClusterState state, - ActionListener listener - ) { - final VotingOnlyNodeFeatureSetUsage usage = new VotingOnlyNodeFeatureSetUsage(); - listener.onResponse(new XPackUsageFeatureResponse(usage)); - } - } - - public static class UsageInfoAction extends XPackInfoFeatureTransportAction { - - @Inject - public UsageInfoAction(TransportService transportService, ActionFilters actionFilters) { - super(XPackInfoFeatureAction.VOTING_ONLY.name(), transportService, actionFilters); - } - - @Override - protected String name() { - return XPackField.VOTING_ONLY; - } - - @Override - protected boolean available() { - return true; - } - - @Override - protected boolean enabled() { - return true; - } - } -} diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java index 8bd951cff40da..1e1103a6e54a2 100644 --- a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java +++ b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java @@ -84,8 +84,8 @@ public Collection createComponents(PluginServices services) { @Override public List> getActions() { return Arrays.asList( - new ActionHandler<>(XPackUsageFeatureAction.VOTING_ONLY, VotingOnlyNodeFeatureSet.UsageTransportAction.class), - new ActionHandler<>(XPackInfoFeatureAction.VOTING_ONLY, VotingOnlyNodeFeatureSet.UsageInfoAction.class) + new ActionHandler<>(XPackUsageFeatureAction.VOTING_ONLY, VotingOnlyUsageTransportAction.class), + new ActionHandler<>(XPackInfoFeatureAction.VOTING_ONLY, VotingOnlyInfoTransportAction.class) ); } diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyUsageTransportAction.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyUsageTransportAction.java new file mode 100644 index 0000000000000..dd449b10fd87f --- /dev/null +++ b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyUsageTransportAction.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.cluster.coordination.votingonly; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; +import org.elasticsearch.xpack.core.votingonly.VotingOnlyNodeFeatureSetUsage; + +public class VotingOnlyUsageTransportAction extends XPackUsageFeatureTransportAction { + + @Inject + public VotingOnlyUsageTransportAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + XPackUsageFeatureAction.VOTING_ONLY.name(), + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver + ); + } + + @Override + protected void masterOperation( + Task task, + XPackUsageRequest request, + ClusterState state, + ActionListener listener + ) { + final VotingOnlyNodeFeatureSetUsage usage = new VotingOnlyNodeFeatureSetUsage(); + listener.onResponse(new XPackUsageFeatureResponse(usage)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherInfoTransportActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherInfoTransportActionTests.java index ac683dca5bf26..87258afcc5320 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherInfoTransportActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherInfoTransportActionTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.watcher.WatcherFeatureSetUsage; import org.elasticsearch.xpack.core.watcher.WatcherField; @@ -155,9 +155,9 @@ public void testUsageStats() throws Exception { assertThat(spam, is(1L)); BytesStreamOutput out = new BytesStreamOutput(); watcherUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new WatcherFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new WatcherFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(watcherUsage, serializedUsage)) { + for (XPackFeatureUsage usage : Arrays.asList(watcherUsage, serializedUsage)) { XContentBuilder builder = jsonBuilder(); usage.toXContent(builder, ToXContent.EMPTY_PARAMS); From 9dae38e7efde09eba269385d426774d634373204 Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Mon, 28 Oct 2024 14:44:09 +0000 Subject: [PATCH 153/324] Unmute test that was fixed by #112145 (#115682) The template warning was fixed by https://github.com/elastic/elasticsearch/pull/112145 #112147 was reopened as it was still muted even though the issue was closed. This unmutes the test. Closes #112147 --- muted-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index b69455a68790b..51345113addec 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -65,8 +65,6 @@ tests: - class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT method: testSnapshotRestore {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111799 -- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT - issue: https://github.com/elastic/elasticsearch/issues/112147 - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 From 2207743e4c11d9ae1db48526122eda85d54ca504 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 28 Oct 2024 09:48:21 -0500 Subject: [PATCH 154/324] Adding testUsedMaxMindResponseClassesAreAccountedFor back to MaxMindSupportTests (#115706) --- .../ingest/geoip/MaxMindSupportTests.java | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java index 79a4190af284a..292a7e3c632d3 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java @@ -24,9 +24,11 @@ import com.maxmind.geoip2.record.MaxMind; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.test.ESTestCase; import java.lang.reflect.Method; +import java.lang.reflect.Modifier; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; import java.net.InetAddress; @@ -479,6 +481,36 @@ public void testUnknownMaxMindResponseClassess() { ); } + /* + * This tests that this test has a mapping in TYPE_TO_MAX_MIND_CLASS for all MaxMind classes exposed through GeoIpDatabase. + */ + public void testUsedMaxMindResponseClassesAreAccountedFor() { + Set> usedMaxMindResponseClasses = getUsedMaxMindResponseClasses(); + Set> supportedMaxMindClasses = new HashSet<>(TYPE_TO_MAX_MIND_CLASS.values()); + Set> usedButNotSupportedMaxMindResponseClasses = Sets.difference( + usedMaxMindResponseClasses, + supportedMaxMindClasses + ); + assertThat( + "MaxmindIpDataLookups exposes MaxMind response classes that this test does not know what to do with. Add mappings to " + + "TYPE_TO_MAX_MIND_CLASS for the following: " + + usedButNotSupportedMaxMindResponseClasses, + usedButNotSupportedMaxMindResponseClasses, + empty() + ); + Set> supportedButNotUsedMaxMindClasses = Sets.difference( + supportedMaxMindClasses, + usedMaxMindResponseClasses + ); + assertThat( + "This test claims to support MaxMind response classes that are not exposed in GeoIpDatabase. Remove the following from " + + "TYPE_TO_MAX_MIND_CLASS: " + + supportedButNotUsedMaxMindClasses, + supportedButNotUsedMaxMindClasses, + empty() + ); + } + /* * This is the list of field types that causes us to stop recursing. That is, fields of these types are the lowest-level fields that * we care about. @@ -597,4 +629,34 @@ private static String getFormattedList(Set fields) { } return result.toString(); } + + /* + * This returns all AbstractResponse classes that are declared in transform methods in classes defined in MaxmindIpDataLookups. + */ + @SuppressWarnings("unchecked") + @SuppressForbidden(reason = "Need declared classes and methods") + private static Set> getUsedMaxMindResponseClasses() { + Set> result = new HashSet<>(); + Class[] declaredClasses = MaxmindIpDataLookups.class.getDeclaredClasses(); + for (Class declaredClass : declaredClasses) { + if (Modifier.isAbstract(declaredClass.getModifiers())) { + continue; + } + Method[] declaredMethods = declaredClass.getDeclaredMethods(); + Optional nonAbstractTransformMethod = Arrays.stream(declaredMethods) + .filter( + method -> method.getName().equals("transform") + && method.getParameterTypes().length == 1 + && Modifier.isAbstract(method.getParameterTypes()[0].getModifiers()) == false + ) + .findAny(); + if (nonAbstractTransformMethod.isPresent()) { + Class responseClass = nonAbstractTransformMethod.get().getParameterTypes()[0]; + if (AbstractResponse.class.isAssignableFrom(responseClass)) { + result.add((Class) responseClass); + } + } + } + return result; + } } From 03f2559f37fba8cd52f1e3d1add76b431a375a80 Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Mon, 28 Oct 2024 14:50:56 +0000 Subject: [PATCH 155/324] #114220 Unmute ExplainLifecycleIT testStepInfoPreservedOnAutoRetry (#115768) --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 51345113addec..c37733cb7307c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -198,9 +198,6 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testGet issue: https://github.com/elastic/elasticsearch/issues/114135 -- class: org.elasticsearch.xpack.ilm.ExplainLifecycleIT - method: testStepInfoPreservedOnAutoRetry - issue: https://github.com/elastic/elasticsearch/issues/114220 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/114412 From a1670c18bfeac4a14057130d328053ecc3c8fe71 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 28 Oct 2024 15:32:00 +0000 Subject: [PATCH 156/324] Avoid `catch (Throwable t)` in `AmazonBedrockStreamingChatProcessor` (#115715) `CompletableFuture.runAsync` implicitly catches all `Throwable` instances thrown by the task, which includes `Error` instances that no reasonable application should catch. Moreover, discarding the return value from these methods means that any such `Error` will be ignored, allowing the JVM to carry on running in an invalid state. This commit replaces these trappy calls with more appropriate exception handling. --- docs/changelog/115715.yaml | 5 +++++ .../inference/src/main/java/module-info.java | 1 + .../AmazonBedrockStreamingChatProcessor.java | 19 +++++++++++++++---- 3 files changed, 21 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/115715.yaml diff --git a/docs/changelog/115715.yaml b/docs/changelog/115715.yaml new file mode 100644 index 0000000000000..378f2c42e5e50 --- /dev/null +++ b/docs/changelog/115715.yaml @@ -0,0 +1,5 @@ +pr: 115715 +summary: Avoid `catch (Throwable t)` in `AmazonBedrockStreamingChatProcessor` +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index 60cb254e0afbe..53974657e4e23 100644 --- a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -33,6 +33,7 @@ requires org.slf4j; requires software.amazon.awssdk.retries.api; requires org.reactivestreams; + requires org.elasticsearch.logging; exports org.elasticsearch.xpack.inference.action; exports org.elasticsearch.xpack.inference.registry; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java index 439fc5b65efd5..12f394e300e0f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java @@ -14,11 +14,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Strings; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.inference.results.StreamingChatCompletionResults; import java.util.ArrayDeque; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.Flow; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -27,6 +28,8 @@ import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; class AmazonBedrockStreamingChatProcessor implements Flow.Processor { + private static final Logger logger = LogManager.getLogger(AmazonBedrockStreamingChatProcessor.class); + private final AtomicReference error = new AtomicReference<>(null); private final AtomicLong demand = new AtomicLong(0); private final AtomicBoolean isDone = new AtomicBoolean(false); @@ -75,13 +78,13 @@ public void onNext(ConverseStreamOutput item) { // this is always called from a netty thread maintained by the AWS SDK, we'll move it to our thread to process the response private void sendDownstreamOnAnotherThread(ContentBlockDeltaEvent event) { - CompletableFuture.runAsync(() -> { + runOnUtilityThreadPool(() -> { var text = event.delta().text(); var result = new ArrayDeque(1); result.offer(new StreamingChatCompletionResults.Result(text)); var results = new StreamingChatCompletionResults.Results(result); downstream.onNext(results); - }, threadPool.executor(UTILITY_THREAD_POOL_NAME)); + }); } @Override @@ -108,6 +111,14 @@ public void onComplete() { } } + private void runOnUtilityThreadPool(Runnable runnable) { + try { + threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(runnable); + } catch (Exception e) { + logger.error(Strings.format("failed to fork [%s] to utility thread pool", runnable), e); + } + } + private class StreamSubscription implements Flow.Subscription { @Override public void request(long n) { @@ -142,7 +153,7 @@ private void requestOnMlThread(long n) { if (UTILITY_THREAD_POOL_NAME.equalsIgnoreCase(currentThreadPool)) { upstream.request(n); } else { - CompletableFuture.runAsync(() -> upstream.request(n), threadPool.executor(UTILITY_THREAD_POOL_NAME)); + runOnUtilityThreadPool(() -> upstream.request(n)); } } From db2eca345ddc9d8a9354a2ef7597638b647ae958 Mon Sep 17 00:00:00 2001 From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com> Date: Mon, 28 Oct 2024 16:49:44 +0100 Subject: [PATCH 157/324] fixed testCCSClusterDetailsWhereAllShardsSkippedInCanMatch (#115774) --- muted-tests.yml | 3 --- .../elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java | 1 - 2 files changed, 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index c37733cb7307c..4128d41bf252c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -262,9 +262,6 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 -- class: org.elasticsearch.xpack.search.CrossClusterAsyncSearchIT - method: testCCSClusterDetailsWhereAllShardsSkippedInCanMatch - issue: https://github.com/elastic/elasticsearch/issues/115652 - class: org.elasticsearch.index.get.GetResultTests method: testToAndFromXContent issue: https://github.com/elastic/elasticsearch/issues/115688 diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 3cd8778069d0c..3b5647da1399f 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -287,7 +287,6 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except try { responseId = response.getId(); assertNotNull(response.getSearchResponse()); - assertTrue(response.isRunning()); SearchResponse.Clusters clusters = response.getSearchResponse().getClusters(); assertThat(clusters.getTotal(), equalTo(2)); if (dfs) { From da108fcb0d31540ff076f42d798a657624e364ec Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Mon, 28 Oct 2024 11:55:16 -0400 Subject: [PATCH 158/324] [CI] Set max-workers to 4 during agent image creation --- .buildkite/packer_cache.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/packer_cache.sh b/.buildkite/packer_cache.sh index 752914ba55c23..01e1ad5cd7823 100755 --- a/.buildkite/packer_cache.sh +++ b/.buildkite/packer_cache.sh @@ -29,6 +29,6 @@ for branch in "${branches[@]}"; do fi export JAVA_HOME="$HOME/.java/$ES_BUILD_JAVA" - "checkout/${branch}/gradlew" --project-dir "$CHECKOUT_DIR" --parallel -s resolveAllDependencies -Dorg.gradle.warning.mode=none -DisCI + "checkout/${branch}/gradlew" --project-dir "$CHECKOUT_DIR" --parallel -s resolveAllDependencies -Dorg.gradle.warning.mode=none -DisCI --max-workers=4 rm -rf "checkout/${branch}" done From 2b1dc5adbe2ee4b6ef5a1a97eba785e03445bcb0 Mon Sep 17 00:00:00 2001 From: Francesco Gualazzi Date: Mon, 28 Oct 2024 17:47:45 +0100 Subject: [PATCH 159/324] plugin(apm-data): amend logs-apm component template (#115778) Restore standard mode after LogsDB took precedence. Related: #114501 --- .../resources/component-templates/logs-apm@settings.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm@settings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm@settings.yaml index 6bb8079c534c8..af84ba50fb075 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm@settings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm@settings.yaml @@ -5,5 +5,6 @@ _meta: managed: true template: settings: - codec: best_compression - mode: standard + index: + codec: best_compression + mode: standard From 232622afd3d32ee1aace64d01385111e9cfc0bed Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 28 Oct 2024 18:02:19 +0100 Subject: [PATCH 160/324] Tweak LogsdbIndexingRollingUpgradeIT and TsdbIndexingRollingUpgradeIT (#115785) to fix following test bugs: * Deal with trail already started error. * Ensure k8s.pod.name field is mapped as keyword Closes #115755, #115756, #115757, #115758, #115758 --- .../upgrades/LogsdbIndexingRollingUpgradeIT.java | 12 ++++++++++-- .../upgrades/TsdbIndexingRollingUpgradeIT.java | 14 ++++++++++++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java index 9bdc43543e331..226cb3dda2ba1 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.rest.ObjectPath; @@ -21,6 +22,7 @@ import java.io.IOException; import java.io.InputStream; import java.time.Instant; +import java.util.Locale; import java.util.Map; import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.enableLogsdbByDefault; @@ -73,7 +75,7 @@ public void testIndexing() throws Exception { if (isOldCluster()) { startTrial(); enableLogsdbByDefault(); - createTemplate(dataStreamName, "3", TEMPLATE); + createTemplate(dataStreamName, getClass().getSimpleName().toLowerCase(Locale.ROOT), TEMPLATE); Instant startTime = Instant.now().minusSeconds(60 * 60); bulkIndex(dataStreamName, 4, 1024, startTime); @@ -233,7 +235,13 @@ void query(String dataStreamName) throws Exception { protected static void startTrial() throws IOException { Request startTrial = new Request("POST", "/_license/start_trial"); startTrial.addParameter("acknowledge", "true"); - assertOK(client().performRequest(startTrial)); + try { + assertOK(client().performRequest(startTrial)); + } catch (ResponseException e) { + var responseBody = entityAsMap(e.getResponse()); + String error = ObjectPath.evaluate(responseBody, "error_message"); + assertThat(error, equalTo("Trial was already activated.")); + } } static Map getIndexSettingsWithDefaults(String index) throws IOException { diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java index 1ac919ea57001..37e7aab94f7ca 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java @@ -16,10 +16,13 @@ import org.elasticsearch.test.rest.ObjectPath; import java.time.Instant; +import java.util.Locale; import java.util.Map; import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.getWriteBackingIndex; -import static org.elasticsearch.upgrades.LogsdbIndexingRollingUpgradeIT.*; +import static org.elasticsearch.upgrades.LogsdbIndexingRollingUpgradeIT.createTemplate; +import static org.elasticsearch.upgrades.LogsdbIndexingRollingUpgradeIT.getIndexSettingsWithDefaults; +import static org.elasticsearch.upgrades.LogsdbIndexingRollingUpgradeIT.startTrial; import static org.elasticsearch.upgrades.TsdbIT.TEMPLATE; import static org.elasticsearch.upgrades.TsdbIT.formatInstant; import static org.hamcrest.Matchers.equalTo; @@ -42,7 +45,7 @@ public void testIndexing() throws Exception { String dataStreamName = "k9s"; if (isOldCluster()) { startTrial(); - createTemplate(dataStreamName, "2", TEMPLATE); + createTemplate(dataStreamName, getClass().getSimpleName().toLowerCase(Locale.ROOT), TEMPLATE); Instant startTime = Instant.now().minusSeconds(60 * 60); bulkIndex(dataStreamName, 4, 1024, startTime); @@ -52,6 +55,13 @@ public void testIndexing() throws Exception { assertThat(((Map) settings.get("settings")).get("index.mode"), equalTo("time_series")); assertThat(((Map) settings.get("defaults")).get("index.mapping.source.mode"), equalTo("SYNTHETIC")); + var mapping = getIndexMappingAsMap(firstBackingIndex); + assertThat( + "incorrect k8s.pod.name field in mapping:" + mapping, + "keyword", + equalTo(ObjectPath.evaluate(mapping, "properties.k8s.properties.pod.properties.name.type")) + ); + ensureGreen(dataStreamName); search(dataStreamName); query(dataStreamName); From 9992edc588814ff0d577b30303fac73546fa5118 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Mon, 28 Oct 2024 14:54:03 -0400 Subject: [PATCH 161/324] [ML] Fix stream support for TaskType.ANY (#115656) If we support one, then we support any. --- docs/changelog/115656.yaml | 5 +++++ .../xpack/inference/services/SenderService.java | 3 ++- .../services/amazonbedrock/AmazonBedrockServiceTests.java | 7 +++++++ .../services/anthropic/AnthropicServiceTests.java | 7 +++++++ .../services/azureaistudio/AzureAiStudioServiceTests.java | 7 +++++++ .../services/azureopenai/AzureOpenAiServiceTests.java | 7 +++++++ .../inference/services/cohere/CohereServiceTests.java | 7 +++++++ .../googleaistudio/GoogleAiStudioServiceTests.java | 7 +++++++ .../inference/services/openai/OpenAiServiceTests.java | 7 +++++++ 9 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115656.yaml diff --git a/docs/changelog/115656.yaml b/docs/changelog/115656.yaml new file mode 100644 index 0000000000000..13b612b052fc1 --- /dev/null +++ b/docs/changelog/115656.yaml @@ -0,0 +1,5 @@ +pr: 115656 +summary: Fix stream support for `TaskType.ANY` +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java index 71b38d7a0785a..953cf4cf6ad77 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java @@ -25,13 +25,14 @@ import org.elasticsearch.xpack.inference.external.http.sender.Sender; import java.io.IOException; +import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; public abstract class SenderService implements InferenceService { - protected static final Set COMPLETION_ONLY = Set.of(TaskType.COMPLETION); + protected static final Set COMPLETION_ONLY = EnumSet.of(TaskType.COMPLETION, TaskType.ANY); private final Sender sender; private final ServiceComponents serviceComponents; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java index 06c5a68987a9e..931d418a3664b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java @@ -1304,6 +1304,13 @@ public void testInfer_UnauthorizedResponse() throws IOException { } } + public void testSupportsStreaming() throws IOException { + try (var service = new AmazonBedrockService(mock(), mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + public void testChunkedInfer_CallsInfer_ConvertsFloatResponse_ForEmbeddings() throws IOException { var model = AmazonBedrockEmbeddingsModelTests.createModel( "id", diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java index 48277112d9306..c4f7fbfb14437 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java @@ -593,6 +593,13 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("blah"); } + public void testSupportsStreaming() throws IOException { + try (var service = new AnthropicService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + private AnthropicService createServiceWithMockSender() { return new AnthropicService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index e85edf573ba96..4d2eb60767f44 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -1384,6 +1384,13 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("You didn't provide an API key..."); } + public void testSupportsStreaming() throws IOException { + try (var service = new AzureAiStudioService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + // ---------------------------------------------------------------- private AzureAiStudioService createService() { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index 3408fc358cac0..1bae6ce66d6aa 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -1504,6 +1504,13 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("You didn't provide an API key..."); } + public void testSupportsStreaming() throws IOException { + try (var service = new AzureOpenAiService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + private AzureOpenAiService createAzureOpenAiService() { return new AzureOpenAiService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index 758c38166778b..d44be4246f844 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -1683,6 +1683,13 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("how dare you"); } + public void testSupportsStreaming() throws IOException { + try (var service = new CohereService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + private Map getRequestConfigMap( Map serviceSettings, Map taskSettings, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java index e8382876868c5..27a53177658c6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java @@ -1219,6 +1219,13 @@ private void testUpdateModelWithEmbeddingDetails_Successful(SimilarityMeasure si } } + public void testSupportsStreaming() throws IOException { + try (var service = new GoogleAiStudioService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + public static Map buildExpectationCompletions(List completions) { return Map.of( ChatCompletionResults.COMPLETION, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index cf1438b334478..0698b9652b767 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -1077,6 +1077,13 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("You didn't provide an API key..."); } + public void testSupportsStreaming() throws IOException { + try (var service = new OpenAiService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + public void testCheckModelConfig_IncludesMaxTokens() throws IOException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); From 023c856c24fdc2ad04a09aac736db6afb03fc0cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Slobodan=20Adamovi=C4=87?= Date: Mon, 28 Oct 2024 20:19:51 +0100 Subject: [PATCH 162/324] [DOCS] `_cat/shards`: clarify required permissions for restricted indices (#115650) Clarify security requirements when requesting detailed shard information for restricted indices. --- docs/reference/cat/shards.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index 812b946ab2c47..16f52a11f026a 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -33,7 +33,7 @@ For <>, the API returns information about the stream' * If the {es} {security-features} are enabled, you must have the `monitor` or `manage` <> to use this API. You must also have the `monitor` or `manage` <> -for any data stream, index, or alias you retrieve. +to view the full information for any data stream, index, or alias you retrieve. [[cat-shards-path-params]] ==== {api-path-parms-title} From 8787f0de09bacaf29190c7c547d252a75ca917ad Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 28 Oct 2024 15:07:38 -0500 Subject: [PATCH 163/324] Removing code that rarely adds a legacy global template to yaml rest tests (#115799) --- .../rest/yaml/ESClientYamlSuiteTestCase.java | 59 ++----------------- 1 file changed, 4 insertions(+), 55 deletions(-) diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 823e9e1fe9a7c..d835a8d0c1635 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -18,13 +18,10 @@ import org.apache.lucene.tests.util.TimeUnits; import org.elasticsearch.client.Node; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; -import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -41,11 +38,8 @@ import org.elasticsearch.test.rest.yaml.section.ExecutableSection; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; import org.junit.AfterClass; import org.junit.Before; -import org.junit.BeforeClass; import java.io.IOException; import java.nio.file.Files; @@ -67,8 +61,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; - /** * Runs a suite of yaml tests shared with all the official Elasticsearch * clients against an elasticsearch cluster. @@ -125,15 +117,6 @@ protected ESClientYamlSuiteTestCase(ClientYamlTestCandidate testCandidate) { this.testCandidate = testCandidate; } - private static Settings globalTemplateIndexSettings; - - @BeforeClass - public static void initializeGlobalTemplateIndexSettings() { - globalTemplateIndexSettings = usually() - ? Settings.EMPTY - : Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2).build(); - } - @Before public void initAndResetContext() throws Exception { if (restTestExecutionContext == null) { @@ -511,34 +494,6 @@ public void test() throws IOException { inFipsJvm() && testCandidate.getTestSection().getPrerequisiteSection().hasYamlRunnerFeature("fips_140") ); - final Settings globalTemplateSettings = getGlobalTemplateSettings( - testCandidate.getTestSection().getPrerequisiteSection().hasYamlRunnerFeature("default_shards") - ); - if (globalTemplateSettings.isEmpty() == false && ESRestTestCase.has(ProductFeature.LEGACY_TEMPLATES)) { - - final XContentBuilder template = jsonBuilder(); - template.startObject(); - { - template.array("index_patterns", "*"); - template.startObject("settings"); - globalTemplateSettings.toXContent(template, ToXContent.EMPTY_PARAMS); - template.endObject(); - } - template.endObject(); - - final Request request = new Request("PUT", "/_template/global"); - request.setJsonEntity(Strings.toString(template)); - // Because not all case have transitioned to a composable template, it's possible that - // this can overlap an installed composable template since this is a global (*) - // template. In order to avoid this failing the test, we override the warnings handler - // to be permissive in this case. This can be removed once all tests use composable - // templates instead of legacy templates - RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); - builder.setWarningsHandler(WarningsHandler.PERMISSIVE); - request.setOptions(builder.build()); - adminClient().performRequest(request); - } - if (skipSetupSections() == false && testCandidate.getSetupSection().isEmpty() == false) { logger.debug("start setup test [{}]", testCandidate.getTestPath()); for (ExecutableSection executableSection : testCandidate.getSetupSection().getExecutableSections()) { @@ -564,19 +519,13 @@ public void test() throws IOException { @Deprecated protected Settings getGlobalTemplateSettings(List features) { - if (features.contains("default_shards")) { - return Settings.EMPTY; - } else { - return globalTemplateIndexSettings; - } + // This method will be deleted once its uses in serverless are deleted + return Settings.EMPTY; } protected Settings getGlobalTemplateSettings(boolean defaultShardsFeature) { - if (defaultShardsFeature) { - return Settings.EMPTY; - } else { - return globalTemplateIndexSettings; - } + // This method will be deleted once its uses in serverless are deleted + return Settings.EMPTY; } protected boolean skipSetupSections() { From 1ea3573ad1fd09c699bcf50bcff61b59b420a549 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 29 Oct 2024 07:15:26 +1100 Subject: [PATCH 164/324] Mute org.elasticsearch.xpack.core.ml.calendars.ScheduledEventTests testBuild_SucceedsWithDefaultSkipResultAndSkipModelUpdatesValues #115476 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 4128d41bf252c..b338e094531b0 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -286,6 +286,9 @@ tests: - class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT method: testGeoShapeGeoHex issue: https://github.com/elastic/elasticsearch/issues/115705 +- class: org.elasticsearch.xpack.core.ml.calendars.ScheduledEventTests + method: testBuild_SucceedsWithDefaultSkipResultAndSkipModelUpdatesValues + issue: https://github.com/elastic/elasticsearch/issues/115476 # Examples: # From bae01c58062596e76d7141813bdbd87e42e3da92 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 29 Oct 2024 08:02:00 +1100 Subject: [PATCH 165/324] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=transform/transforms_start_stop/Verify start transform reuses destination index} #115808 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index b338e094531b0..3960007515bdb 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -289,6 +289,9 @@ tests: - class: org.elasticsearch.xpack.core.ml.calendars.ScheduledEventTests method: testBuild_SucceedsWithDefaultSkipResultAndSkipModelUpdatesValues issue: https://github.com/elastic/elasticsearch/issues/115476 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=transform/transforms_start_stop/Verify start transform reuses destination index} + issue: https://github.com/elastic/elasticsearch/issues/115808 # Examples: # From 2d5aad8c520e879e0e5c711385f731e2c1e2dcda Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 29 Oct 2024 08:10:15 +1100 Subject: [PATCH 166/324] Mute org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT test {yaml=reference/watcher/example-watches/example-watch-clusterstatus/line_137} #115809 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 3960007515bdb..804f595e02162 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -292,6 +292,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Verify start transform reuses destination index} issue: https://github.com/elastic/elasticsearch/issues/115808 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/watcher/example-watches/example-watch-clusterstatus/line_137} + issue: https://github.com/elastic/elasticsearch/issues/115809 # Examples: # From 690ad1ea6098f05b857c2c089b11667250750559 Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Mon, 28 Oct 2024 17:32:29 -0400 Subject: [PATCH 167/324] Query rules retriever (#114855) --- docs/changelog/114855.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../search/retriever/RetrieverBuilder.java | 4 + .../retriever/RetrieverBuilderWrapper.java | 142 ++++++ .../retriever/TestRetrieverBuilder.java | 2 + .../rules/80_query_rules_retriever.yml | 414 ++++++++++++++++++ .../xpack/application/EnterpriseSearch.java | 15 + .../application/EnterpriseSearchFeatures.java | 3 +- .../retriever/QueryRuleRetrieverBuilder.java | 217 +++++++++ .../rules/retriever/RuleQueryRankDoc.java | 102 +++++ .../QueryRuleRetrieverBuilderTests.java | 156 +++++++ 11 files changed, 1060 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/114855.yaml create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilderWrapper.java create mode 100644 x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/80_query_rules_retriever.yml create mode 100644 x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java create mode 100644 x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/RuleQueryRankDoc.java create mode 100644 x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilderTests.java diff --git a/docs/changelog/114855.yaml b/docs/changelog/114855.yaml new file mode 100644 index 0000000000000..daa6b985a14cf --- /dev/null +++ b/docs/changelog/114855.yaml @@ -0,0 +1,5 @@ +pr: 114855 +summary: Add query rules retriever +area: Relevance +type: enhancement +issues: [ ] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 9454c27dd787c..7bf3204b7e1a6 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -184,6 +184,7 @@ static TransportVersion def(int id) { public static final TransportVersion INDEX_MODE_LOOKUP = def(8_779_00_0); public static final TransportVersion INDEX_REQUEST_REMOVE_METERING = def(8_780_00_0); public static final TransportVersion CPU_STAT_STRING_PARSING = def(8_781_00_0); + public static final TransportVersion QUERY_RULES_RETRIEVER = def(8_782_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java index 5e36ad0fd4fd6..d52c354cad69e 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java @@ -218,6 +218,10 @@ public void setRankDocs(RankDoc[] rankDocs) { this.rankDocs = rankDocs; } + public RankDoc[] getRankDocs() { + return rankDocs; + } + /** * Gets the filters for this retriever. */ diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilderWrapper.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilderWrapper.java new file mode 100644 index 0000000000000..153203c0527d2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilderWrapper.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; + +/** + * A wrapper that can be used to modify the behaviour of an existing {@link RetrieverBuilder}. + */ +public abstract class RetrieverBuilderWrapper extends RetrieverBuilder { + protected final RetrieverBuilder in; + + protected RetrieverBuilderWrapper(RetrieverBuilder in) { + this.in = in; + } + + protected abstract T clone(RetrieverBuilder sub); + + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + var inRewrite = in.rewrite(ctx); + if (inRewrite != in) { + return clone(inRewrite); + } + return this; + } + + @Override + public QueryBuilder topDocsQuery() { + return in.topDocsQuery(); + } + + @Override + public RetrieverBuilder minScore(Float minScore) { + return in.minScore(minScore); + } + + @Override + public List getPreFilterQueryBuilders() { + return in.preFilterQueryBuilders; + } + + @Override + public ActionRequestValidationException validate( + SearchSourceBuilder source, + ActionRequestValidationException validationException, + boolean isScroll, + boolean allowPartialSearchResults + ) { + return in.validate(source, validationException, isScroll, allowPartialSearchResults); + } + + @Override + public RetrieverBuilder retrieverName(String retrieverName) { + return in.retrieverName(retrieverName); + } + + @Override + public void setRankDocs(RankDoc[] rankDocs) { + in.setRankDocs(rankDocs); + } + + @Override + public RankDoc[] getRankDocs() { + return in.getRankDocs(); + } + + @Override + public boolean isCompound() { + return in.isCompound(); + } + + @Override + public QueryBuilder explainQuery() { + return in.explainQuery(); + } + + @Override + public Float minScore() { + return in.minScore(); + } + + @Override + public boolean isFragment() { + return in.isFragment(); + } + + @Override + public String toString() { + return in.toString(); + } + + @Override + public String retrieverName() { + return in.retrieverName(); + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + in.extractToSearchSourceBuilder(searchSourceBuilder, compoundUsed); + } + + @Override + public String getName() { + return in.getName(); + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + in.doToXContent(builder, params); + } + + @Override + protected boolean doEquals(Object o) { + // Handle the edge case where we need to unwrap the incoming retriever + if (o instanceof RetrieverBuilderWrapper wrapper) { + return in.doEquals(wrapper.in); + } else { + return in.doEquals(o); + } + } + + @Override + protected int doHashCode() { + return in.doHashCode(); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java index 01380aa4d86b0..5ce775c508976 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java +++ b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java @@ -89,6 +89,8 @@ public void doToXContent(XContentBuilder builder, Params params) throws IOExcept @Override public boolean doEquals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; TestRetrieverBuilder that = (TestRetrieverBuilder) o; return Objects.equals(value, that.value); } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/80_query_rules_retriever.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/80_query_rules_retriever.yml new file mode 100644 index 0000000000000..7967516c6ad5a --- /dev/null +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/80_query_rules_retriever.yml @@ -0,0 +1,414 @@ +setup: + - requires: + cluster_features: 'query_rule_retriever_supported' + reason: 'test requires query rule retriever implementation' + + - do: + indices.create: + index: test-index1 + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + bulk: + refresh: true + index: test-index1 + body: + - index: + _id: foo + - { "text": "foo - pinned doc for foo" } + - index: + _id: bar + - { "text": "bar - exclude doc for bar" } + - index: + _id: baz + - { "text": "baz - no rule attached" } + - index: + _id: foo_no_rule + - { "text": "text search result for foo with no rule attached" } + - index: + _id: bar_no_rule + - { "text": "text search result for bar with no rule attached" } + - index: + _id: foo2 + - { "text": "foo2 - second pinned doc for foo" } + + - do: + query_rules.put_ruleset: + ruleset_id: test-ruleset + body: + rules: + - rule_id: foo + type: pinned + criteria: + - type: exact + metadata: foo + values: [ foo ] + actions: + ids: + - foo + - foo2 + - rule_id: bar + type: exclude + criteria: + - type: exact + metadata: bar + values: [ bar ] + actions: + ids: + - bar + +--- +"standalone query rules retriever": + + - do: + search: + index: test-index1 + body: + retriever: + rule: + match_criteria: + foo: foo + bar: bar + ruleset_ids: + test-ruleset + retriever: + standard: + query: + query_string: + query: bar + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: foo } + - match: { hits.hits.1._id: foo2 } + - match: { hits.hits.2._id: bar_no_rule } + +--- +"query rules retriever combined with rrf": + + - do: + search: + index: test-index1 + body: + retriever: + rule: + match_criteria: + foo: foo + bar: bar + ruleset_ids: + test-ruleset + retriever: + rrf: + retrievers: [ + { + standard: { + query: { + query_string: { + query: bar + } + } + } + }, + { + standard: { + query: { + query_string: { + query: baz + } + } + } + } + ] + + - match: { hits.total.value: 4 } + - match: { hits.hits.0._id: foo } + - match: { hits.hits.1._id: foo2 } + + +--- +"query rules retriever combined with rrf and pagination": + + - do: + search: + index: test-index1 + body: + size: 1 + from: 1 + retriever: + rule: + match_criteria: + foo: foo + bar: bar + ruleset_ids: + test-ruleset + retriever: + rrf: + retrievers: [ + { + standard: { + query: { + query_string: { + query: bar + } + } + } + }, + { + standard: { + query: { + query_string: { + query: baz + } + } + } + } + ] + + - match: { hits.total.value: 4 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._id: foo2 } + +--- +"query rules allowed to be defined as a sub-retriever": + + - do: + search: + index: test-index1 + body: + retriever: + rrf: + retrievers: [ + { + standard: { + query: { + query_string: { + query: bar + } + } + } + }, + { + rule: { + match_criteria: { + foo: foo, + bar: bar + }, + ruleset_ids: test-ruleset, + retriever: { + standard: { + query: { + query_string: { + query: baz + } + } + } + } + } + } + ] + + - match: { hits.total.value: 5 } + +--- +"query rules retriever supports explicit sort on score": + + - do: + search: + index: test-index1 + body: + retriever: + rule: + match_criteria: + foo: foo + bar: bar + ruleset_ids: + test-ruleset + retriever: + standard: + query: + query_string: + query: bar + sort: [ "_score" ] + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: foo } + - match: { hits.hits.1._id: foo2 } + - match: { hits.hits.2._id: bar_no_rule } + +--- +"query rules retriever supports explicit sort on score with secondary sort allowed": + + - do: + search: + index: test-index1 + body: + retriever: + rule: + match_criteria: + foo: foo + bar: bar + ruleset_ids: + test-ruleset + retriever: + standard: + query: + query_string: + query: bar + sort: [ "_score", { "text.keyword": "asc" } ] + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: foo } + - match: { hits.hits.1._id: foo2 } + - match: { hits.hits.2._id: bar_no_rule } + + +--- +"query rules retriever supports rank window size": + - skip: + features: headers + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-index1 + body: + retriever: + rule: + match_criteria: + foo: foo + bar: bar + ruleset_ids: + test-ruleset + retriever: + standard: + query: + query_string: + query: bar + rank_window_size: 1 + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: foo } + - match: { hits.hits.0._score: 1.7014124E38 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-index1 + body: + retriever: + rule: + match_criteria: + foo: foo + bar: bar + ruleset_ids: + test-ruleset + retriever: + standard: + query: + query_string: + query: bar + rank_window_size: 2 + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: foo } + - match: { hits.hits.0._score: 1.7014124E38 } + - match: { hits.hits.1._id: foo2 } + - match: { hits.hits.1._score: 1.7014122E38 } + - match: { hits.hits.2._id: bar_no_rule } + - match: { hits.hits.2._score: 0 } + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-index1 + body: + retriever: + rule: + match_criteria: + foo: foo + bar: bar + ruleset_ids: + test-ruleset + retriever: + standard: + query: + query_string: + query: bar + rank_window_size: 10 + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: foo } + - match: { hits.hits.0._score: 1.7014124E38 } + - match: { hits.hits.1._id: foo2 } + - match: { hits.hits.1._score: 1.7014122E38 } + - match: { hits.hits.2._id: bar_no_rule } + - match: { hits.hits.2._score: 0.87832844 } + +--- +"query rules will error if sorting by anything other than score": + + - do: + catch: /\[rule\] retriever only supports sort by score/ + search: + index: test-index1 + body: + retriever: + rule: + match_criteria: + foo: foo + bar: bar + ruleset_ids: + test-ruleset + retriever: + standard: + query: + query_string: + query: bar + sort: [ { "_id": "desc" } ] + +--- +"query rules retriever explains pinned documents as constant score queries": + - skip: + features: [ headers ] + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-index1 + body: + retriever: + rule: + match_criteria: + foo: foo + bar: bar + ruleset_ids: + test-ruleset + retriever: + standard: + query: + query_string: + query: bar + explain: true + + - match: { hits.hits.0._id: foo } + - match: { hits.hits.0._explanation.value: 1.7014124E38 } + - match: { hits.hits.0._explanation.description: "query rules evaluated rules from rulesets [test-ruleset] and match criteria {bar=bar, foo=foo}" } + - match: { hits.hits.0._explanation.details.0.value: 1 } + - match: { hits.hits.0._explanation.details.0.description: "doc [0] with an original score of [1.7014124E38] is at rank [1] from the following source queries." } + - match: { hits.hits.0._explanation.details.0.details.0.description: "sum of:" } + - match: { hits.hits.0._explanation.details.0.details.0.details.0.description: "max of:" } + - match: { hits.hits.0._explanation.details.0.details.0.details.0.details.0.description: "max of:" } + - match: { hits.hits.0._explanation.details.0.details.0.details.0.details.0.details.0.description: "ConstantScore(_id:([7e 8a]))^1.7014124E38" } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java index d5aef3b8808e8..a354ca4b4b31c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java @@ -19,6 +19,8 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicensedFeature; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -28,6 +30,7 @@ import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.application.analytics.AnalyticsTemplateRegistry; import org.elasticsearch.xpack.application.analytics.action.DeleteAnalyticsCollectionAction; import org.elasticsearch.xpack.application.analytics.action.GetAnalyticsCollectionAction; @@ -175,6 +178,7 @@ import org.elasticsearch.xpack.application.rules.action.TransportPutQueryRuleAction; import org.elasticsearch.xpack.application.rules.action.TransportPutQueryRulesetAction; import org.elasticsearch.xpack.application.rules.action.TransportTestQueryRulesetAction; +import org.elasticsearch.xpack.application.rules.retriever.QueryRuleRetrieverBuilder; import org.elasticsearch.xpack.application.search.SearchApplicationIndexService; import org.elasticsearch.xpack.application.search.action.DeleteSearchApplicationAction; import org.elasticsearch.xpack.application.search.action.GetSearchApplicationAction; @@ -237,6 +241,12 @@ protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + public static final LicensedFeature.Momentary QUERY_RULES_RETRIEVER_FEATURE = LicensedFeature.momentary( + null, + "rule-retriever", + License.OperationMode.ENTERPRISE + ); + @Override public List> getActions() { var usageAction = new ActionHandler<>(XPackUsageFeatureAction.ENTERPRISE_SEARCH, EnterpriseSearchUsageTransportAction.class); @@ -506,4 +516,9 @@ public List> getQueries() { new QuerySpec<>(RuleQueryBuilder.NAME, RuleQueryBuilder::new, p -> RuleQueryBuilder.fromXContent(p, getLicenseState())) ); } + + @Override + public List> getRetrievers() { + return List.of(new RetrieverSpec<>(new ParseField(QueryRuleRetrieverBuilder.NAME), QueryRuleRetrieverBuilder::fromXContent)); + } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java index 174bcbe886dfb..ae8e63bdb6420 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java @@ -12,6 +12,7 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.application.analytics.AnalyticsTemplateRegistry; import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; +import org.elasticsearch.xpack.application.rules.retriever.QueryRuleRetrieverBuilder; import java.util.Map; import java.util.Set; @@ -22,7 +23,7 @@ public class EnterpriseSearchFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(QUERY_RULES_TEST_API); + return Set.of(QUERY_RULES_TEST_API, QueryRuleRetrieverBuilder.QUERY_RULE_RETRIEVERS_SUPPORTED); } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java new file mode 100644 index 0000000000000..9ef2f630b50bd --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java @@ -0,0 +1,217 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.rules.retriever; + +import org.apache.lucene.search.ScoreDoc; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.retriever.CompoundRetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilderWrapper; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.application.EnterpriseSearch; +import org.elasticsearch.xpack.application.rules.RuleQueryBuilder; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A query rule retriever applies query rules defined in one or more rulesets to the underlying retriever. + */ +public final class QueryRuleRetrieverBuilder extends CompoundRetrieverBuilder { + + public static final String NAME = "rule"; + public static final NodeFeature QUERY_RULE_RETRIEVERS_SUPPORTED = new NodeFeature("query_rule_retriever_supported"); + + public static final ParseField RULESET_IDS_FIELD = new ParseField("ruleset_ids"); + public static final ParseField MATCH_CRITERIA_FIELD = new ParseField("match_criteria"); + public static final ParseField RETRIEVER_FIELD = new ParseField("retriever"); + public static final ParseField RANK_WINDOW_SIZE_FIELD = new ParseField("rank_window_size"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, + args -> { + List rulesetIds = (List) args[0]; + Map matchCriteria = (Map) args[1]; + RetrieverBuilder retrieverBuilder = (RetrieverBuilder) args[2]; + int rankWindowSize = args[3] == null ? DEFAULT_RANK_WINDOW_SIZE : (int) args[3]; + return new QueryRuleRetrieverBuilder(rulesetIds, matchCriteria, retrieverBuilder, rankWindowSize); + } + ); + + static { + PARSER.declareStringArray(constructorArg(), RULESET_IDS_FIELD); + PARSER.declareObject(constructorArg(), (p, c) -> p.map(), MATCH_CRITERIA_FIELD); + PARSER.declareNamedObject(constructorArg(), (p, c, n) -> { + RetrieverBuilder innerRetriever = p.namedObject(RetrieverBuilder.class, n, c); + c.trackRetrieverUsage(innerRetriever.getName()); + return innerRetriever; + }, RETRIEVER_FIELD); + PARSER.declareInt(optionalConstructorArg(), RANK_WINDOW_SIZE_FIELD); + RetrieverBuilder.declareBaseParserFields(NAME, PARSER); + } + + public static QueryRuleRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { + if (context.clusterSupportsFeature(QUERY_RULE_RETRIEVERS_SUPPORTED) == false) { + throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]"); + } + if (EnterpriseSearch.QUERY_RULES_RETRIEVER_FEATURE.check(XPackPlugin.getSharedLicenseState()) == false) { + throw LicenseUtils.newComplianceException("Query Rules"); + } + try { + return PARSER.apply(parser, context); + } catch (Exception e) { + throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e); + } + } + + private final List rulesetIds; + private final Map matchCriteria; + + public QueryRuleRetrieverBuilder( + List rulesetIds, + Map matchCriteria, + RetrieverBuilder retrieverBuilder, + int rankWindowSize + ) { + super(new ArrayList<>(), rankWindowSize); + this.rulesetIds = rulesetIds; + this.matchCriteria = matchCriteria; + addChild(new QueryRuleRetrieverBuilderWrapper(retrieverBuilder)); + } + + public QueryRuleRetrieverBuilder( + List rulesetIds, + Map matchCriteria, + List retrieverSource, + int rankWindowSize, + String retrieverName + ) { + super(retrieverSource, rankWindowSize); + this.rulesetIds = rulesetIds; + this.matchCriteria = matchCriteria; + this.retrieverName = retrieverName; + } + + @Override + public String getName() { + return NAME; + } + + public int rankWindowSize() { + return rankWindowSize; + } + + @Override + protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { + var ret = super.createSearchSourceBuilder(pit, retrieverBuilder); + checkValidSort(ret.sorts()); + ret.query(new RuleQueryBuilder(ret.query(), matchCriteria, rulesetIds)); + return ret; + } + + private static void checkValidSort(List> sortBuilders) { + if (sortBuilders.isEmpty()) { + return; + } + + if (sortBuilders.getFirst() instanceof ScoreSortBuilder == false) { + throw new IllegalArgumentException("[" + NAME + "] retriever only supports sort by score, got: " + sortBuilders); + } + } + + @Override + public void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.array(RULESET_IDS_FIELD.getPreferredName(), rulesetIds.toArray()); + builder.startObject(MATCH_CRITERIA_FIELD.getPreferredName()); + builder.mapContents(matchCriteria); + builder.endObject(); + builder.field(RETRIEVER_FIELD.getPreferredName(), innerRetrievers.getFirst().retriever()); + // We need to explicitly include this here as it's not propagated by the wrapper + builder.field(RANK_WINDOW_SIZE_FIELD.getPreferredName(), rankWindowSize); + } + + @Override + protected QueryRuleRetrieverBuilder clone(List newChildRetrievers) { + return new QueryRuleRetrieverBuilder(rulesetIds, matchCriteria, newChildRetrievers, rankWindowSize, retrieverName); + } + + @Override + protected RankDoc[] combineInnerRetrieverResults(List rankResults) { + assert rankResults.size() == 1; + ScoreDoc[] scoreDocs = rankResults.getFirst(); + RankDoc[] rankDocs = new RuleQueryRankDoc[scoreDocs.length]; + for (int i = 0; i < scoreDocs.length; i++) { + ScoreDoc scoreDoc = scoreDocs[i]; + rankDocs[i] = new RuleQueryRankDoc(scoreDoc.doc, scoreDoc.score, scoreDoc.shardIndex, rulesetIds, matchCriteria); + rankDocs[i].rank = i + 1; + } + return rankDocs; + } + + @Override + public boolean doEquals(Object o) { + QueryRuleRetrieverBuilder that = (QueryRuleRetrieverBuilder) o; + return super.doEquals(o) && Objects.equals(rulesetIds, that.rulesetIds) && Objects.equals(matchCriteria, that.matchCriteria); + } + + @Override + public int doHashCode() { + return Objects.hash(super.doHashCode(), rulesetIds, matchCriteria); + } + + /** + * We need to wrap the QueryRulesRetrieverBuilder in order to ensure that the top docs query that is generated + * by this retriever correctly generates and executes a Rule query. + */ + class QueryRuleRetrieverBuilderWrapper extends RetrieverBuilderWrapper { + protected QueryRuleRetrieverBuilderWrapper(RetrieverBuilder in) { + super(in); + } + + @Override + protected QueryRuleRetrieverBuilderWrapper clone(RetrieverBuilder in) { + return new QueryRuleRetrieverBuilderWrapper(in); + } + + @Override + public QueryBuilder topDocsQuery() { + return new RuleQueryBuilder(in.topDocsQuery(), matchCriteria, rulesetIds); + } + + @Override + public QueryBuilder explainQuery() { + return new RankDocsQueryBuilder( + in.getRankDocs(), + new QueryBuilder[] { new RuleQueryBuilder(in.explainQuery(), matchCriteria, rulesetIds) }, + true + ); + } + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/RuleQueryRankDoc.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/RuleQueryRankDoc.java new file mode 100644 index 0000000000000..9c329f20f0cb2 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/RuleQueryRankDoc.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.rules.retriever; + +import org.apache.lucene.search.Explanation; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class RuleQueryRankDoc extends RankDoc { + + public static final String NAME = "query_rule_rank_doc"; + + public final List rulesetIds; + public final Map matchCriteria; + + public RuleQueryRankDoc(int doc, float score, int shardIndex, List rulesetIds, Map matchCriteria) { + super(doc, score, shardIndex); + this.rulesetIds = rulesetIds; + this.matchCriteria = matchCriteria; + } + + public RuleQueryRankDoc(StreamInput in) throws IOException { + super(in); + rulesetIds = in.readStringCollectionAsImmutableList(); + matchCriteria = in.readGenericMap(); + } + + @Override + public Explanation explain(Explanation[] sources, String[] queryNames) { + + return Explanation.match( + score, + "query rules evaluated rules from rulesets " + rulesetIds + " and match criteria " + matchCriteria, + sources + ); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + out.writeStringCollection(rulesetIds); + out.writeGenericMap(matchCriteria); + } + + @Override + public boolean doEquals(RankDoc rd) { + RuleQueryRankDoc rqrd = (RuleQueryRankDoc) rd; + return Objects.equals(rulesetIds, rqrd.rulesetIds) && Objects.equals(matchCriteria, rqrd.matchCriteria); + } + + @Override + public int doHashCode() { + return Objects.hash(rulesetIds, matchCriteria); + } + + @Override + public String toString() { + return "QueryRuleRankDoc{" + + "doc=" + + doc + + ", shardIndex=" + + shardIndex + + ", score=" + + score + + ", rulesetIds=" + + rulesetIds + + ", matchCriteria=" + + matchCriteria + + "}"; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.array("rulesetIds", rulesetIds.toArray()); + builder.startObject("matchCriteria"); + builder.mapContents(matchCriteria); + builder.endObject(); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.QUERY_RULES_RETRIEVER; + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilderTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilderTests.java new file mode 100644 index 0000000000000..3081dcf11d95e --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilderTests.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.rules.retriever; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.search.retriever.TestRetrieverBuilder; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.usage.SearchUsage; +import org.elasticsearch.usage.SearchUsageHolder; +import org.elasticsearch.usage.UsageService; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.application.EnterpriseSearchModuleTestUtils; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class QueryRuleRetrieverBuilderTests extends AbstractXContentTestCase { + + public static QueryRuleRetrieverBuilder createRandomQueryRuleRetrieverBuilder() { + return new QueryRuleRetrieverBuilder( + randomBoolean() + ? List.of(randomAlphaOfLengthBetween(5, 10)) + : List.of(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)), + EnterpriseSearchModuleTestUtils.randomMatchCriteria(), + TestRetrieverBuilder.createRandomTestRetrieverBuilder(), + randomIntBetween(1, 100) + ); + } + + @Override + protected QueryRuleRetrieverBuilder createTestInstance() { + return createRandomQueryRuleRetrieverBuilder(); + } + + @Override + protected QueryRuleRetrieverBuilder doParseInstance(XContentParser parser) throws IOException { + return (QueryRuleRetrieverBuilder) RetrieverBuilder.parseTopLevelRetrieverBuilder( + parser, + new RetrieverParserContext( + new SearchUsage(), + nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == QueryRuleRetrieverBuilder.QUERY_RULE_RETRIEVERS_SUPPORTED + ) + ); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected String[] getShuffleFieldsExceptions() { + return new String[] { + QueryRuleRetrieverBuilder.MATCH_CRITERIA_FIELD.getPreferredName(), + QueryRuleRetrieverBuilder.RULESET_IDS_FIELD.getPreferredName() }; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new SearchModule(Settings.EMPTY, List.of()).getNamedXContents(); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + TestRetrieverBuilder.TEST_SPEC.getName(), + (p, c) -> TestRetrieverBuilder.TEST_SPEC.getParser().fromXContent(p, (RetrieverParserContext) c), + TestRetrieverBuilder.TEST_SPEC.getName().getForRestApiVersion() + ) + ); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + new ParseField(QueryRuleRetrieverBuilder.NAME), + (p, c) -> QueryRuleRetrieverBuilder.PARSER.apply(p, (RetrieverParserContext) c) + ) + ); + return new NamedXContentRegistry(entries); + } + + public void testParserDefaults() throws IOException { + // Inner retriever content only sent to parser + String json = """ + { + "match_criteria": { "foo": "bar" }, + "ruleset_ids": [ "baz" ], + "retriever": { "standard": { "query": { "query_string": { "query": "i like pugs" } } } } + }"""; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + QueryRuleRetrieverBuilder parsed = QueryRuleRetrieverBuilder.PARSER.parse( + parser, + new RetrieverParserContext(new SearchUsage(), nf -> true) + ); + assertEquals(DEFAULT_RANK_WINDOW_SIZE, parsed.rankWindowSize()); + } + } + + public void testQueryRuleRetrieverParsing() throws IOException { + String restContent = """ + { + "retriever": { + "rule": { + "retriever": { + "test": { + "value": "my-test-retriever" + } + }, + "ruleset_ids": [ + "baz" + ], + "match_criteria": { + "key": "value" + }, + "rank_window_size": 100, + "_name": "my_rule_retriever" + } + } + }"""; + + SearchUsageHolder searchUsageHolder = new UsageService().getSearchUsageHolder(); + try (XContentParser jsonParser = createParser(JsonXContent.jsonXContent, restContent)) { + SearchSourceBuilder source = new SearchSourceBuilder().parseXContent(jsonParser, true, searchUsageHolder, nf -> true); + assertThat(source.retriever(), instanceOf(QueryRuleRetrieverBuilder.class)); + QueryRuleRetrieverBuilder parsed = (QueryRuleRetrieverBuilder) source.retriever(); + assertThat(parsed.retrieverName(), equalTo("my_rule_retriever")); + try (XContentParser parseSerialized = createParser(JsonXContent.jsonXContent, Strings.toString(source))) { + SearchSourceBuilder deserializedSource = new SearchSourceBuilder().parseXContent( + parseSerialized, + true, + searchUsageHolder, + nf -> true + ); + assertThat(deserializedSource.retriever(), instanceOf(QueryRuleRetrieverBuilder.class)); + QueryRuleRetrieverBuilder deserialized = (QueryRuleRetrieverBuilder) source.retriever(); + assertThat(parsed, equalTo(deserialized)); + } + } + } + +} From bb6ec6ea6ca834a388c6591e4f72c8cc63062ab0 Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Mon, 28 Oct 2024 17:20:00 -0600 Subject: [PATCH 168/324] Unmute testPopulationOfCacheWhenLoadingPrivilegesForAllApplications (#115710) Unmutes the test which was not unmuted when the fix was added. --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 804f595e02162..8895ddb9e9587 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -20,9 +20,6 @@ tests: - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" -- class: org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests - method: testPopulationOfCacheWhenLoadingPrivilegesForAllApplications - issue: https://github.com/elastic/elasticsearch/issues/110789 - class: org.elasticsearch.nativeaccess.VectorSystemPropertyTests method: testSystemPropertyDisabled issue: https://github.com/elastic/elasticsearch/issues/110949 From edc917d6a7371ac8e761322e7c9abf1e5b60b624 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 29 Oct 2024 15:32:17 +1100 Subject: [PATCH 169/324] Close channel on stream handler exception (#115505) In case a stream handler throws uncaught exception, we should close the channel and release associated resources to avoid the channel entering a limbo state. This PR does that. Resolves: ES-9537 --- .../Netty4IncrementalRequestHandlingIT.java | 31 +++++++++++++++++++ .../netty4/Netty4HttpRequestBodyStream.java | 6 +++- .../elasticsearch/rest/BaseRestHandler.java | 9 ++++++ 3 files changed, 45 insertions(+), 1 deletion(-) diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java index b5c272f41a1d5..647d38c626c74 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java @@ -70,6 +70,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.transport.Transports; import org.elasticsearch.transport.netty4.Netty4Utils; import java.util.Collection; @@ -215,6 +216,29 @@ public void testServerCloseConnectionMidStream() throws Exception { } } + public void testServerExceptionMidStream() throws Exception { + try (var ctx = setupClientCtx()) { + var opaqueId = opaqueId(0); + + // write half of http request + ctx.clientChannel.write(httpRequest(opaqueId, 2 * 1024)); + ctx.clientChannel.writeAndFlush(randomContent(1024, false)); + + // await stream handler is ready and request full content + var handler = ctx.awaitRestChannelAccepted(opaqueId); + assertBusy(() -> assertNotNull(handler.stream.buf())); + assertFalse(handler.streamClosed); + + handler.shouldThrowInsideHandleChunk = true; + handler.stream.next(); + + assertBusy(() -> { + assertNull(handler.stream.buf()); + assertTrue(handler.streamClosed); + }); + } + } + // ensure that client's socket buffers data when server is not consuming data public void testClientBackpressure() throws Exception { try (var ctx = setupClientCtx()) { @@ -598,6 +622,7 @@ static class ServerRequestHandler implements BaseRestHandler.RequestBodyChunkCon RestChannel channel; boolean recvLast = false; volatile boolean streamClosed = false; + volatile boolean shouldThrowInsideHandleChunk = false; ServerRequestHandler(String opaqueId, Netty4HttpRequestBodyStream stream) { this.opaqueId = opaqueId; @@ -606,6 +631,12 @@ static class ServerRequestHandler implements BaseRestHandler.RequestBodyChunkCon @Override public void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boolean isLast) { + Transports.assertTransportThread(); + if (shouldThrowInsideHandleChunk) { + // Must close the chunk. This is the contract of this method. + chunk.close(); + throw new RuntimeException("simulated exception inside handleChunk"); + } recvChunks.add(new Chunk(chunk, isLast)); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java index 977846a2947d8..9a0dc09b7566c 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java @@ -71,7 +71,11 @@ public void next() { if (buf == null) { channel.read(); } else { - send(); + try { + send(); + } catch (Exception e) { + channel.pipeline().fireExceptionCaught(e); + } } }); } diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 0a99ee777bb76..c8cf0bf93879b 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -210,6 +210,15 @@ default void close() {} } public interface RequestBodyChunkConsumer extends RestChannelConsumer { + + /** + * Handle one chunk of the request body. The handler must close the chunk once it is no longer + * needed to avoid leaking. + * + * @param channel The rest channel associated to the request + * @param chunk The chunk of request body that is ready for processing + * @param isLast Whether the chunk is the last one of the request + */ void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boolean isLast); /** From b5390d83ebfad5fcefb174f77d6630e119a0109c Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 29 Oct 2024 15:58:09 +1100 Subject: [PATCH 170/324] Mute org.elasticsearch.search.StressSearchServiceReaperIT testStressReaper #115816 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 8895ddb9e9587..89b772b4921a2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -292,6 +292,9 @@ tests: - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/watcher/example-watches/example-watch-clusterstatus/line_137} issue: https://github.com/elastic/elasticsearch/issues/115809 +- class: org.elasticsearch.search.StressSearchServiceReaperIT + method: testStressReaper + issue: https://github.com/elastic/elasticsearch/issues/115816 # Examples: # From aa979b6f11bf5e231c7794274a54295d9f8e6357 Mon Sep 17 00:00:00 2001 From: kosabogi <105062005+kosabogi@users.noreply.github.com> Date: Tue, 29 Oct 2024 07:37:03 +0100 Subject: [PATCH 171/324] Adds 8.16 version to css matrix (#115788) --- .../ccs-version-compat-matrix.asciidoc | 43 ++++++++++--------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc index 6b9b13b124e9f..5859ccd03e511 100644 --- a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc +++ b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc @@ -1,24 +1,25 @@ |==== -| 19+^h| Remote cluster version +| 20+^h| Remote cluster version h| Local cluster version - | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 | 8.6 | 8.7 | 8.8 | 8.9 | 8.10 | 8.11 | 8.12 | 8.13 | 8.14 | 8.15 -| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.11 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.12 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.13 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.14 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.15 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} + | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 | 8.6 | 8.7 | 8.8 | 8.9 | 8.10 | 8.11 | 8.12 | 8.13 | 8.14 | 8.15 | 8.16 +| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.11 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.12 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.13 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.14 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.15 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.16 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} |==== From b97b6637a6d342b24f3a7e56e9026c49c837b92b Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:57:03 +0200 Subject: [PATCH 172/324] [TEST] Run tsdb tests with both base and trial licenses (#115653) * Run tsdb tests with both base and trial licenses. * ignore license error in serverless * update * update * update --- .../DisabledSecurityDataStreamTestCase.java | 1 - .../datastreams/TsdbDataStreamRestIT.java | 29 +++++++++++++++---- .../DataStreamsClientYamlTestSuiteIT.java | 3 ++ .../org/elasticsearch/test/ESTestCase.java | 3 +- 4 files changed, 28 insertions(+), 8 deletions(-) diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DisabledSecurityDataStreamTestCase.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DisabledSecurityDataStreamTestCase.java index 619bfd74d853c..9839f9abb080e 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DisabledSecurityDataStreamTestCase.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DisabledSecurityDataStreamTestCase.java @@ -28,7 +28,6 @@ public abstract class DisabledSecurityDataStreamTestCase extends ESRestTestCase public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) .feature(FeatureFlag.FAILURE_STORE_ENABLED) - .setting("xpack.license.self_generated.type", "trial") .setting("xpack.security.enabled", "false") .setting("xpack.watcher.enabled", "false") .build(); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java index d820c6ddbf431..9be0c18d18213 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java @@ -25,7 +25,6 @@ import java.util.Set; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; -import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -54,6 +53,7 @@ public class TsdbDataStreamRestIT extends DisabledSecurityDataStreamTestCase { "number_of_replicas": 1, "number_of_shards": 2, "mode": "time_series" + SOURCEMODE } }, "mappings":{ @@ -201,15 +201,35 @@ public class TsdbDataStreamRestIT extends DisabledSecurityDataStreamTestCase { {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "elephant", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876eb4", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}} """; + private static String getTemplate() { + return TEMPLATE.replace("SOURCEMODE", randomFrom("", """ + , "mapping": { "source": { "mode": "stored" } }""", """ + , "mapping": { "source": { "mode": "synthetic" } }""")); + } + + private static boolean trialStarted = false; + @Before public void setup() throws IOException { + if (trialStarted == false) { + // Start trial to support synthetic source. + Request startTrial = new Request("POST", "/_license/start_trial"); + startTrial.addParameter("acknowledge", "true"); + try { + client().performRequest(startTrial); + } catch (Exception e) { + // Ignore failures, the API is not present in Serverless. + } + trialStarted = true; + } + // Add component template: var request = new Request("POST", "/_component_template/custom_template"); request.setJsonEntity(COMPONENT_TEMPLATE); assertOK(client().performRequest(request)); // Add composable index template request = new Request("POST", "/_index_template/1"); - request.setJsonEntity(TEMPLATE); + request.setJsonEntity(getTemplate()); assertOK(client().performRequest(request)); } @@ -220,7 +240,7 @@ public void testTsdbDataStreams() throws Exception { public void testTsdbDataStreamsNanos() throws Exception { // Overwrite template to use date_nanos field type: var putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); - putComposableIndexTemplateRequest.setJsonEntity(TEMPLATE.replace("date", "date_nanos")); + putComposableIndexTemplateRequest.setJsonEntity(getTemplate().replace("date", "date_nanos")); assertOK(client().performRequest(putComposableIndexTemplateRequest)); assertTsdbDataStream(); @@ -407,7 +427,6 @@ public void testSimulateTsdbDataStreamTemplate() throws Exception { var response = client().performRequest(simulateIndexTemplateRequest); assertOK(response); var responseBody = entityAsMap(response); - assertThat(ObjectPath.evaluate(responseBody, "template.settings.index"), aMapWithSize(6)); assertThat(ObjectPath.evaluate(responseBody, "template.settings.index.number_of_shards"), equalTo("2")); assertThat(ObjectPath.evaluate(responseBody, "template.settings.index.number_of_replicas"), equalTo("1")); assertThat(ObjectPath.evaluate(responseBody, "template.settings.index.mode"), equalTo("time_series")); @@ -493,7 +512,7 @@ public void testMigrateRegularDataStreamToTsdbDataStream() throws Exception { // Update template putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); - putComposableIndexTemplateRequest.setJsonEntity(TEMPLATE); + putComposableIndexTemplateRequest.setJsonEntity(getTemplate()); assertOK(client().performRequest(putComposableIndexTemplateRequest)); var rolloverRequest = new Request("POST", "/k8s/_rollover"); diff --git a/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java b/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java index 2030ad251a2be..22f2a9fa394fb 100644 --- a/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java +++ b/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java @@ -50,6 +50,9 @@ private static ElasticsearchCluster createCluster() { .setting("xpack.security.enabled", "true") .keystore("bootstrap.password", "x-pack-test-password") .user("x_pack_rest_user", "x-pack-test-password"); + if (initTestSeed().nextBoolean()) { + clusterBuilder.setting("xpack.license.self_generated.type", "trial"); + } boolean setNodes = Boolean.parseBoolean(System.getProperty("yaml.rest.tests.set_num_nodes", "true")); if (setNodes) { clusterBuilder.nodes(2); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index e1ba661eb24d4..5d7012db80a6e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -210,7 +210,6 @@ import java.util.stream.LongStream; import java.util.stream.Stream; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; import static java.util.Collections.emptyMap; import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; import static org.hamcrest.Matchers.anyOf; @@ -381,7 +380,7 @@ public void append(LogEvent event) { JAVA_ZONE_IDS = ZoneId.getAvailableZoneIds().stream().filter(unsupportedZoneIdsPredicate.negate()).sorted().toList(); } - static Random initTestSeed() { + protected static Random initTestSeed() { String inputSeed = System.getProperty("tests.seed"); long seed; if (inputSeed == null) { From 2522c9877a5d5f87d70e23fd7bdf12d9ff6e08da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Tue, 29 Oct 2024 09:51:39 +0100 Subject: [PATCH 173/324] Fix testApmIntegration histogram assertions (#115578) --- muted-tests.yml | 3 - .../test/apmintegration/MetricsApmIT.java | 55 ++++++++++++------- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 89b772b4921a2..610681949ade0 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -247,9 +247,6 @@ tests: - class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT method: testDeploymentSurvivesRestart {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/115528 -- class: org.elasticsearch.test.apmintegration.MetricsApmIT - method: testApmIntegration - issue: https://github.com/elastic/elasticsearch/issues/115415 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/esql/esql-across-clusters/line_197} issue: https://github.com/elastic/elasticsearch/issues/115575 diff --git a/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java b/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java index f988a632437e5..e974c31bf5c08 100644 --- a/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java +++ b/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java @@ -31,7 +31,10 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import static java.util.Map.entry; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; @@ -58,27 +61,21 @@ protected String getTestRestCluster() { @SuppressWarnings("unchecked") public void testApmIntegration() throws Exception { - Map>> sampleAssertions = new HashMap<>( + Map>> valueAssertions = new HashMap<>( Map.ofEntries( assertion("es.test.long_counter.total", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), assertion("es.test.double_counter.total", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), assertion("es.test.async_double_counter.total", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), assertion("es.test.async_long_counter.total", m -> (Integer) m.get("value"), equalTo(1)), assertion("es.test.double_gauge.current", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), - assertion("es.test.long_gauge.current", m -> (Integer) m.get("value"), equalTo(1)), - assertion( - "es.test.double_histogram.histogram", - m -> ((Collection) m.get("counts")).stream().mapToInt(Integer::intValue).sum(), - equalTo(2) - ), - assertion( - "es.test.long_histogram.histogram", - m -> ((Collection) m.get("counts")).stream().mapToInt(Integer::intValue).sum(), - equalTo(2) - ) + assertion("es.test.long_gauge.current", m -> (Integer) m.get("value"), equalTo(1)) ) ); + Map histogramAssertions = new HashMap<>( + Map.ofEntries(entry("es.test.double_histogram.histogram", 2), entry("es.test.long_histogram.histogram", 2)) + ); + CountDownLatch finished = new CountDownLatch(1); // a consumer that will remove the assertions from a map once it matched @@ -91,21 +88,35 @@ public void testApmIntegration() throws Exception { var samples = (Map) metricset.get("samples"); samples.forEach((key, value) -> { - var assertion = sampleAssertions.get(key);// sample name - if (assertion != null) { - logger.info("Matched {}", key); + var valueAssertion = valueAssertions.get(key);// sample name + if (valueAssertion != null) { + logger.info("Matched {}:{}", key, value); var sampleObject = (Map) value; - if (assertion.test(sampleObject)) {// sample object + if (valueAssertion.test(sampleObject)) {// sample object + logger.info("{} assertion PASSED", key); + valueAssertions.remove(key); + } else { + logger.error("{} assertion FAILED", key); + } + } + var histogramAssertion = histogramAssertions.get(key); + if (histogramAssertion != null) { + logger.info("Matched {}:{}", key, value); + var samplesObject = (Map) value; + var counts = ((Collection) samplesObject.get("counts")).stream().mapToInt(Integer::intValue).sum(); + var remaining = histogramAssertion - counts; + if (remaining == 0) { logger.info("{} assertion PASSED", key); - sampleAssertions.remove(key); + histogramAssertions.remove(key); } else { - logger.error("{} assertion FAILED: {}", key, sampleObject.get("value")); + logger.info("{} assertion PENDING: {} remaining", key, remaining); + histogramAssertions.put(key, remaining); } } }); } - if (sampleAssertions.isEmpty()) { + if (valueAssertions.isEmpty()) { finished.countDown(); } }; @@ -115,7 +126,9 @@ public void testApmIntegration() throws Exception { client().performRequest(new Request("GET", "/_use_apm_metrics")); var completed = finished.await(30, TimeUnit.SECONDS); - assertTrue("Timeout when waiting for assertions to complete. Remaining assertions to match: " + sampleAssertions, completed); + var remainingAssertions = Stream.concat(valueAssertions.keySet().stream(), histogramAssertions.keySet().stream()) + .collect(Collectors.joining()); + assertTrue("Timeout when waiting for assertions to complete. Remaining assertions to match: " + remainingAssertions, completed); } private Map.Entry>> assertion( @@ -123,7 +136,7 @@ private Map.Entry>> assertion( Function, T> accessor, Matcher expected ) { - return Map.entry(sampleKeyName, new Predicate<>() { + return entry(sampleKeyName, new Predicate<>() { @Override public boolean test(Map sampleObject) { return expected.matches(accessor.apply(sampleObject)); From d0f71fc4295fff95a521e18a3235acfd81cbd545 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 29 Oct 2024 10:23:53 +0100 Subject: [PATCH 174/324] Stop instantiating RankFeaturePhase unnecessarily (#115724) We should not create the phase instance when we know we won't be doing any rank feature execution up-front. An instance of these isn't free and entails creating an array of searched_shard_count size which along is non-trivial. Also, this needlessly obscured the threading logic for fetch which has already led to a bug before. --- .../action/search/RankFeaturePhase.java | 16 +-- .../SearchDfsQueryThenFetchAsyncAction.java | 2 +- .../SearchQueryThenFetchAsyncAction.java | 16 ++- .../action/search/RankFeaturePhaseTests.java | 133 ++---------------- 4 files changed, 33 insertions(+), 134 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java index 81053a70eca9f..dd3c28bba0fce 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java @@ -42,15 +42,17 @@ public class RankFeaturePhase extends SearchPhase { final SearchPhaseResults rankPhaseResults; private final AggregatedDfs aggregatedDfs; private final SearchProgressListener progressListener; - private final Client client; + private final RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext; RankFeaturePhase( SearchPhaseResults queryPhaseResults, AggregatedDfs aggregatedDfs, SearchPhaseContext context, - Client client + RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext ) { super("rank-feature"); + assert rankFeaturePhaseRankCoordinatorContext != null; + this.rankFeaturePhaseRankCoordinatorContext = rankFeaturePhaseRankCoordinatorContext; if (context.getNumShards() != queryPhaseResults.getNumShards()) { throw new IllegalStateException( "number of shards must match the length of the query results but doesn't:" @@ -65,17 +67,10 @@ public class RankFeaturePhase extends SearchPhase { this.rankPhaseResults = new ArraySearchPhaseResults<>(context.getNumShards()); context.addReleasable(rankPhaseResults); this.progressListener = context.getTask().getProgressListener(); - this.client = client; } @Override public void run() { - RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext = coordinatorContext(context.getRequest().source()); - if (rankFeaturePhaseRankCoordinatorContext == null) { - moveToNextPhase(queryPhaseResults, null); - return; - } - context.execute(new AbstractRunnable() { @Override protected void doRun() throws Exception { @@ -122,7 +117,7 @@ void innerRun(RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordin } } - private RankFeaturePhaseRankCoordinatorContext coordinatorContext(SearchSourceBuilder source) { + static RankFeaturePhaseRankCoordinatorContext coordinatorContext(SearchSourceBuilder source, Client client) { return source == null || source.rankBuilder() == null ? null : source.rankBuilder().buildRankFeaturePhaseCoordinatorContext(source.size(), source.from(), client); @@ -175,7 +170,6 @@ private void onPhaseDone( RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext, SearchPhaseController.ReducedQueryPhase reducedQueryPhase ) { - assert rankFeaturePhaseRankCoordinatorContext != null; ThreadedActionListener rankResultListener = new ThreadedActionListener<>(context, new ActionListener<>() { @Override public void onResponse(RankFeatureDoc[] docsWithUpdatedScores) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 87b16da2bb78a..5b7ee04d020fc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -105,7 +105,7 @@ protected SearchPhase getNextPhase(final SearchPhaseResults res aggregatedDfs, mergedKnnResults, queryPhaseResultConsumer, - (queryResults) -> new RankFeaturePhase(queryResults, aggregatedDfs, context, client), + (queryResults) -> SearchQueryThenFetchAsyncAction.nextPhase(client, context, queryResults, aggregatedDfs), context ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index ecf81980f894a..e0ad4691fa991 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; @@ -125,9 +126,22 @@ && getRequest().scroll() == null super.onShardResult(result, shardIt); } + static SearchPhase nextPhase( + Client client, + SearchPhaseContext context, + SearchPhaseResults queryResults, + AggregatedDfs aggregatedDfs + ) { + var rankFeaturePhaseCoordCtx = RankFeaturePhase.coordinatorContext(context.getRequest().source(), client); + if (rankFeaturePhaseCoordCtx == null) { + return new FetchSearchPhase(queryResults, aggregatedDfs, context, null); + } + return new RankFeaturePhase(queryResults, aggregatedDfs, context, rankFeaturePhaseCoordCtx); + } + @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, SearchPhaseContext context) { - return new RankFeaturePhase(results, null, this, client); + return nextPhase(client, this, results, null); } private ShardSearchRequest rewriteShardSearchRequest(ShardSearchRequest request) { diff --git a/server/src/test/java/org/elasticsearch/action/search/RankFeaturePhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/RankFeaturePhaseTests.java index 82463d601d164..a4201716d31e2 100644 --- a/server/src/test/java/org/elasticsearch/action/search/RankFeaturePhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/RankFeaturePhaseTests.java @@ -287,95 +287,6 @@ public void sendExecuteRankFeature( } } - public void testRankFeaturePhaseNoNeedForFetchingFieldData() { - AtomicBoolean phaseDone = new AtomicBoolean(false); - final ScoreDoc[][] finalResults = new ScoreDoc[1][1]; - - // build the appropriate RankBuilder; using a null rankFeaturePhaseRankShardContext - // and non-field based rankFeaturePhaseRankCoordinatorContext - RankBuilder rankBuilder = rankBuilder( - DEFAULT_RANK_WINDOW_SIZE, - defaultQueryPhaseRankShardContext(Collections.emptyList(), DEFAULT_RANK_WINDOW_SIZE), - negatingScoresQueryFeaturePhaseRankCoordinatorContext(DEFAULT_SIZE, DEFAULT_FROM, DEFAULT_RANK_WINDOW_SIZE), - null, - null - ); - // create a SearchSource to attach to the request - SearchSourceBuilder searchSourceBuilder = searchSourceWithRankBuilder(rankBuilder); - - SearchPhaseController controller = searchPhaseController(); - SearchShardTarget shard1Target = new SearchShardTarget("node0", new ShardId("test", "na", 0), null); - - MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); - mockSearchPhaseContext.getRequest().source(searchSourceBuilder); - try (SearchPhaseResults results = searchPhaseResults(controller, mockSearchPhaseContext)) { - // generate the QuerySearchResults that the RankFeaturePhase would have received from QueryPhase - // here we have 2 results, with doc ids 1 and 2 - final ShardSearchContextId ctx = new ShardSearchContextId(UUIDs.base64UUID(), 123); - QuerySearchResult queryResult = new QuerySearchResult(ctx, shard1Target, null); - - try { - queryResult.setShardIndex(shard1Target.getShardId().getId()); - int totalHits = randomIntBetween(2, 100); - final ScoreDoc[] shard1Docs = new ScoreDoc[] { new ScoreDoc(1, 10.0F), new ScoreDoc(2, 9.0F) }; - populateQuerySearchResult(queryResult, totalHits, shard1Docs); - results.consumeResult(queryResult, () -> {}); - // do not make an actual http request, but rather generate the response - // as if we would have read it from the RankFeatureShardPhase - mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - public void sendExecuteRankFeature( - Transport.Connection connection, - final RankFeatureShardRequest request, - SearchTask task, - final ActionListener listener - ) { - // make sure to match the context id generated above, otherwise we throw - if (request.contextId().getId() == 123 && Arrays.equals(request.getDocIds(), new int[] { 1, 2 })) { - listener.onFailure(new UnsupportedOperationException("should not have reached here")); - } else { - listener.onFailure(new MockDirectoryWrapper.FakeIOException()); - } - } - }; - } finally { - queryResult.decRef(); - } - // override the RankFeaturePhase to skip moving to next phase - RankFeaturePhase rankFeaturePhase = rankFeaturePhase(results, mockSearchPhaseContext, finalResults, phaseDone); - try { - rankFeaturePhase.run(); - mockSearchPhaseContext.assertNoFailure(); - assertTrue(mockSearchPhaseContext.failures.isEmpty()); - assertTrue(phaseDone.get()); - - // in this case there was no additional "RankFeature" results on shards, so we shortcut directly to queryPhaseResults - SearchPhaseResults rankPhaseResults = rankFeaturePhase.queryPhaseResults; - assertNotNull(rankPhaseResults.getAtomicArray()); - assertEquals(1, rankPhaseResults.getAtomicArray().length()); - assertEquals(1, rankPhaseResults.getSuccessfulResults().count()); - - SearchPhaseResult shardResult = rankPhaseResults.getAtomicArray().get(0); - assertTrue(shardResult instanceof QuerySearchResult); - QuerySearchResult rankResult = (QuerySearchResult) shardResult; - assertNull(rankResult.rankFeatureResult()); - assertNotNull(rankResult.queryResult()); - - List expectedFinalResults = List.of( - new ExpectedRankFeatureDoc(2, 1, -9.0F, null), - new ExpectedRankFeatureDoc(1, 2, -10.0F, null) - ); - assertFinalResults(finalResults[0], expectedFinalResults); - } finally { - rankFeaturePhase.rankPhaseResults.close(); - } - } finally { - if (mockSearchPhaseContext.searchResponse.get() != null) { - mockSearchPhaseContext.searchResponse.get().decRef(); - } - } - } - public void testRankFeaturePhaseOneShardFails() { AtomicBoolean phaseDone = new AtomicBoolean(false); final ScoreDoc[][] finalResults = new ScoreDoc[1][1]; @@ -534,7 +445,12 @@ public void sendExecuteRankFeature( queryResult.decRef(); } // override the RankFeaturePhase to raise an exception - RankFeaturePhase rankFeaturePhase = new RankFeaturePhase(results, null, mockSearchPhaseContext, null) { + RankFeaturePhase rankFeaturePhase = new RankFeaturePhase( + results, + null, + mockSearchPhaseContext, + defaultRankFeaturePhaseRankCoordinatorContext(DEFAULT_SIZE, DEFAULT_FROM, DEFAULT_RANK_WINDOW_SIZE) + ) { @Override void innerRun(RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext) { throw new IllegalArgumentException("simulated failure"); @@ -890,36 +806,6 @@ public RankFeatureDoc[] rankAndPaginate(RankFeatureDoc[] rankFeatureDocs) { }; } - private QueryPhaseRankCoordinatorContext negatingScoresQueryFeaturePhaseRankCoordinatorContext(int size, int from, int rankWindowSize) { - return new QueryPhaseRankCoordinatorContext(rankWindowSize) { - @Override - public ScoreDoc[] rankQueryPhaseResults( - List rankSearchResults, - SearchPhaseController.TopDocsStats topDocsStats - ) { - List docScores = new ArrayList<>(); - for (QuerySearchResult phaseResults : rankSearchResults) { - docScores.addAll(Arrays.asList(phaseResults.topDocs().topDocs.scoreDocs)); - } - ScoreDoc[] sortedDocs = docScores.toArray(new ScoreDoc[0]); - // negating scores - Arrays.stream(sortedDocs).forEach(doc -> doc.score *= -1); - - Arrays.sort(sortedDocs, Comparator.comparing((ScoreDoc doc) -> doc.score).reversed()); - sortedDocs = Arrays.stream(sortedDocs).limit(rankWindowSize).toArray(ScoreDoc[]::new); - RankFeatureDoc[] topResults = new RankFeatureDoc[Math.max(0, Math.min(size, sortedDocs.length - from))]; - // perform pagination - for (int rank = 0; rank < topResults.length; ++rank) { - ScoreDoc base = sortedDocs[from + rank]; - topResults[rank] = new RankFeatureDoc(base.doc, base.score, base.shardIndex); - topResults[rank].rank = from + rank + 1; - } - topDocsStats.fetchHits = topResults.length; - return topResults; - } - }; - } - private RankFeaturePhaseRankShardContext defaultRankFeaturePhaseRankShardContext(String field) { return new RankFeaturePhaseRankShardContext(field) { @Override @@ -1134,7 +1020,12 @@ private RankFeaturePhase rankFeaturePhase( AtomicBoolean phaseDone ) { // override the RankFeaturePhase to skip moving to next phase - return new RankFeaturePhase(results, null, mockSearchPhaseContext, null) { + return new RankFeaturePhase( + results, + null, + mockSearchPhaseContext, + RankFeaturePhase.coordinatorContext(mockSearchPhaseContext.getRequest().source(), null) + ) { @Override public void moveToNextPhase( SearchPhaseResults phaseResults, From c9a989407ea8b12d34e8675f324b5924e87fdf85 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Tue, 29 Oct 2024 10:15:40 +0000 Subject: [PATCH 175/324] Add unit test for negative values in ByteBufferStreamInput::readVLong (#115749) --- .../io/stream/ByteBufferStreamInputTests.java | 48 +++++++++++++++++++ .../org/elasticsearch/test/ESTestCase.java | 7 +++ 2 files changed, 55 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/ByteBufferStreamInputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/ByteBufferStreamInputTests.java index ef386afdbabbc..971bcb7f0a2e6 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/ByteBufferStreamInputTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/ByteBufferStreamInputTests.java @@ -21,4 +21,52 @@ protected StreamInput getStreamInput(BytesReference bytesReference) throws IOExc final BytesRef bytesRef = bytesReference.toBytesRef(); return new ByteBufferStreamInput(ByteBuffer.wrap(bytesRef.bytes, bytesRef.offset, bytesRef.length)); } + + public void testReadVLongNegative() throws IOException { + for (int i = 0; i < 1024; i++) { + long write = randomNegativeLong(); + BytesStreamOutput out = new BytesStreamOutput(); + out.writeVLongNoCheck(write); + long read = getStreamInput(out.bytes()).readVLong(); + assertEquals(write, read); + } + } + + public void testReadVLongBounds() throws IOException { + long write = Long.MAX_VALUE; + BytesStreamOutput out = new BytesStreamOutput(); + out.writeVLongNoCheck(write); + long read = getStreamInput(out.bytes()).readVLong(); + assertEquals(write, read); + + write = Long.MIN_VALUE; + out = new BytesStreamOutput(); + out.writeVLongNoCheck(write); + read = getStreamInput(out.bytes()).readVLong(); + assertEquals(write, read); + } + + public void testReadVIntNegative() throws IOException { + for (int i = 0; i < 1024; i++) { + int write = randomNegativeInt(); + BytesStreamOutput out = new BytesStreamOutput(); + out.writeVInt(write); + int read = getStreamInput(out.bytes()).readVInt(); + assertEquals(write, read); + } + } + + public void testReadVIntBounds() throws IOException { + int write = Integer.MAX_VALUE; + BytesStreamOutput out = new BytesStreamOutput(); + out.writeVInt(write); + long read = getStreamInput(out.bytes()).readVInt(); + assertEquals(write, read); + + write = Integer.MIN_VALUE; + out = new BytesStreamOutput(); + out.writeVInt(write); + read = getStreamInput(out.bytes()).readVInt(); + assertEquals(write, read); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 5d7012db80a6e..5bfcd54e963b3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1032,6 +1032,13 @@ public static long randomNonNegativeLong() { return randomLong() & Long.MAX_VALUE; } + /** + * @return a long between Long.MIN_VALUE and -1 (inclusive) chosen uniformly at random. + */ + public static long randomNegativeLong() { + return randomLong() | Long.MIN_VALUE; + } + /** * @return an int between 0 and Integer.MAX_VALUE (inclusive) chosen uniformly at random. */ From 7feb4d51591389fbaefe003f6e58596a7e5cbddb Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Tue, 29 Oct 2024 12:03:06 +0100 Subject: [PATCH 176/324] =?UTF-8?q?Revert=20"[test]=20Dynamically=20pick?= =?UTF-8?q?=20up=20the=20upper=20bound=20snapshot=20index=20version=20(#1?= =?UTF-8?q?=E2=80=A6"=20(#115827)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 32dee6aaaeb18a8d6d4f0fee8bbf338e8991650d. --- .../snapshots/AbstractSnapshotIntegTestCase.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 7a72a7bd0daf0..8bc81fef2157d 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.plugins.Plugin; @@ -365,9 +366,15 @@ protected static Settings.Builder indexSettingsNoReplicas(int shards) { /** * Randomly write an empty snapshot of an older version to an empty repository to simulate an older repository metadata format. */ + @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) + // This used to pick an index version from 7.0.0 to 8.9.0. The minimum now is 8.0.0 but it's not clear what the upper range should be protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) throws Exception { if (randomBoolean() && randomBoolean()) { - initWithSnapshotVersion(repoName, repoPath, IndexVersionUtils.randomVersion()); + initWithSnapshotVersion( + repoName, + repoPath, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_9_0) + ); } } From ef2cf37a6df0e6797417f476061e46cd6bfea760 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Tue, 29 Oct 2024 12:03:19 +0100 Subject: [PATCH 177/324] Revert "Don't return or accept `node_version` in the Desired Nodes API (#114580)" (#115829) This reverts commit c64226c3503b458c3285064d95528932d324177d. --- .../upgrades/DesiredNodesUpgradeIT.java | 13 ++- rest-api-spec/build.gradle | 2 - .../test/cluster.desired_nodes/10_basic.yml | 95 +++++++++++++++++++ .../cluster/metadata/DesiredNode.java | 77 ++++++++++++++- .../metadata/DesiredNodeWithStatus.java | 5 +- .../cluster/RestUpdateDesiredNodesAction.java | 12 +++ 6 files changed, 191 insertions(+), 13 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index 17618d5439d48..e0d1e7aafa637 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; +import org.elasticsearch.Build; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; @@ -81,7 +82,8 @@ private void assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent() throws Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), 1238.49922909, ByteSizeValue.ofGb(32), - ByteSizeValue.ofGb(128) + ByteSizeValue.ofGb(128), + clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() ) ) .toList(); @@ -151,7 +153,8 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), processorsPrecision == ProcessorsPrecision.DOUBLE ? randomDoubleProcessorCount() : 0.5f, ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)) + ByteSizeValue.ofGb(randomIntBetween(128, 256)), + clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() ) ) .toList(); @@ -164,7 +167,8 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), new DesiredNode.ProcessorsRange(minProcessors, minProcessors + randomIntBetween(10, 20)), ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)) + ByteSizeValue.ofGb(randomIntBetween(128, 256)), + clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() ); }).toList(); } @@ -178,7 +182,8 @@ private void addClusterNodesToDesiredNodesWithIntegerProcessors(int version) thr Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), randomIntBetween(1, 24), ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)) + ByteSizeValue.ofGb(randomIntBetween(128, 256)), + clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() ) ) .toList(); diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 1a398f79085e7..6cc2028bffa39 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -61,6 +61,4 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") - task.skipTest("cluster.desired_nodes/10_basic/Test delete desired nodes with node_version generates a warning", "node_version warning is removed in 9.0") - task.skipTest("cluster.desired_nodes/10_basic/Test update desired nodes with node_version generates a warning", "node_version warning is removed in 9.0") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml index a45146a4e147a..1d1aa524ffb21 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml @@ -59,6 +59,61 @@ teardown: - contains: { nodes: { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb" } } - contains: { nodes: { settings: { node: { name: "instance-000188" } }, processors: 16.0, memory: "128gb", storage: "1tb" } } --- +"Test update desired nodes with node_version generates a warning": + - skip: + reason: "contains is a newly added assertion" + features: ["contains", "allowed_warnings"] + - do: + cluster.state: {} + + # Get master node id + - set: { master_node: master } + + - do: + nodes.info: {} + - set: { nodes.$master.version: es_version } + + - do: + _internal.update_desired_nodes: + history_id: "test" + version: 1 + body: + nodes: + - { settings: { "node.name": "instance-000187" }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } + allowed_warnings: + - "[version removal] Specifying node_version in desired nodes requests is deprecated." + - match: { replaced_existing_history_id: false } + + - do: + _internal.get_desired_nodes: {} + - match: + $body: + history_id: "test" + version: 1 + nodes: + - { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } + + - do: + _internal.update_desired_nodes: + history_id: "test" + version: 2 + body: + nodes: + - { settings: { "node.name": "instance-000187" }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } + - { settings: { "node.name": "instance-000188" }, processors: 16.0, memory: "128gb", storage: "1tb", node_version: $es_version } + allowed_warnings: + - "[version removal] Specifying node_version in desired nodes requests is deprecated." + - match: { replaced_existing_history_id: false } + + - do: + _internal.get_desired_nodes: {} + + - match: { history_id: "test" } + - match: { version: 2 } + - length: { nodes: 2 } + - contains: { nodes: { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } } + - contains: { nodes: { settings: { node: { name: "instance-000188" } }, processors: 16.0, memory: "128gb", storage: "1tb", node_version: $es_version } } +--- "Test update move to a new history id": - skip: reason: "contains is a newly added assertion" @@ -144,6 +199,46 @@ teardown: _internal.get_desired_nodes: {} - match: { status: 404 } --- +"Test delete desired nodes with node_version generates a warning": + - skip: + features: allowed_warnings + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.info: {} + - set: { nodes.$master.version: es_version } + + - do: + _internal.update_desired_nodes: + history_id: "test" + version: 1 + body: + nodes: + - { settings: { "node.external_id": "instance-000187" }, processors: 8.0, memory: "64gb", storage: "128gb", node_version: $es_version } + allowed_warnings: + - "[version removal] Specifying node_version in desired nodes requests is deprecated." + - match: { replaced_existing_history_id: false } + + - do: + _internal.get_desired_nodes: {} + - match: + $body: + history_id: "test" + version: 1 + nodes: + - { settings: { node: { external_id: "instance-000187" } }, processors: 8.0, memory: "64gb", storage: "128gb", node_version: $es_version } + + - do: + _internal.delete_desired_nodes: {} + + - do: + catch: missing + _internal.get_desired_nodes: {} + - match: { status: 404 } +--- "Test update desired nodes is idempotent": - skip: reason: "contains is a newly added assertion" diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index fe72a59565cf6..fb8559b19d81d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -14,6 +14,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -21,6 +22,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.Processors; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -36,6 +38,7 @@ import java.util.Set; import java.util.TreeSet; import java.util.function.Predicate; +import java.util.regex.Pattern; import static java.lang.String.format; import static org.elasticsearch.node.Node.NODE_EXTERNAL_ID_SETTING; @@ -55,6 +58,8 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparabl private static final ParseField PROCESSORS_RANGE_FIELD = new ParseField("processors_range"); private static final ParseField MEMORY_FIELD = new ParseField("memory"); private static final ParseField STORAGE_FIELD = new ParseField("storage"); + @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated field + private static final ParseField VERSION_FIELD = new ParseField("node_version"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "desired_node", @@ -64,7 +69,8 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparabl (Processors) args[1], (ProcessorsRange) args[2], (ByteSizeValue) args[3], - (ByteSizeValue) args[4] + (ByteSizeValue) args[4], + (String) args[5] ) ); @@ -98,6 +104,12 @@ static void configureParser(ConstructingObjectParser parser) { STORAGE_FIELD, ObjectParser.ValueType.STRING ); + parser.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> p.text(), + VERSION_FIELD, + ObjectParser.ValueType.STRING + ); } private final Settings settings; @@ -106,9 +118,21 @@ static void configureParser(ConstructingObjectParser parser) { private final ByteSizeValue memory; private final ByteSizeValue storage; + @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated version field + private final String version; private final String externalId; private final Set roles; + @Deprecated + public DesiredNode(Settings settings, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage, String version) { + this(settings, null, processorsRange, memory, storage, version); + } + + @Deprecated + public DesiredNode(Settings settings, double processors, ByteSizeValue memory, ByteSizeValue storage, String version) { + this(settings, Processors.of(processors), null, memory, storage, version); + } + public DesiredNode(Settings settings, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage) { this(settings, null, processorsRange, memory, storage); } @@ -118,6 +142,17 @@ public DesiredNode(Settings settings, double processors, ByteSizeValue memory, B } DesiredNode(Settings settings, Processors processors, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage) { + this(settings, processors, processorsRange, memory, storage, null); + } + + DesiredNode( + Settings settings, + Processors processors, + ProcessorsRange processorsRange, + ByteSizeValue memory, + ByteSizeValue storage, + @Deprecated String version + ) { assert settings != null; assert memory != null; assert storage != null; @@ -151,6 +186,7 @@ public DesiredNode(Settings settings, double processors, ByteSizeValue memory, B this.processorsRange = processorsRange; this.memory = memory; this.storage = storage; + this.version = version; this.externalId = NODE_EXTERNAL_ID_SETTING.get(settings); this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(DiscoveryNode.getRolesFromSettings(settings))); } @@ -174,7 +210,19 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { } else { version = Version.readVersion(in).toString(); } - return new DesiredNode(settings, processors, processorsRange, memory, storage); + return new DesiredNode(settings, processors, processorsRange, memory, storage, version); + } + + private static final Pattern SEMANTIC_VERSION_PATTERN = Pattern.compile("^(\\d+\\.\\d+\\.\\d+)\\D?.*"); + + private static Version parseLegacyVersion(String version) { + if (version != null) { + var semanticVersionMatcher = SEMANTIC_VERSION_PATTERN.matcher(version); + if (semanticVersionMatcher.matches()) { + return Version.fromString(semanticVersionMatcher.group(1)); + } + } + return null; } @Override @@ -191,9 +239,15 @@ public void writeTo(StreamOutput out) throws IOException { memory.writeTo(out); storage.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { - out.writeOptionalString(null); + out.writeOptionalString(version); } else { - Version.writeVersion(Version.CURRENT, out); + Version parsedVersion = parseLegacyVersion(version); + if (version == null) { + // Some node is from before we made the version field not required. If so, fill in with the current node version. + Version.writeVersion(Version.CURRENT, out); + } else { + Version.writeVersion(parsedVersion, out); + } } } @@ -221,6 +275,14 @@ public void toInnerXContent(XContentBuilder builder, Params params) throws IOExc } builder.field(MEMORY_FIELD.getPreferredName(), memory); builder.field(STORAGE_FIELD.getPreferredName(), storage); + addDeprecatedVersionField(builder); + } + + @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated field from response + private void addDeprecatedVersionField(XContentBuilder builder) throws IOException { + if (version != null) { + builder.field(VERSION_FIELD.getPreferredName(), version); + } } public boolean hasMasterRole() { @@ -304,6 +366,7 @@ private boolean equalsWithoutProcessorsSpecification(DesiredNode that) { return Objects.equals(settings, that.settings) && Objects.equals(memory, that.memory) && Objects.equals(storage, that.storage) + && Objects.equals(version, that.version) && Objects.equals(externalId, that.externalId) && Objects.equals(roles, that.roles); } @@ -316,7 +379,7 @@ public boolean equalsWithProcessorsCloseTo(DesiredNode that) { @Override public int hashCode() { - return Objects.hash(settings, processors, processorsRange, memory, storage, externalId, roles); + return Objects.hash(settings, processors, processorsRange, memory, storage, version, externalId, roles); } @Override @@ -345,6 +408,10 @@ public String toString() { + '}'; } + public boolean hasVersion() { + return Strings.isNullOrBlank(version) == false; + } + public record ProcessorsRange(Processors min, @Nullable Processors max) implements Writeable, ToXContentObject { private static final ParseField MIN_FIELD = new ParseField("min"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java index 606309adf205c..7b89406be9aa0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java @@ -44,12 +44,13 @@ public record DesiredNodeWithStatus(DesiredNode desiredNode, Status status) (Processors) args[1], (DesiredNode.ProcessorsRange) args[2], (ByteSizeValue) args[3], - (ByteSizeValue) args[4] + (ByteSizeValue) args[4], + (String) args[5] ), // An unknown status is expected during upgrades to versions >= STATUS_TRACKING_SUPPORT_VERSION // the desired node status would be populated when a node in the newer version is elected as // master, the desired nodes status update happens in NodeJoinExecutor. - args[5] == null ? Status.PENDING : (Status) args[5] + args[6] == null ? Status.PENDING : (Status) args[6] ) ); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java index b8e1fa0c836a3..ec8bb6285bdd4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java @@ -12,11 +12,13 @@ import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesAction; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -65,6 +67,16 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli ); } + if (clusterSupportsFeature.test(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED)) { + if (updateDesiredNodesRequest.getNodes().stream().anyMatch(DesiredNode::hasVersion)) { + deprecationLogger.compatibleCritical("desired_nodes_version", VERSION_DEPRECATION_MESSAGE); + } + } else { + if (updateDesiredNodesRequest.getNodes().stream().anyMatch(n -> n.hasVersion() == false)) { + throw new XContentParseException("[node_version] field is required and must have a valid value"); + } + } + return restChannel -> client.execute( UpdateDesiredNodesAction.INSTANCE, updateDesiredNodesRequest, From fb9e028565ddc9fb57216238998cfc101e1f7f23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Slobodan=20Adamovi=C4=87?= Date: Tue, 29 Oct 2024 12:04:15 +0100 Subject: [PATCH 178/324] Fix lingering license warning header in IP filter (#115510) Fixes another place where we do not stash thread context that causes the license warning header to persist in the thread context across Netty worker threads. Resolves #114865 Relates to #107573 --- docs/changelog/115510.yaml | 6 ++++++ muted-tests.yml | 4 +--- .../org/elasticsearch/license/LicensingTests.java | 2 ++ .../netty4/IpFilterRemoteAddressFilter.java | 10 ++++++++-- .../netty4/SecurityNetty4ServerTransport.java | 2 +- .../netty4/IpFilterRemoteAddressFilterTests.java | 12 +++++++++--- 6 files changed, 27 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/115510.yaml diff --git a/docs/changelog/115510.yaml b/docs/changelog/115510.yaml new file mode 100644 index 0000000000000..1e71270e18f97 --- /dev/null +++ b/docs/changelog/115510.yaml @@ -0,0 +1,6 @@ +pr: 115510 +summary: Fix lingering license warning header in IP filter +area: License +type: bug +issues: + - 114865 diff --git a/muted-tests.yml b/muted-tests.yml index 610681949ade0..4315f1283a347 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -224,8 +224,6 @@ tests: - class: org.elasticsearch.xpack.remotecluster.RemoteClusterSecurityWithApmTracingRestIT method: testTracingCrossCluster issue: https://github.com/elastic/elasticsearch/issues/112731 -- class: org.elasticsearch.license.LicensingTests - issue: https://github.com/elastic/elasticsearch/issues/114865 - class: org.elasticsearch.packaging.test.EnrollmentProcessTests method: test20DockerAutoFormCluster issue: https://github.com/elastic/elasticsearch/issues/114885 @@ -330,4 +328,4 @@ tests: # issue: "https://github.com/elastic/elasticsearch/..." # - class: "org.elasticsearch.xpack.esql.**" # method: "test {union_types.MultiIndexIpStringStatsInline *}" -# issue: "https://github.com/elastic/elasticsearch/..." +# issue: "https://github.com/elastic/elasticsearch/..." \ No newline at end of file diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java index 42b807b5f045b..3a9194ee2eac1 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.xpack.security.LocalStateSecurity; import org.hamcrest.Matchers; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import java.nio.file.Files; @@ -241,6 +242,7 @@ public void testNoWarningHeaderWhenAuthenticationFailed() throws Exception { Header[] headers = null; try { getRestClient().performRequest(request); + Assert.fail("expected response exception"); } catch (ResponseException e) { headers = e.getResponse().getHeaders(); List afterWarningHeaders = getWarningHeaders(headers); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java index 9a3c9c847d131..55a3dbcdfde95 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java @@ -10,6 +10,7 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.handler.ipfilter.AbstractRemoteAddressFilter; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.xpack.security.transport.filter.IPFilter; import java.net.InetSocketAddress; @@ -19,16 +20,21 @@ class IpFilterRemoteAddressFilter extends AbstractRemoteAddressFilter Date: Tue, 29 Oct 2024 12:19:53 +0100 Subject: [PATCH 179/324] Try to simplify geometries that fail with TopologyException (#115834) This geometries are valid and they can actually be simplified so lets make the clipping algorithm a best effort and return the original geometry in those cases so the simplification can handle it. --- docs/changelog/115834.yaml | 5 +++++ .../xpack/vectortile/feature/FeatureFactory.java | 7 +++++-- 2 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/115834.yaml diff --git a/docs/changelog/115834.yaml b/docs/changelog/115834.yaml new file mode 100644 index 0000000000000..91f9e9a4e2e41 --- /dev/null +++ b/docs/changelog/115834.yaml @@ -0,0 +1,5 @@ +pr: 115834 +summary: Try to simplify geometries that fail with `TopologyException` +area: Geo +type: bug +issues: [] diff --git a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java index 0c4ff1780ae1e..b5f9088edc4be 100644 --- a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java +++ b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java @@ -307,8 +307,11 @@ private static org.locationtech.jts.geom.Geometry clipGeometry( return null; } } catch (TopologyException ex) { - // we should never get here but just to be super safe because a TopologyException will kill the node - throw new IllegalArgumentException(ex); + // Note we should never throw a TopologyException as it kill the node + // unfortunately the intersection method is not perfect and it will throw this error for complex + // geometries even when valid. We can still simplify such geometry so we just return the original and + // let the simplification process handle it. + return geometry; } } } From b868677c5b156233257612ab2121f4a01ca69aed Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Tue, 29 Oct 2024 12:26:27 +0100 Subject: [PATCH 180/324] Fix file settings service tests (#115770) This PR addresses some of the failure causes tracked under https://github.com/elastic/elasticsearch/issues/115280 and https://github.com/elastic/elasticsearch/issues/115725: the latch-await setup was rather convoluted and the move command not always correctly invoked in the right order. This PR cleans up latching by separating awaiting the first processing call (on start) from waiting on the subsequent call. Also, it makes writing the file more robust w.r.t. OS'es where `atomic_move` may not be available. This should address failures around the timeout await, and the assertion failures around invoked methods tracked here: https://github.com/elastic/elasticsearch/issues/115725#issuecomment-2441989470 But will likely require another round of changes to address the failures to delete files. Relates: https://github.com/elastic/elasticsearch/issues/115280 Relates: https://github.com/elastic/elasticsearch/issues/115725 --- muted-tests.yml | 3 - .../service/FileSettingsServiceTests.java | 85 ++++++++++++------- 2 files changed, 52 insertions(+), 36 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 4315f1283a347..419e8fbb68566 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -236,9 +236,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultE5 issue: https://github.com/elastic/elasticsearch/issues/115361 -- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests - method: testProcessFileChanges - issue: https://github.com/elastic/elasticsearch/issues/115280 - class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT method: testFileSettingsReprocessedOnRestartWithoutVersionChange issue: https://github.com/elastic/elasticsearch/issues/115450 diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index 8af36e2f9677e..f67d7ddcc7550 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; @@ -39,9 +41,10 @@ import org.mockito.stubbing.Answer; import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.List; import java.util.Map; import java.util.Set; @@ -50,6 +53,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; +import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.hasEntry; @@ -190,9 +195,7 @@ public void testInitialFileWorks() throws Exception { return null; }).when(controller).process(any(), any(XContentParser.class), any(), any()); - CountDownLatch latch = new CountDownLatch(1); - - fileSettingsService.addFileChangedListener(latch::countDown); + CountDownLatch fileProcessingLatch = new CountDownLatch(1); Files.createDirectories(fileSettingsService.watchedFileDir()); // contents of the JSON don't matter, we just need a file to exist @@ -202,15 +205,14 @@ public void testInitialFileWorks() throws Exception { try { return invocation.callRealMethod(); } finally { - latch.countDown(); + fileProcessingLatch.countDown(); } }).when(fileSettingsService).processFileOnServiceStart(); fileSettingsService.start(); fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); - // wait for listener to be called - assertTrue(latch.await(20, TimeUnit.SECONDS)); + longAwait(fileProcessingLatch); verify(fileSettingsService, times(1)).processFileOnServiceStart(); verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION), any()); @@ -223,40 +225,40 @@ public void testProcessFileChanges() throws Exception { return null; }).when(controller).process(any(), any(XContentParser.class), any(), any()); - // we get three events: initial clusterChanged event, first write, second write - CountDownLatch latch = new CountDownLatch(3); - - fileSettingsService.addFileChangedListener(latch::countDown); - - Files.createDirectories(fileSettingsService.watchedFileDir()); - // contents of the JSON don't matter, we just need a file to exist - writeTestFile(fileSettingsService.watchedFile(), "{}"); - + CountDownLatch changesOnStartLatch = new CountDownLatch(1); doAnswer((Answer) invocation -> { try { return invocation.callRealMethod(); } finally { - latch.countDown(); + changesOnStartLatch.countDown(); } }).when(fileSettingsService).processFileOnServiceStart(); + + CountDownLatch changesLatch = new CountDownLatch(1); doAnswer((Answer) invocation -> { try { return invocation.callRealMethod(); } finally { - latch.countDown(); + changesLatch.countDown(); } }).when(fileSettingsService).processFileChanges(); + Files.createDirectories(fileSettingsService.watchedFileDir()); + // contents of the JSON don't matter, we just need a file to exist + writeTestFile(fileSettingsService.watchedFile(), "{}"); + fileSettingsService.start(); fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); - // second file change; contents still don't matter - overwriteTestFile(fileSettingsService.watchedFile(), "{}"); - // wait for listener to be called (once for initial processing, once for subsequent update) - assertTrue(latch.await(20, TimeUnit.SECONDS)); + longAwait(changesOnStartLatch); verify(fileSettingsService, times(1)).processFileOnServiceStart(); verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION), any()); + + // second file change; contents still don't matter + writeTestFile(fileSettingsService.watchedFile(), "[]"); + longAwait(changesLatch); + verify(fileSettingsService, times(1)).processFileChanges(); verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_VERSION_ONLY), any()); } @@ -295,9 +297,7 @@ public void testStopWorksInMiddleOfProcessing() throws Exception { // Make some fake settings file to cause the file settings service to process it writeTestFile(fileSettingsService.watchedFile(), "{}"); - // we need to wait a bit, on MacOS it may take up to 10 seconds for the Java watcher service to notice the file, - // on Linux is instantaneous. Windows is instantaneous too. - assertTrue(processFileLatch.await(30, TimeUnit.SECONDS)); + longAwait(processFileLatch); // Stopping the service should interrupt the watcher thread, we should be able to stop fileSettingsService.stop(); @@ -352,15 +352,34 @@ public void testHandleSnapshotRestoreResetsMetadata() throws Exception { } // helpers - private void writeTestFile(Path path, String contents) throws IOException { - Path tempFilePath = createTempFile(); - Files.writeString(tempFilePath, contents); - Files.move(tempFilePath, path, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); + private static void writeTestFile(Path path, String contents) { + Path tempFile = null; + try { + tempFile = Files.createTempFile(path.getParent(), path.getFileName().toString(), "tmp"); + Files.writeString(tempFile, contents); + + try { + Files.move(tempFile, path, REPLACE_EXISTING, ATOMIC_MOVE); + } catch (AtomicMoveNotSupportedException e) { + Files.move(tempFile, path, REPLACE_EXISTING); + } + } catch (final IOException e) { + throw new UncheckedIOException(Strings.format("could not write file [%s]", path.toAbsolutePath()), e); + } finally { + // we are ignoring exceptions here, so we do not need handle whether or not tempFile was initialized nor if the file exists + IOUtils.deleteFilesIgnoringExceptions(tempFile); + } } - private void overwriteTestFile(Path path, String contents) throws IOException { - Path tempFilePath = createTempFile(); - Files.writeString(tempFilePath, contents); - Files.move(tempFilePath, path, StandardCopyOption.REPLACE_EXISTING); + // this waits for up to 20 seconds to account for watcher service differences between OSes + // on MacOS it may take up to 10 seconds for the Java watcher service to notice the file, + // on Linux is instantaneous. Windows is instantaneous too. + private static void longAwait(CountDownLatch latch) { + try { + assertTrue("longAwait: CountDownLatch did not reach zero within the timeout", latch.await(20, TimeUnit.SECONDS)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + fail(e, "longAwait: interrupted waiting for CountDownLatch to reach zero"); + } } } From a7031d871654c4ab73fc747b43de2bcbf863cf45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Tue, 29 Oct 2024 12:31:02 +0100 Subject: [PATCH 181/324] Add ECK Role Mapping Cleanup (#115823) * Add security migration for cleaning up ECK role mappings --- docs/changelog/115823.yaml | 5 + .../FileSettingsRoleMappingUpgradeIT.java | 40 +- .../metadata/ReservedStateMetadata.java | 15 + .../elasticsearch/index/IndexVersions.java | 2 +- .../support/mapper/ExpressionRoleMapping.java | 15 +- .../security/authz/RoleMappingMetadata.java | 8 + .../RoleMappingFileSettingsIT.java | 23 + ...eanupRoleMappingDuplicatesMigrationIT.java | 417 ++++++++++++++++++ .../xpack/security/SecurityFeatures.java | 3 +- .../support/SecurityIndexManager.java | 83 +++- .../security/support/SecurityMigrations.java | 235 +++++++--- .../support/SecuritySystemIndices.java | 1 + .../authc/AuthenticationServiceTests.java | 1 + .../authc/esnative/NativeRealmTests.java | 1 + .../mapper/NativeRoleMappingStoreTests.java | 1 + .../authz/store/CompositeRolesStoreTests.java | 1 + .../store/NativePrivilegeStoreTests.java | 1 + .../CacheInvalidatorRegistryTests.java | 1 + .../support/SecurityIndexManagerTests.java | 138 ++++++ .../support/SecurityMigrationsTests.java | 174 ++++++++ x-pack/qa/rolling-upgrade/build.gradle | 2 + .../upgrades/AbstractUpgradeTestCase.java | 21 + .../SecurityIndexRoleMappingCleanupIT.java | 146 ++++++ ...SecurityIndexRolesMetadataMigrationIT.java | 19 +- .../operator_defined_role_mappings.json | 38 ++ 25 files changed, 1298 insertions(+), 93 deletions(-) create mode 100644 docs/changelog/115823.yaml create mode 100644 x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationsTests.java create mode 100644 x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRoleMappingCleanupIT.java create mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/operator_defined_role_mappings.json diff --git a/docs/changelog/115823.yaml b/docs/changelog/115823.yaml new file mode 100644 index 0000000000000..a6119e0fa56e4 --- /dev/null +++ b/docs/changelog/115823.yaml @@ -0,0 +1,5 @@ +pr: 115823 +summary: Add ECK Role Mapping Cleanup +area: Security +type: bug +issues: [] diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java index 834d97f755dfb..4caf33feeeebb 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java @@ -23,19 +23,20 @@ import org.junit.rules.TemporaryFolder; import org.junit.rules.TestRule; -import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.function.Supplier; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; public class FileSettingsRoleMappingUpgradeIT extends ParameterizedRollingUpgradeTestCase { - private static final String settingsJSON = """ + private static final int ROLE_MAPPINGS_CLEANUP_MIGRATION_VERSION = 2; + private static final String SETTING_JSON = """ { "metadata": { "version": "1", @@ -53,7 +54,6 @@ public class FileSettingsRoleMappingUpgradeIT extends ParameterizedRollingUpgrad }"""; private static final TemporaryFolder repoDirectory = new TemporaryFolder(); - private static final ElasticsearchCluster cluster = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) .version(getOldClusterTestVersion()) @@ -68,7 +68,7 @@ public String get() { .setting("xpack.security.enabled", "true") // workaround to avoid having to set up clients and authorization headers .setting("xpack.security.authc.anonymous.roles", "superuser") - .configFile("operator/settings.json", Resource.fromString(settingsJSON)) + .configFile("operator/settings.json", Resource.fromString(SETTING_JSON)) .build(); @ClassRule @@ -91,7 +91,30 @@ public void checkVersions() { ); } - public void testRoleMappingsAppliedOnUpgrade() throws IOException { + private static void waitForSecurityMigrationCompletionIfIndexExists() throws Exception { + final Request request = new Request("GET", "_cluster/state/metadata/.security-7"); + assertBusy(() -> { + Map indices = new XContentTestUtils.JsonMapView(entityAsMap(client().performRequest(request))).get( + "metadata.indices" + ); + assertNotNull(indices); + // If the security index exists, migration needs to happen. There is a bug in pre cluster state role mappings code that tries + // to write file based role mappings before security index manager state is recovered, this makes it look like the security + // index is outdated (isIndexUpToDate == false). Because we can't rely on the index being there for old versions, this check + // is needed. + if (indices.containsKey(".security-7")) { + // JsonMapView doesn't support . prefixed indices (splits on .) + @SuppressWarnings("unchecked") + String responseVersion = new XContentTestUtils.JsonMapView((Map) indices.get(".security-7")).get( + "migration_version.version" + ); + assertNotNull(responseVersion); + assertTrue(Integer.parseInt(responseVersion) >= ROLE_MAPPINGS_CLEANUP_MIGRATION_VERSION); + } + }); + } + + public void testRoleMappingsAppliedOnUpgrade() throws Exception { if (isOldCluster()) { Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); List roleMappings = new XContentTestUtils.JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))).get( @@ -107,11 +130,10 @@ public void testRoleMappingsAppliedOnUpgrade() throws IOException { ).get("metadata.role_mappings.role_mappings"); assertThat(clusterStateRoleMappings, is(not(nullValue()))); assertThat(clusterStateRoleMappings.size(), equalTo(1)); - + waitForSecurityMigrationCompletionIfIndexExists(); assertThat( entityAsMap(client().performRequest(new Request("GET", "/_security/role_mapping"))).keySet(), - // TODO change this to `contains` once the clean-up migration work is merged - hasItem("everyone_kibana-read-only-operator-mapping") + contains("everyone_kibana-read-only-operator-mapping") ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java index 2390c96664057..a0b35f7cfc3eb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java @@ -91,6 +91,21 @@ public Set conflicts(String handlerName, Set modified) { return Collections.unmodifiableSet(intersect); } + /** + * Get the reserved keys for the handler name + * + * @param handlerName handler name to get keys for + * @return set of keys for that handler + */ + public Set keys(String handlerName) { + ReservedStateHandlerMetadata handlerMetadata = handlers.get(handlerName); + if (handlerMetadata == null || handlerMetadata.keys().isEmpty()) { + return Collections.emptySet(); + } + + return Collections.unmodifiableSet(handlerMetadata.keys()); + } + /** * Reads an {@link ReservedStateMetadata} from a {@link StreamInput} * diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index efb1facc79b3a..2919f98ee200e 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -128,7 +128,7 @@ private static Version parseUnchecked(String version) { public static final IndexVersion MERGE_ON_RECOVERY_VERSION = def(8_515_00_0, Version.LUCENE_9_11_1); public static final IndexVersion UPGRADE_TO_LUCENE_9_12 = def(8_516_00_0, Version.LUCENE_9_12_0); public static final IndexVersion ENABLE_IGNORE_ABOVE_LOGSDB = def(8_517_00_0, Version.LUCENE_9_12_0); - + public static final IndexVersion ADD_ROLE_MAPPING_CLEANUP_MIGRATION = def(8_518_00_0, Version.LUCENE_9_12_0); public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0); /* diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java index c504ebe56ed45..41fd3c6938dfc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java @@ -206,7 +206,7 @@ public RoleMapperExpression getExpression() { * that match the {@link #getExpression() expression} in this mapping. */ public List getRoles() { - return Collections.unmodifiableList(roles); + return roles != null ? Collections.unmodifiableList(roles) : Collections.emptyList(); } /** @@ -214,7 +214,7 @@ public List getRoles() { * that should be assigned to users that match the {@link #getExpression() expression} in this mapping. */ public List getRoleTemplates() { - return Collections.unmodifiableList(roleTemplates); + return roleTemplates != null ? Collections.unmodifiableList(roleTemplates) : Collections.emptyList(); } /** @@ -223,7 +223,7 @@ public List getRoleTemplates() { * This is not used within the mapping process, and does not affect whether the expression matches, nor which roles are assigned. */ public Map getMetadata() { - return Collections.unmodifiableMap(metadata); + return metadata != null ? Collections.unmodifiableMap(metadata) : Collections.emptyMap(); } /** @@ -233,6 +233,15 @@ public boolean isEnabled() { return enabled; } + /** + * Whether this mapping is an operator defined/read only role mapping + */ + public boolean isReadOnly() { + return metadata != null && metadata.get(ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG) instanceof Boolean readOnly + ? readOnly + : false; + } + @Override public String toString() { return getClass().getSimpleName() + "<" + name + " ; " + roles + "/" + roleTemplates + " = " + Strings.toString(expression) + ">"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java index 74c6223b1ebdd..31fe86ca77edd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java @@ -191,6 +191,14 @@ public static boolean hasFallbackName(ExpressionRoleMapping expressionRoleMappin return expressionRoleMapping.getName().equals(FALLBACK_NAME); } + /** + * Check if any of the role mappings have a fallback name + * @return true if any role mappings have the fallback name + */ + public boolean hasAnyMappingWithFallbackName() { + return roleMappings.stream().anyMatch(RoleMappingMetadata::hasFallbackName); + } + /** * Parse a role mapping from XContent, restoring the name from a reserved metadata field. * Used to parse a role mapping annotated with its name in metadata via @see {@link #copyWithNameInMetadata(ExpressionRoleMapping)}. diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java index fdd854e7a9673..9e36055e917a6 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java @@ -204,6 +204,29 @@ public void clusterChanged(ClusterChangedEvent event) { return new Tuple<>(savedClusterState, metadataVersion); } + // Wait for any file metadata + public static Tuple setupClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedRoleMappingAction.NAME); + if (handlerMetadata != null) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + public static Tuple setupClusterStateListenerForCleanup(String node) { ClusterService clusterService = internalCluster().clusterService(node); CountDownLatch savedClusterState = new CountDownLatch(1); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java new file mode 100644 index 0000000000000..63c510062bdad --- /dev/null +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java @@ -0,0 +1,417 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.support; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.reservedstate.service.FileSettingsService; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.integration.RoleMappingFileSettingsIT.setupClusterStateListener; +import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFile; +import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_DATA_KEY; +import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_KEY; +import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class CleanupRoleMappingDuplicatesMigrationIT extends SecurityIntegTestCase { + + private final AtomicLong versionCounter = new AtomicLong(1); + + @Before + public void resetVersion() { + versionCounter.set(1); + } + + private static final String TEST_JSON_WITH_ROLE_MAPPINGS = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "role_mappings": { + "everyone_kibana_alone": { + "enabled": true, + "roles": [ "kibana_user" ], + "rules": { "field": { "username": "*" } }, + "metadata": { + "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", + "_foo": "something" + } + }, + "everyone_fleet_alone": { + "enabled": false, + "roles": [ "fleet_user" ], + "rules": { "field": { "username": "*" } }, + "metadata": { + "uuid" : "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", + "_foo": "something_else" + } + } + } + } + }"""; + + private static final String TEST_JSON_WITH_FALLBACK_NAME = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "role_mappings": { + "name_not_available_after_deserialization": { + "enabled": true, + "roles": [ "kibana_user", "kibana_admin" ], + "rules": { "field": { "username": "*" } }, + "metadata": { + "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", + "_foo": "something" + } + } + } + } + }"""; + + private static final String TEST_JSON_WITH_EMPTY_ROLE_MAPPINGS = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "role_mappings": {} + } + }"""; + + public void testMigrationSuccessful() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + final String masterNode = internalCluster().getMasterName(); + + // Create a native role mapping to create security index and trigger migration (skipped initially) + createNativeRoleMapping("everyone_kibana_alone"); + createNativeRoleMapping("everyone_fleet_alone"); + createNativeRoleMapping("dont_clean_this_up"); + assertAllRoleMappings("everyone_kibana_alone", "everyone_fleet_alone", "dont_clean_this_up"); + + // Wait for file watcher to start + awaitFileSettingsWatcher(); + // Setup listener to wait for role mapping + var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); + // Write role mappings + writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + // First migration is on a new index, so should skip all migrations. If we reset, it should re-trigger and run all migrations + resetMigration(); + + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + assertAllRoleMappings( + "everyone_kibana_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "everyone_fleet_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "dont_clean_this_up" + ); + } + + public void testMigrationSuccessfulNoOverlap() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + final String masterNode = internalCluster().getMasterName(); + + // Create a native role mapping to create security index and trigger migration (skipped initially) + createNativeRoleMapping("some_native_mapping"); + createNativeRoleMapping("some_other_native_mapping"); + assertAllRoleMappings("some_native_mapping", "some_other_native_mapping"); + + // Wait for file watcher to start + awaitFileSettingsWatcher(); + // Setup listener to wait for role mapping + var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); + // Write role mappings with fallback name, this should block any security migration + writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + // First migration is on a new index, so should skip all migrations. If we reset, it should re-trigger and run all migrations + resetMigration(); + + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + assertAllRoleMappings( + "everyone_kibana_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "everyone_fleet_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "some_native_mapping", + "some_other_native_mapping" + ); + } + + public void testMigrationSuccessfulNoNative() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + final String masterNode = internalCluster().getMasterName(); + + // Create a native role mapping to create security index and trigger migration (skipped initially) + // Then delete it to test an empty role mapping store + createNativeRoleMapping("some_native_mapping"); + deleteNativeRoleMapping("some_native_mapping"); + // Wait for file watcher to start + awaitFileSettingsWatcher(); + // Setup listener to wait for role mapping + var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); + // Write role mappings with fallback name, this should block any security migration + writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + // First migration is on a new index, so should skip all migrations. If we reset, it should re-trigger and run all migrations + resetMigration(); + + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + assertAllRoleMappings( + "everyone_kibana_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "everyone_fleet_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX + ); + } + + public void testMigrationFallbackNamePreCondition() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + final String masterNode = internalCluster().getMasterName(); + // Wait for file watcher to start + awaitFileSettingsWatcher(); + + // Setup listener to wait for role mapping + var nameNotAvailableListener = setupClusterStateListener(masterNode, "name_not_available_after_deserialization"); + // Write role mappings with fallback name, this should block any security migration + writeJSONFile(masterNode, TEST_JSON_WITH_FALLBACK_NAME, logger, versionCounter); + assertTrue(nameNotAvailableListener.v1().await(20, TimeUnit.SECONDS)); + + // Create a native role mapping to create security index and trigger migration + createNativeRoleMapping("everyone_fleet_alone"); + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + // First migration is on a new index, so should skip all migrations. If we reset, it should re-trigger and run all migrations + resetMigration(); + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION - 1); + + // Make sure migration didn't run yet (blocked by the fallback name) + assertMigrationLessThan(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + SecurityIndexManager.RoleMappingsCleanupMigrationStatus status = SecurityIndexManager.getRoleMappingsCleanupMigrationStatus( + clusterService.state(), + SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION - 1 + ); + assertThat(status, equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.NOT_READY)); + + // Write file without fallback name in it to unblock migration + writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + } + + public void testSkipMigrationNoFileBasedMappings() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + // Create a native role mapping to create security index and trigger migration (skipped initially) + createNativeRoleMapping("everyone_kibana_alone"); + createNativeRoleMapping("everyone_fleet_alone"); + assertAllRoleMappings("everyone_kibana_alone", "everyone_fleet_alone"); + + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + // First migration is on a new index, so should skip all migrations. If we reset, it should re-trigger and run all migrations + resetMigration(); + + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + assertAllRoleMappings("everyone_kibana_alone", "everyone_fleet_alone"); + } + + public void testSkipMigrationEmptyFileBasedMappings() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + final String masterNode = internalCluster().getMasterName(); + + // Wait for file watcher to start + awaitFileSettingsWatcher(); + // Setup listener to wait for any role mapping + var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode); + // Write role mappings + writeJSONFile(masterNode, TEST_JSON_WITH_EMPTY_ROLE_MAPPINGS, logger, versionCounter); + assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); + + // Create a native role mapping to create security index and trigger migration (skipped initially) + createNativeRoleMapping("everyone_kibana_alone"); + createNativeRoleMapping("everyone_fleet_alone"); + assertAllRoleMappings("everyone_kibana_alone", "everyone_fleet_alone"); + + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + // First migration is on a new index, so should skip all migrations. If we reset, it should re-trigger and run all migrations + resetMigration(); + + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + assertAllRoleMappings("everyone_kibana_alone", "everyone_fleet_alone"); + } + + public void testNewIndexSkipMigration() { + internalCluster().setBootstrapMasterNodeIndex(0); + final String masterNode = internalCluster().getMasterName(); + ensureGreen(); + CountDownLatch awaitMigrations = awaitMigrationVersionUpdates( + masterNode, + SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION + ); + // Create a native role mapping to create security index and trigger migration + createNativeRoleMapping("everyone_kibana_alone"); + // Make sure no migration ran (set to current version without applying prior migrations) + safeAwait(awaitMigrations); + } + + /** + * Make sure all versions are applied to cluster state sequentially + */ + private CountDownLatch awaitMigrationVersionUpdates(String node, final int... versions) { + final ClusterService clusterService = internalCluster().clusterService(node); + final CountDownLatch allVersionsCountDown = new CountDownLatch(1); + final AtomicInteger currentVersionIdx = new AtomicInteger(0); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + int currentMigrationVersion = getCurrentMigrationVersion(event.state()); + if (currentMigrationVersion > 0) { + assertThat(versions[currentVersionIdx.get()], lessThanOrEqualTo(currentMigrationVersion)); + if (versions[currentVersionIdx.get()] == currentMigrationVersion) { + currentVersionIdx.incrementAndGet(); + } + + if (currentVersionIdx.get() >= versions.length) { + clusterService.removeListener(this); + allVersionsCountDown.countDown(); + } + } + } + }); + + return allVersionsCountDown; + } + + private void assertAllRoleMappings(String... roleMappingNames) { + GetRoleMappingsResponse response = client().execute(GetRoleMappingsAction.INSTANCE, new GetRoleMappingsRequest()).actionGet(); + + assertTrue(response.hasMappings()); + assertThat(response.mappings().length, equalTo(roleMappingNames.length)); + + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + containsInAnyOrder( + roleMappingNames + + ) + ); + } + + private void awaitFileSettingsWatcher() throws Exception { + final String masterNode = internalCluster().getMasterName(); + FileSettingsService masterFileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); + assertBusy(() -> assertTrue(masterFileSettingsService.watching())); + } + + private void resetMigration() { + client().execute( + UpdateIndexMigrationVersionAction.INSTANCE, + // -1 is a hack, since running a migration on version 0 on a new cluster will cause all migrations to be skipped (not needed) + new UpdateIndexMigrationVersionAction.Request(TimeValue.MAX_VALUE, -1, INTERNAL_SECURITY_MAIN_INDEX_7) + ).actionGet(); + } + + private void createNativeRoleMapping(String name) { + PutRoleMappingRequest request = new PutRoleMappingRequest(); + request.setName(name); + request.setRules(new FieldExpression("username", Collections.singletonList(new FieldExpression.FieldValue("*")))); + request.setRoles(List.of("superuser")); + + ActionFuture response = client().execute(PutRoleMappingAction.INSTANCE, request); + response.actionGet(); + } + + private void deleteNativeRoleMapping(String name) { + DeleteRoleMappingRequest request = new DeleteRoleMappingRequest(); + request.setName(name); + + ActionFuture response = client().execute(DeleteRoleMappingAction.INSTANCE, request); + response.actionGet(); + } + + private void assertMigrationVersionAtLeast(int expectedVersion) { + assertThat(getCurrentMigrationVersion(), greaterThanOrEqualTo(expectedVersion)); + } + + private void assertMigrationLessThan(int expectedVersion) { + assertThat(getCurrentMigrationVersion(), lessThan(expectedVersion)); + } + + private int getCurrentMigrationVersion(ClusterState state) { + IndexMetadata indexMetadata = state.metadata().getIndices().get(INTERNAL_SECURITY_MAIN_INDEX_7); + if (indexMetadata == null || indexMetadata.getCustomData(MIGRATION_VERSION_CUSTOM_KEY) == null) { + return 0; + } + return Integer.parseInt(indexMetadata.getCustomData(MIGRATION_VERSION_CUSTOM_KEY).get(MIGRATION_VERSION_CUSTOM_DATA_KEY)); + } + + private int getCurrentMigrationVersion() { + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + return getCurrentMigrationVersion(clusterService.state()); + } + + private void waitForMigrationCompletion(int version) throws Exception { + assertBusy(() -> assertMigrationVersionAtLeast(version)); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java index c1fe553f41334..d0292f32cd75f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java @@ -17,13 +17,14 @@ import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MIGRATION_FRAMEWORK; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_PROFILE_ORIGIN_FEATURE; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_ROLE_MAPPING_CLEANUP; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.VERSION_SECURITY_PROFILE_ORIGIN; public class SecurityFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(SECURITY_ROLES_METADATA_FLATTENED, SECURITY_MIGRATION_FRAMEWORK); + return Set.of(SECURITY_ROLE_MAPPING_CLEANUP, SECURITY_ROLES_METADATA_FLATTENED, SECURITY_MIGRATION_FRAMEWORK); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index 6d9b0ef6aeebe..12ef800a7aae7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.core.TimeValue; @@ -46,7 +47,9 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; import org.elasticsearch.xpack.security.SecurityFeatures; +import org.elasticsearch.xpack.security.action.rolemapping.ReservedRoleMappingAction; import java.time.Instant; import java.util.List; @@ -75,7 +78,7 @@ public class SecurityIndexManager implements ClusterStateListener { public static final String SECURITY_VERSION_STRING = "security-version"; - + protected static final String FILE_SETTINGS_METADATA_NAMESPACE = "file_settings"; private static final Logger logger = LogManager.getLogger(SecurityIndexManager.class); /** @@ -86,6 +89,13 @@ public enum Availability { PRIMARY_SHARDS } + public enum RoleMappingsCleanupMigrationStatus { + READY, + NOT_READY, + SKIP, + DONE + } + private final Client client; private final SystemIndexDescriptor systemIndexDescriptor; @@ -195,10 +205,6 @@ public boolean isMigrationsVersionAtLeast(Integer expectedMigrationsVersion) { return indexExists() && this.state.migrationsVersion.compareTo(expectedMigrationsVersion) >= 0; } - public boolean isCreatedOnLatestVersion() { - return this.state.createdOnLatestVersion; - } - public ElasticsearchException getUnavailableReason(Availability availability) { // ensure usage of a local copy so all checks execute against the same state! if (defensiveCopy == false) { @@ -261,6 +267,7 @@ private SystemIndexDescriptor.MappingsVersion getMinSecurityIndexMappingVersion( /** * Check if the index was created on the latest index version available in the cluster */ + private static boolean isCreatedOnLatestVersion(IndexMetadata indexMetadata) { final IndexVersion indexVersionCreated = indexMetadata != null ? SETTING_INDEX_VERSION_CREATED.get(indexMetadata.getSettings()) @@ -268,6 +275,50 @@ private static boolean isCreatedOnLatestVersion(IndexMetadata indexMetadata) { return indexVersionCreated != null && indexVersionCreated.onOrAfter(IndexVersion.current()); } + /** + * Check if a role mappings cleanup migration is needed or has already been performed and if the cluster is ready for a cleanup + * migration + * + * @param clusterState current cluster state + * @param migrationsVersion current migration version + * + * @return RoleMappingsCleanupMigrationStatus + */ + static RoleMappingsCleanupMigrationStatus getRoleMappingsCleanupMigrationStatus(ClusterState clusterState, int migrationsVersion) { + // Migration already finished + if (migrationsVersion >= SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION) { + return RoleMappingsCleanupMigrationStatus.DONE; + } + + ReservedStateMetadata fileSettingsMetadata = clusterState.metadata().reservedStateMetadata().get(FILE_SETTINGS_METADATA_NAMESPACE); + boolean hasFileSettingsMetadata = fileSettingsMetadata != null; + // If there is no fileSettingsMetadata, there should be no reserved state (this is to catch bugs related to + // name changes to FILE_SETTINGS_METADATA_NAMESPACE) + assert hasFileSettingsMetadata || clusterState.metadata().reservedStateMetadata().isEmpty() + : "ReservedStateMetadata contains unknown namespace"; + + // If no file based role mappings available -> migration not needed + if (hasFileSettingsMetadata == false || fileSettingsMetadata.keys(ReservedRoleMappingAction.NAME).isEmpty()) { + return RoleMappingsCleanupMigrationStatus.SKIP; + } + + RoleMappingMetadata roleMappingMetadata = RoleMappingMetadata.getFromClusterState(clusterState); + + // If there are file based role mappings, make sure they have the latest format (name available) and that they have all been + // synced to cluster state (same size as the reserved state keys) + if (roleMappingMetadata.getRoleMappings().size() == fileSettingsMetadata.keys(ReservedRoleMappingAction.NAME).size() + && roleMappingMetadata.hasAnyMappingWithFallbackName() == false) { + return RoleMappingsCleanupMigrationStatus.READY; + } + + // If none of the above conditions are met, wait for a state change to re-evaluate if the cluster is ready for migration + return RoleMappingsCleanupMigrationStatus.NOT_READY; + } + + public RoleMappingsCleanupMigrationStatus getRoleMappingsCleanupMigrationStatus() { + return state.roleMappingsCleanupMigrationStatus; + } + @Override public void clusterChanged(ClusterChangedEvent event) { if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { @@ -285,8 +336,12 @@ public void clusterChanged(ClusterChangedEvent event) { Tuple available = checkIndexAvailable(event.state()); final boolean indexAvailableForWrite = available.v1(); final boolean indexAvailableForSearch = available.v2(); - final boolean mappingIsUpToDate = indexMetadata == null || checkIndexMappingUpToDate(event.state()); final int migrationsVersion = getMigrationVersionFromIndexMetadata(indexMetadata); + final RoleMappingsCleanupMigrationStatus roleMappingsCleanupMigrationStatus = getRoleMappingsCleanupMigrationStatus( + event.state(), + migrationsVersion + ); + final boolean mappingIsUpToDate = indexMetadata == null || checkIndexMappingUpToDate(event.state()); final SystemIndexDescriptor.MappingsVersion minClusterMappingVersion = getMinSecurityIndexMappingVersion(event.state()); final int indexMappingVersion = loadIndexMappingVersion(systemIndexDescriptor.getAliasName(), event.state()); final String concreteIndexName = indexMetadata == null @@ -315,6 +370,7 @@ public void clusterChanged(ClusterChangedEvent event) { indexAvailableForWrite, mappingIsUpToDate, createdOnLatestVersion, + roleMappingsCleanupMigrationStatus, migrationsVersion, minClusterMappingVersion, indexMappingVersion, @@ -474,7 +530,8 @@ private Tuple checkIndexAvailable(ClusterState state) { public boolean isEligibleSecurityMigration(SecurityMigrations.SecurityMigration securityMigration) { return state.securityFeatures.containsAll(securityMigration.nodeFeaturesRequired()) - && state.indexMappingVersion >= securityMigration.minMappingVersion(); + && state.indexMappingVersion >= securityMigration.minMappingVersion() + && securityMigration.checkPreConditions(state); } public boolean isReadyForSecurityMigration(SecurityMigrations.SecurityMigration securityMigration) { @@ -680,6 +737,10 @@ public void onFailure(Exception e) { } } + public boolean isCreatedOnLatestVersion() { + return state.createdOnLatestVersion; + } + /** * Return true if the state moves from an unhealthy ("RED") index state to a healthy ("non-RED") state. */ @@ -714,6 +775,7 @@ public static class State { null, null, null, + null, Set.of() ); public final Instant creationTime; @@ -722,6 +784,7 @@ public static class State { public final boolean indexAvailableForWrite; public final boolean mappingUpToDate; public final boolean createdOnLatestVersion; + public final RoleMappingsCleanupMigrationStatus roleMappingsCleanupMigrationStatus; public final Integer migrationsVersion; // Min mapping version supported by the descriptors in the cluster public final SystemIndexDescriptor.MappingsVersion minClusterMappingVersion; @@ -740,6 +803,7 @@ public State( boolean indexAvailableForWrite, boolean mappingUpToDate, boolean createdOnLatestVersion, + RoleMappingsCleanupMigrationStatus roleMappingsCleanupMigrationStatus, Integer migrationsVersion, SystemIndexDescriptor.MappingsVersion minClusterMappingVersion, Integer indexMappingVersion, @@ -756,6 +820,7 @@ public State( this.mappingUpToDate = mappingUpToDate; this.migrationsVersion = migrationsVersion; this.createdOnLatestVersion = createdOnLatestVersion; + this.roleMappingsCleanupMigrationStatus = roleMappingsCleanupMigrationStatus; this.minClusterMappingVersion = minClusterMappingVersion; this.indexMappingVersion = indexMappingVersion; this.concreteIndexName = concreteIndexName; @@ -776,6 +841,7 @@ public boolean equals(Object o) { && indexAvailableForWrite == state.indexAvailableForWrite && mappingUpToDate == state.mappingUpToDate && createdOnLatestVersion == state.createdOnLatestVersion + && roleMappingsCleanupMigrationStatus == state.roleMappingsCleanupMigrationStatus && Objects.equals(indexMappingVersion, state.indexMappingVersion) && Objects.equals(migrationsVersion, state.migrationsVersion) && Objects.equals(minClusterMappingVersion, state.minClusterMappingVersion) @@ -798,6 +864,7 @@ public int hashCode() { indexAvailableForWrite, mappingUpToDate, createdOnLatestVersion, + roleMappingsCleanupMigrationStatus, migrationsVersion, minClusterMappingVersion, indexMappingVersion, @@ -822,6 +889,8 @@ public String toString() { + mappingUpToDate + ", createdOnLatestVersion=" + createdOnLatestVersion + + ", roleMappingsCleanupMigrationStatus=" + + roleMappingsCleanupMigrationStatus + ", migrationsVersion=" + migrationsVersion + ", minClusterMappingVersion=" diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java index 5cd8cba763d3d..203dec9e25b91 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java @@ -11,6 +11,8 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -20,20 +22,35 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequestBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.stream.Collectors; +import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.RoleMappingsCleanupMigrationStatus.READY; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.RoleMappingsCleanupMigrationStatus.SKIP; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SecurityMainIndexMappingVersion.ADD_MANAGE_ROLES_PRIVILEGE; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SecurityMainIndexMappingVersion.ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS; -/** - * Interface for creating SecurityMigrations that will be automatically applied once to existing .security indices - * IMPORTANT: A new index version needs to be added to {@link org.elasticsearch.index.IndexVersions} for the migration to be triggered - */ public class SecurityMigrations { + /** + * Interface for creating SecurityMigrations that will be automatically applied once to existing .security indices + * IMPORTANT: A new index version needs to be added to {@link org.elasticsearch.index.IndexVersions} for the migration to be triggered + */ public interface SecurityMigration { /** * Method that will execute the actual migration - needs to be idempotent and non-blocking @@ -52,6 +69,16 @@ public interface SecurityMigration { */ Set nodeFeaturesRequired(); + /** + * Check that any pre-conditions are met before launching migration + * + * @param securityIndexManagerState current state of the security index + * @return true if pre-conditions met, otherwise false + */ + default boolean checkPreConditions(SecurityIndexManager.State securityIndexManagerState) { + return true; + } + /** * The min mapping version required to support this migration. This makes sure that the index has at least the min mapping that is * required to support the migration. @@ -62,63 +89,163 @@ public interface SecurityMigration { } public static final Integer ROLE_METADATA_FLATTENED_MIGRATION_VERSION = 1; + public static final Integer CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION = 2; + private static final Logger logger = LogManager.getLogger(SecurityMigration.class); public static final TreeMap MIGRATIONS_BY_VERSION = new TreeMap<>( - Map.of(ROLE_METADATA_FLATTENED_MIGRATION_VERSION, new SecurityMigration() { - private static final Logger logger = LogManager.getLogger(SecurityMigration.class); - - @Override - public void migrate(SecurityIndexManager indexManager, Client client, ActionListener listener) { - BoolQueryBuilder filterQuery = new BoolQueryBuilder().filter(QueryBuilders.termQuery("type", "role")) - .mustNot(QueryBuilders.existsQuery("metadata_flattened")); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(filterQuery).size(0).trackTotalHits(true); - SearchRequest countRequest = new SearchRequest(indexManager.getConcreteIndexName()); - countRequest.source(searchSourceBuilder); - - client.search(countRequest, ActionListener.wrap(response -> { - // If there are no roles, skip migration - if (response.getHits().getTotalHits().value() > 0) { - logger.info("Preparing to migrate [" + response.getHits().getTotalHits().value() + "] roles"); - updateRolesByQuery(indexManager, client, filterQuery, listener); - } else { - listener.onResponse(null); - } + Map.of( + ROLE_METADATA_FLATTENED_MIGRATION_VERSION, + new RoleMetadataFlattenedMigration(), + CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION, + new CleanupRoleMappingDuplicatesMigration() + ) + ); + + public static class RoleMetadataFlattenedMigration implements SecurityMigration { + @Override + public void migrate(SecurityIndexManager indexManager, Client client, ActionListener listener) { + BoolQueryBuilder filterQuery = new BoolQueryBuilder().filter(QueryBuilders.termQuery("type", "role")) + .mustNot(QueryBuilders.existsQuery("metadata_flattened")); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(filterQuery).size(0).trackTotalHits(true); + SearchRequest countRequest = new SearchRequest(indexManager.getConcreteIndexName()); + countRequest.source(searchSourceBuilder); + + client.search(countRequest, ActionListener.wrap(response -> { + // If there are no roles, skip migration + if (response.getHits().getTotalHits().value() > 0) { + logger.info("Preparing to migrate [" + response.getHits().getTotalHits().value() + "] roles"); + updateRolesByQuery(indexManager, client, filterQuery, listener); + } else { + listener.onResponse(null); + } + }, listener::onFailure)); + } + + private void updateRolesByQuery( + SecurityIndexManager indexManager, + Client client, + BoolQueryBuilder filterQuery, + ActionListener listener + ) { + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(indexManager.getConcreteIndexName()); + updateByQueryRequest.setQuery(filterQuery); + updateByQueryRequest.setScript( + new Script(ScriptType.INLINE, "painless", "ctx._source.metadata_flattened = ctx._source.metadata", Collections.emptyMap()) + ); + client.admin() + .cluster() + .execute(UpdateByQueryAction.INSTANCE, updateByQueryRequest, ActionListener.wrap(bulkByScrollResponse -> { + logger.info("Migrated [" + bulkByScrollResponse.getTotal() + "] roles"); + listener.onResponse(null); }, listener::onFailure)); + } + + @Override + public Set nodeFeaturesRequired() { + return Set.of(SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED); + } + + @Override + public int minMappingVersion() { + return ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS.id(); + } + } + + public static class CleanupRoleMappingDuplicatesMigration implements SecurityMigration { + @Override + public void migrate(SecurityIndexManager indexManager, Client client, ActionListener listener) { + if (indexManager.getRoleMappingsCleanupMigrationStatus() == SKIP) { + listener.onResponse(null); + return; } + assert indexManager.getRoleMappingsCleanupMigrationStatus() == READY; - private void updateRolesByQuery( - SecurityIndexManager indexManager, - Client client, - BoolQueryBuilder filterQuery, - ActionListener listener - ) { - UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(indexManager.getConcreteIndexName()); - updateByQueryRequest.setQuery(filterQuery); - updateByQueryRequest.setScript( - new Script( - ScriptType.INLINE, - "painless", - "ctx._source.metadata_flattened = ctx._source.metadata", - Collections.emptyMap() - ) + getRoleMappings(client, ActionListener.wrap(roleMappings -> { + List roleMappingsToDelete = getDuplicateRoleMappingNames(roleMappings.mappings()); + if (roleMappingsToDelete.isEmpty() == false) { + logger.info("Found [" + roleMappingsToDelete.size() + "] role mapping(s) to cleanup in .security index."); + deleteNativeRoleMappings(client, roleMappingsToDelete, listener); + } else { + listener.onResponse(null); + } + }, listener::onFailure)); + } + + private void getRoleMappings(Client client, ActionListener listener) { + executeAsyncWithOrigin( + client, + SECURITY_ORIGIN, + GetRoleMappingsAction.INSTANCE, + new GetRoleMappingsRequestBuilder(client).request(), + listener + ); + } + + private void deleteNativeRoleMappings(Client client, List names, ActionListener listener) { + assert names.isEmpty() == false; + ActionListener groupListener = new GroupedActionListener<>( + names.size(), + ActionListener.wrap(responses -> { + long foundRoleMappings = responses.stream().filter(DeleteRoleMappingResponse::isFound).count(); + if (responses.size() > foundRoleMappings) { + logger.warn( + "[" + (responses.size() - foundRoleMappings) + "] Role mapping(s) not found during role mapping clean up." + ); + } + if (foundRoleMappings > 0) { + logger.info("Deleted [" + foundRoleMappings + "] duplicated role mapping(s) from .security index"); + } + listener.onResponse(null); + }, listener::onFailure) + ); + + for (String name : names) { + executeAsyncWithOrigin( + client, + SECURITY_ORIGIN, + DeleteRoleMappingAction.INSTANCE, + new DeleteRoleMappingRequestBuilder(client).name(name).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).request(), + groupListener ); - client.admin() - .cluster() - .execute(UpdateByQueryAction.INSTANCE, updateByQueryRequest, ActionListener.wrap(bulkByScrollResponse -> { - logger.info("Migrated [" + bulkByScrollResponse.getTotal() + "] roles"); - listener.onResponse(null); - }, listener::onFailure)); } - @Override - public Set nodeFeaturesRequired() { - return Set.of(SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED); - } + } - @Override - public int minMappingVersion() { - return ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS.id(); - } - }) - ); + @Override + public boolean checkPreConditions(SecurityIndexManager.State securityIndexManagerState) { + // Block migration until expected role mappings are in cluster state and in the correct format or skip if no role mappings + // are expected + return securityIndexManagerState.roleMappingsCleanupMigrationStatus == READY + || securityIndexManagerState.roleMappingsCleanupMigrationStatus == SKIP; + } + + @Override + public Set nodeFeaturesRequired() { + return Set.of(SecuritySystemIndices.SECURITY_ROLE_MAPPING_CLEANUP); + } + + @Override + public int minMappingVersion() { + return ADD_MANAGE_ROLES_PRIVILEGE.id(); + } + + // Visible for testing + protected static List getDuplicateRoleMappingNames(ExpressionRoleMapping... roleMappings) { + // Partition role mappings on if they're cluster state role mappings (true) or native role mappings (false) + Map> partitionedRoleMappings = Arrays.stream(roleMappings) + .collect(Collectors.partitioningBy(ExpressionRoleMapping::isReadOnly)); + + Set clusterStateRoleMappings = partitionedRoleMappings.get(true) + .stream() + .map(ExpressionRoleMapping::getName) + .map(ExpressionRoleMapping::removeReadOnlySuffixIfPresent) + .collect(Collectors.toSet()); + + return partitionedRoleMappings.get(false) + .stream() + .map(ExpressionRoleMapping::getName) + .filter(clusterStateRoleMappings::contains) + .toList(); + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java index 36ea14c6e101b..77c7d19e94a9b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java @@ -61,6 +61,7 @@ public class SecuritySystemIndices { public static final NodeFeature SECURITY_PROFILE_ORIGIN_FEATURE = new NodeFeature("security.security_profile_origin"); public static final NodeFeature SECURITY_MIGRATION_FRAMEWORK = new NodeFeature("security.migration_framework"); public static final NodeFeature SECURITY_ROLES_METADATA_FLATTENED = new NodeFeature("security.roles_metadata_flattened"); + public static final NodeFeature SECURITY_ROLE_MAPPING_CLEANUP = new NodeFeature("security.role_mapping_cleanup"); /** * Security managed index mappings used to be updated based on the product version. They are now updated based on per-index mappings diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index e1c3b936e5a32..cd6c88cf525af 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -2518,6 +2518,7 @@ private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { null, null, null, + null, concreteSecurityIndexName, indexStatus, IndexMetadata.State.OPEN, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java index 2254c78a2910c..75d5959f351f0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java @@ -43,6 +43,7 @@ private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { null, null, null, + null, concreteSecurityIndexName, indexStatus, IndexMetadata.State.OPEN, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 38f01d4d18bc7..ca84a9189d90a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -415,6 +415,7 @@ private SecurityIndexManager.State indexState(boolean isUpToDate, ClusterHealthS null, null, null, + null, concreteSecurityIndexName, healthStatus, IndexMetadata.State.OPEN, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 9587533d87d86..da903ff7f7177 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -1702,6 +1702,7 @@ public SecurityIndexManager.State dummyIndexState(boolean isIndexUpToDate, Clust null, null, null, + null, concreteSecurityIndexName, healthStatus, IndexMetadata.State.OPEN, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index f91cb567ba689..73a45dc20ac42 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -904,6 +904,7 @@ private SecurityIndexManager.State dummyState( null, null, null, + null, concreteSecurityIndexName, healthStatus, IndexMetadata.State.OPEN, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java index e3b00dfbcc6b8..d551dded4e566 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java @@ -63,6 +63,7 @@ public void testSecurityIndexStateChangeWillInvalidateAllRegisteredInvalidators( true, true, null, + null, new SystemIndexDescriptor.MappingsVersion(SecurityMainIndexMappingVersion.latest().id(), 0), null, ".security", diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 493483a5e4a1b..0b98a595a6ab9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -23,6 +23,8 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -51,8 +53,11 @@ import org.elasticsearch.test.client.NoOpClient; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; import org.elasticsearch.xpack.core.security.test.TestRestrictedIndices; import org.elasticsearch.xpack.security.SecurityFeatures; +import org.elasticsearch.xpack.security.action.rolemapping.ReservedRoleMappingAction; import org.elasticsearch.xpack.security.support.SecuritySystemIndices.SecurityMainIndexMappingVersion; import org.elasticsearch.xpack.security.test.SecurityTestUtils; import org.hamcrest.Matchers; @@ -70,6 +75,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.FILE_SETTINGS_METADATA_NAMESPACE; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -654,6 +660,138 @@ public int minMappingVersion() { })); } + public void testNotReadyForMigrationBecauseOfPrecondition() { + final ClusterState.Builder clusterStateBuilder = createClusterState( + TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7, + SecuritySystemIndices.SECURITY_MAIN_ALIAS, + IndexMetadata.State.OPEN + ); + clusterStateBuilder.nodeFeatures( + Map.of("1", new SecurityFeatures().getFeatures().stream().map(NodeFeature::id).collect(Collectors.toSet())) + ); + manager.clusterChanged(event(markShardsAvailable(clusterStateBuilder))); + assertFalse(manager.isReadyForSecurityMigration(new SecurityMigrations.SecurityMigration() { + @Override + public void migrate(SecurityIndexManager indexManager, Client client, ActionListener listener) { + listener.onResponse(null); + } + + @Override + public Set nodeFeaturesRequired() { + return Set.of(); + } + + @Override + public int minMappingVersion() { + return 0; + } + + @Override + public boolean checkPreConditions(SecurityIndexManager.State securityIndexManagerState) { + return false; + } + })); + } + + private ClusterState.Builder clusterStateBuilderForMigrationTesting() { + return createClusterState( + TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7, + SecuritySystemIndices.SECURITY_MAIN_ALIAS, + IndexMetadata.State.OPEN + ); + } + + public void testGetRoleMappingsCleanupMigrationStatus() { + { + assertThat( + SecurityIndexManager.getRoleMappingsCleanupMigrationStatus( + clusterStateBuilderForMigrationTesting().build(), + SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION + ), + equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.DONE) + ); + } + { + // Migration should be skipped + ClusterState.Builder clusterStateBuilder = clusterStateBuilderForMigrationTesting(); + Metadata.Builder metadataBuilder = new Metadata.Builder(); + metadataBuilder.put(ReservedStateMetadata.builder(FILE_SETTINGS_METADATA_NAMESPACE).build()); + assertThat( + SecurityIndexManager.getRoleMappingsCleanupMigrationStatus(clusterStateBuilder.metadata(metadataBuilder).build(), 1), + equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.SKIP) + ); + } + { + // Not ready for migration + ClusterState.Builder clusterStateBuilder = clusterStateBuilderForMigrationTesting(); + Metadata.Builder metadataBuilder = new Metadata.Builder(); + ReservedStateMetadata.Builder builder = ReservedStateMetadata.builder(FILE_SETTINGS_METADATA_NAMESPACE); + // File settings role mappings exist + ReservedStateHandlerMetadata reservedStateHandlerMetadata = new ReservedStateHandlerMetadata( + ReservedRoleMappingAction.NAME, + Set.of("role_mapping_1") + ); + builder.putHandler(reservedStateHandlerMetadata); + metadataBuilder.put(builder.build()); + + // No role mappings in cluster state yet + metadataBuilder.putCustom(RoleMappingMetadata.TYPE, new RoleMappingMetadata(Set.of())); + + assertThat( + SecurityIndexManager.getRoleMappingsCleanupMigrationStatus(clusterStateBuilder.metadata(metadataBuilder).build(), 1), + equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.NOT_READY) + ); + } + { + // Old role mappings in cluster state + final ClusterState.Builder clusterStateBuilder = clusterStateBuilderForMigrationTesting(); + Metadata.Builder metadataBuilder = new Metadata.Builder(); + ReservedStateMetadata.Builder builder = ReservedStateMetadata.builder(FILE_SETTINGS_METADATA_NAMESPACE); + // File settings role mappings exist + ReservedStateHandlerMetadata reservedStateHandlerMetadata = new ReservedStateHandlerMetadata( + ReservedRoleMappingAction.NAME, + Set.of("role_mapping_1") + ); + builder.putHandler(reservedStateHandlerMetadata); + metadataBuilder.put(builder.build()); + + // Role mappings in cluster state with fallback name + metadataBuilder.putCustom( + RoleMappingMetadata.TYPE, + new RoleMappingMetadata(Set.of(new ExpressionRoleMapping(RoleMappingMetadata.FALLBACK_NAME, null, null, null, null, true))) + ); + + assertThat( + SecurityIndexManager.getRoleMappingsCleanupMigrationStatus(clusterStateBuilder.metadata(metadataBuilder).build(), 1), + equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.NOT_READY) + ); + } + { + // Ready for migration + final ClusterState.Builder clusterStateBuilder = clusterStateBuilderForMigrationTesting(); + Metadata.Builder metadataBuilder = new Metadata.Builder(); + ReservedStateMetadata.Builder builder = ReservedStateMetadata.builder(FILE_SETTINGS_METADATA_NAMESPACE); + // File settings role mappings exist + ReservedStateHandlerMetadata reservedStateHandlerMetadata = new ReservedStateHandlerMetadata( + ReservedRoleMappingAction.NAME, + Set.of("role_mapping_1") + ); + builder.putHandler(reservedStateHandlerMetadata); + metadataBuilder.put(builder.build()); + + // Role mappings in cluster state + metadataBuilder.putCustom( + RoleMappingMetadata.TYPE, + new RoleMappingMetadata(Set.of(new ExpressionRoleMapping("role_mapping_1", null, null, null, null, true))) + ); + + assertThat( + SecurityIndexManager.getRoleMappingsCleanupMigrationStatus(clusterStateBuilder.metadata(metadataBuilder).build(), 1), + equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.READY) + ); + } + } + public void testProcessClosedIndexState() { // Index initially exists final ClusterState.Builder indexAvailable = createClusterState( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationsTests.java new file mode 100644 index 0000000000000..3d3cc47b55cf6 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationsTests.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.security.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.junit.After; +import org.junit.Before; + +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SecurityMigrationsTests extends ESTestCase { + private ThreadPool threadPool; + private Client client; + + public void testGetDuplicateRoleMappingNames() { + assertThat(SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames(), empty()); + assertThat( + SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames( + nativeRoleMapping("roleMapping1"), + nativeRoleMapping("roleMapping2") + ), + empty() + ); + assertThat( + SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames( + nativeRoleMapping("roleMapping1"), + reservedRoleMapping("roleMapping1") + ), + equalTo(List.of("roleMapping1")) + ); + + { + List duplicates = SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames( + nativeRoleMapping("roleMapping1"), + nativeRoleMapping("roleMapping2"), + reservedRoleMapping("roleMapping1"), + reservedRoleMapping("roleMapping2") + ); + assertThat(duplicates, hasSize(2)); + assertThat(duplicates, containsInAnyOrder("roleMapping1", "roleMapping2")); + } + { + List duplicates = SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames( + nativeRoleMapping("roleMapping1"), + nativeRoleMapping("roleMapping2"), + nativeRoleMapping("roleMapping3"), + reservedRoleMapping("roleMapping1"), + reservedRoleMapping("roleMapping2"), + reservedRoleMapping("roleMapping4") + ); + assertThat(duplicates, hasSize(2)); + assertThat(duplicates, containsInAnyOrder("roleMapping1", "roleMapping2")); + } + { + List duplicates = SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames( + nativeRoleMapping("roleMapping1" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX), + nativeRoleMapping("roleMapping2" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX), + nativeRoleMapping("roleMapping3"), + reservedRoleMapping("roleMapping1"), + reservedRoleMapping("roleMapping2"), + reservedRoleMapping("roleMapping3") + ); + assertThat(duplicates, hasSize(1)); + assertThat(duplicates, containsInAnyOrder("roleMapping3")); + } + } + + private static ExpressionRoleMapping reservedRoleMapping(String name) { + return new ExpressionRoleMapping( + name + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + null, + null, + null, + Map.of(ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG, true), + true + ); + } + + private static ExpressionRoleMapping nativeRoleMapping(String name) { + return new ExpressionRoleMapping(name, null, null, null, randomBoolean() ? null : Map.of(), true); + } + + public void testCleanupRoleMappingDuplicatesMigrationPartialFailure() { + // Make sure migration continues even if a duplicate is not found + SecurityIndexManager securityIndexManager = mock(SecurityIndexManager.class); + when(securityIndexManager.getRoleMappingsCleanupMigrationStatus()).thenReturn( + SecurityIndexManager.RoleMappingsCleanupMigrationStatus.READY + ); + doAnswer(inv -> { + final Object[] args = inv.getArguments(); + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) args[2]; + listener.onResponse( + new GetRoleMappingsResponse( + nativeRoleMapping("duplicate-0"), + reservedRoleMapping("duplicate-0"), + nativeRoleMapping("duplicate-1"), + reservedRoleMapping("duplicate-1"), + nativeRoleMapping("duplicate-2"), + reservedRoleMapping("duplicate-2") + ) + ); + return null; + }).when(client).execute(eq(GetRoleMappingsAction.INSTANCE), any(GetRoleMappingsRequest.class), any()); + + final boolean[] duplicatesDeleted = new boolean[3]; + doAnswer(inv -> { + final Object[] args = inv.getArguments(); + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) args[2]; + DeleteRoleMappingRequest request = (DeleteRoleMappingRequest) args[1]; + if (request.getName().equals("duplicate-0")) { + duplicatesDeleted[0] = true; + } + if (request.getName().equals("duplicate-1")) { + if (randomBoolean()) { + listener.onResponse(new DeleteRoleMappingResponse(false)); + } else { + listener.onFailure(new IllegalStateException("bad state")); + } + } + if (request.getName().equals("duplicate-2")) { + duplicatesDeleted[2] = true; + } + return null; + }).when(client).execute(eq(DeleteRoleMappingAction.INSTANCE), any(DeleteRoleMappingRequest.class), any()); + + SecurityMigrations.SecurityMigration securityMigration = new SecurityMigrations.CleanupRoleMappingDuplicatesMigration(); + securityMigration.migrate(securityIndexManager, client, ActionListener.noop()); + + assertTrue(duplicatesDeleted[0]); + assertFalse(duplicatesDeleted[1]); + assertTrue(duplicatesDeleted[2]); + } + + @Before + public void createClientAndThreadPool() { + threadPool = new TestThreadPool("cleanup role mappings test pool"); + client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + } + + @After + public void stopThreadPool() { + terminate(threadPool); + } + +} diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index b9b0531fa5b68..38fbf99068a9b 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -88,6 +88,8 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> keystore 'xpack.watcher.encryption_key', file("${project.projectDir}/src/test/resources/system_key") setting 'xpack.watcher.encrypt_sensitive_data', 'true' + extraConfigFile 'operator/settings.json', file("${project.projectDir}/src/test/resources/operator_defined_role_mappings.json") + // Old versions of the code contain an invalid assertion that trips // during tests. Versions 5.6.9 and 6.2.4 have been fixed by removing // the assertion, but this is impossible for released versions. diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index 4324aed5fee18..b17644cd1c2a9 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -9,11 +9,13 @@ import org.elasticsearch.Build; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Booleans; +import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.test.SecuritySettingsSourceField; import org.junit.Before; @@ -21,6 +23,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; public abstract class AbstractUpgradeTestCase extends ESRestTestCase { @@ -149,4 +152,22 @@ public void setupForTests() throws Exception { } }); } + + protected static void waitForSecurityMigrationCompletion(RestClient adminClient, int version) throws Exception { + final Request request = new Request("GET", "_cluster/state/metadata/.security-7"); + assertBusy(() -> { + Map indices = new XContentTestUtils.JsonMapView(entityAsMap(adminClient.performRequest(request))).get( + "metadata.indices" + ); + assertNotNull(indices); + assertTrue(indices.containsKey(".security-7")); + // JsonMapView doesn't support . prefixed indices (splits on .) + @SuppressWarnings("unchecked") + String responseVersion = new XContentTestUtils.JsonMapView((Map) indices.get(".security-7")).get( + "migration_version.version" + ); + assertNotNull(responseVersion); + assertTrue(Integer.parseInt(responseVersion) >= version); + }); + } } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRoleMappingCleanupIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRoleMappingCleanupIT.java new file mode 100644 index 0000000000000..82d4050c044b1 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRoleMappingCleanupIT.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.TransportVersions.V_8_15_0; +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsInAnyOrder; + +public class SecurityIndexRoleMappingCleanupIT extends AbstractUpgradeTestCase { + + public void testCleanupDuplicateMappings() throws Exception { + if (CLUSTER_TYPE == ClusterType.OLD) { + // If we're in a state where the same operator-defined role mappings can exist both in cluster state and the native store + // (V_8_15_0 transport added to security.role_mapping_cleanup feature added), create a state + // where the native store will need to be cleaned up + assumeTrue( + "Cleanup only needed before security.role_mapping_cleanup feature available in cluster", + clusterHasFeature("security.role_mapping_cleanup") == false + ); + assumeTrue( + "If role mappings are in cluster state but cleanup has not been performed yet, create duplicated role mappings", + minimumTransportVersion().onOrAfter(V_8_15_0) + ); + // Since the old cluster has role mappings in cluster state, but doesn't check duplicates, create duplicates + createNativeRoleMapping("operator_role_mapping_1", Map.of("meta", "test"), true); + createNativeRoleMapping("operator_role_mapping_2", Map.of("meta", "test"), true); + } else if (CLUSTER_TYPE == ClusterType.MIXED) { + // Create a native role mapping that doesn't conflict with anything before the migration run + createNativeRoleMapping("no_name_conflict", Map.of("meta", "test")); + } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + waitForSecurityMigrationCompletion(adminClient(), 2); + assertAllRoleMappings( + client(), + "operator_role_mapping_1" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "operator_role_mapping_2" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "no_name_conflict" + ); + // In the old cluster we might have created these (depending on the node features), so make sure they were removed + assertFalse(roleMappingExistsInSecurityIndex("operator_role_mapping_1")); + assertFalse(roleMappingExistsInSecurityIndex("operator_role_mapping_2")); + assertTrue(roleMappingExistsInSecurityIndex("no_name_conflict")); + // Make sure we can create and delete a conflicting role mapping again + createNativeRoleMapping("operator_role_mapping_1", Map.of("meta", "test"), true); + deleteNativeRoleMapping("operator_role_mapping_1", true); + } + } + + @SuppressWarnings("unchecked") + private boolean roleMappingExistsInSecurityIndex(String mappingName) throws IOException { + final Request request = new Request("POST", "/.security/_search"); + request.setJsonEntity(String.format(Locale.ROOT, """ + {"query":{"bool":{"must":[{"term":{"_id":"%s_%s"}}]}}}""", "role-mapping", mappingName)); + + request.setOptions( + expectWarnings( + "this request accesses system indices: [.security-7]," + + " but in a future major version, direct access to system indices will be prevented by default" + ) + ); + + Response response = adminClient().performRequest(request); + assertOK(response); + final Map responseMap = responseAsMap(response); + + Map hits = ((Map) responseMap.get("hits")); + return ((List) hits.get("hits")).isEmpty() == false; + } + + private void createNativeRoleMapping(String roleMappingName, Map metadata) throws IOException { + createNativeRoleMapping(roleMappingName, metadata, false); + } + + private void createNativeRoleMapping(String roleMappingName, Map metadata, boolean expectWarning) throws IOException { + final Request request = new Request("POST", "/_security/role_mapping/" + roleMappingName); + if (expectWarning) { + request.setOptions( + expectWarnings( + "A read-only role mapping with the same name [" + + roleMappingName + + "] has been previously defined in a configuration file. " + + "Both role mappings will be used to determine role assignments." + ) + ); + } + + BytesReference source = BytesReference.bytes( + jsonBuilder().map( + Map.of( + ExpressionRoleMapping.Fields.ROLES.getPreferredName(), + List.of("superuser"), + ExpressionRoleMapping.Fields.ENABLED.getPreferredName(), + true, + ExpressionRoleMapping.Fields.RULES.getPreferredName(), + Map.of("field", Map.of("username", "role-mapping-test-user")), + RoleDescriptor.Fields.METADATA.getPreferredName(), + metadata + ) + ) + ); + request.setJsonEntity(source.utf8ToString()); + assertOK(client().performRequest(request)); + } + + private void deleteNativeRoleMapping(String roleMappingName, boolean expectWarning) throws IOException { + final Request request = new Request("DELETE", "/_security/role_mapping/" + roleMappingName); + if (expectWarning) { + request.setOptions( + expectWarnings( + "A read-only role mapping with the same name [" + + roleMappingName + + "] has previously been defined in a configuration file. " + + "The native role mapping was deleted, but the read-only mapping will remain active " + + "and will be used to determine role assignments." + ) + ); + } + assertOK(client().performRequest(request)); + } + + private void assertAllRoleMappings(RestClient client, String... roleNames) throws IOException { + Request request = new Request("GET", "/_security/role_mapping"); + Response response = client.performRequest(request); + assertOK(response); + Map responseMap = responseAsMap(response); + + assertThat(responseMap.keySet(), containsInAnyOrder(roleNames)); + assertThat(responseMap.size(), is(roleNames.length)); + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java index d31130e970f03..6c34e68297aa0 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java @@ -58,7 +58,7 @@ public void testRoleMigration() throws Exception { } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { createRoleWithMetadata(upgradedTestRole, Map.of("meta", "test")); assertTrue(canRolesBeMigrated()); - waitForMigrationCompletion(adminClient()); + waitForSecurityMigrationCompletion(adminClient(), 1); assertMigratedDocInSecurityIndex(oldTestRole, "meta", "test"); assertMigratedDocInSecurityIndex(mixed1TestRole, "meta", "test"); assertMigratedDocInSecurityIndex(mixed2TestRole, "meta", "test"); @@ -136,23 +136,6 @@ private static void assertNoMigration(RestClient adminClient) throws Exception { ); } - @SuppressWarnings("unchecked") - private static void waitForMigrationCompletion(RestClient adminClient) throws Exception { - final Request request = new Request("GET", "_cluster/state/metadata/" + INTERNAL_SECURITY_MAIN_INDEX_7); - assertBusy(() -> { - Response response = adminClient.performRequest(request); - assertOK(response); - Map responseMap = responseAsMap(response); - Map indicesMetadataMap = (Map) ((Map) responseMap.get("metadata")).get( - "indices" - ); - assertTrue(indicesMetadataMap.containsKey(INTERNAL_SECURITY_MAIN_INDEX_7)); - assertTrue( - ((Map) indicesMetadataMap.get(INTERNAL_SECURITY_MAIN_INDEX_7)).containsKey(MIGRATION_VERSION_CUSTOM_KEY) - ); - }); - } - private void createRoleWithMetadata(String roleName, Map metadata) throws IOException { final Request request = new Request("POST", "/_security/role/" + roleName); BytesReference source = BytesReference.bytes( diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/operator_defined_role_mappings.json b/x-pack/qa/rolling-upgrade/src/test/resources/operator_defined_role_mappings.json new file mode 100644 index 0000000000000..d897cabb8ab01 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/operator_defined_role_mappings.json @@ -0,0 +1,38 @@ +{ + "metadata": { + "version": "2", + "compatibility": "8.4.0" + }, + "state": { + "role_mappings": { + "operator_role_mapping_1": { + "enabled": true, + "roles": [ + "kibana_user" + ], + "metadata": { + "from_file": true + }, + "rules": { + "field": { + "username": "role-mapping-test-user" + } + } + }, + "operator_role_mapping_2": { + "enabled": true, + "roles": [ + "fleet_user" + ], + "metadata": { + "from_file": true + }, + "rules": { + "field": { + "username": "role-mapping-test-user" + } + } + } + } + } +} From f395f113e9fdf688f11dd9d272496913986b87fc Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 29 Oct 2024 11:36:35 +0000 Subject: [PATCH 182/324] Increase minimum threshold in shard balancer (#115831) Support for thresholds between 0.0 and 1.0 was deprecated in #92100. This commit removes this support in 9.0. --- docs/changelog/115831.yaml | 13 +++++++ .../upgrades/FullClusterRestartIT.java | 35 +++++++++++++++++++ .../allocator/BalancedShardsAllocator.java | 31 ++-------------- .../allocation/BalancedSingleShardTests.java | 19 +++------- .../BalancedShardsAllocatorTests.java | 11 ++---- 5 files changed, 58 insertions(+), 51 deletions(-) create mode 100644 docs/changelog/115831.yaml diff --git a/docs/changelog/115831.yaml b/docs/changelog/115831.yaml new file mode 100644 index 0000000000000..18442ec3b97e6 --- /dev/null +++ b/docs/changelog/115831.yaml @@ -0,0 +1,13 @@ +pr: 115831 +summary: Increase minimum threshold in shard balancer +area: Allocation +type: breaking +issues: [] +breaking: + title: Minimum shard balancer threshold is now 1.0 + area: Cluster and node setting + details: >- + Earlier versions of {es} accepted any non-negative value for `cluster.routing.allocation.balance.threshold`, but values smaller than + `1.0` do not make sense and have been ignored since version 8.6.1. From 9.0.0 these nonsensical values are now forbidden. + impact: Do not set `cluster.routing.allocation.balance.threshold` to a value less than `1.0`. + notable: false diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 92a704f793dc2..fcca3f9a4700c 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -16,9 +16,11 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.Build; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.common.Strings; @@ -27,6 +29,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -72,6 +75,7 @@ import static java.util.stream.Collectors.toList; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION; import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; +import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.THRESHOLD_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; @@ -1949,4 +1953,35 @@ public static void assertNumHits(String index, int numHits, int totalShards) thr assertThat(XContentMapValues.extractValue("_shards.successful", resp), equalTo(totalShards)); assertThat(extractTotalHits(resp), equalTo(numHits)); } + + @UpdateForV10(owner = UpdateForV10.Owner.DISTRIBUTED_COORDINATION) // this test is just about v8->v9 upgrades, remove it in v10 + public void testBalancedShardsAllocatorThreshold() throws Exception { + assumeTrue("test only applies for v8->v9 upgrades", getOldClusterTestVersion().getMajor() == 8); + + final var chosenValue = randomFrom("0", "0.1", "0.5", "0.999"); + + if (isRunningAgainstOldCluster()) { + final var request = newXContentRequest( + HttpMethod.PUT, + "/_cluster/settings", + (builder, params) -> builder.startObject("persistent").field(THRESHOLD_SETTING.getKey(), chosenValue).endObject() + ); + request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE)); + assertOK(client().performRequest(request)); + } + + final var clusterSettingsResponse = ObjectPath.createFromResponse( + client().performRequest(new Request("GET", "/_cluster/settings")) + ); + + final var settingsPath = "persistent." + THRESHOLD_SETTING.getKey(); + final var settingValue = clusterSettingsResponse.evaluate(settingsPath); + + if (isRunningAgainstOldCluster()) { + assertEquals(chosenValue, settingValue); + } else { + assertNull(settingValue); + assertNotNull(clusterSettingsResponse.evaluate("persistent.archived." + THRESHOLD_SETTING.getKey())); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 840aa3a3c1d3f..108bb83d90871 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -32,8 +32,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; -import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -41,7 +39,6 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.PriorityComparator; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.injection.guice.Inject; @@ -109,7 +106,7 @@ public class BalancedShardsAllocator implements ShardsAllocator { public static final Setting THRESHOLD_SETTING = Setting.floatSetting( "cluster.routing.allocation.balance.threshold", 1.0f, - 0.0f, + 1.0f, Property.Dynamic, Property.NodeScope ); @@ -140,34 +137,10 @@ public BalancedShardsAllocator(ClusterSettings clusterSettings, WriteLoadForecas clusterSettings.initializeAndWatch(INDEX_BALANCE_FACTOR_SETTING, value -> this.indexBalanceFactor = value); clusterSettings.initializeAndWatch(WRITE_LOAD_BALANCE_FACTOR_SETTING, value -> this.writeLoadBalanceFactor = value); clusterSettings.initializeAndWatch(DISK_USAGE_BALANCE_FACTOR_SETTING, value -> this.diskUsageBalanceFactor = value); - clusterSettings.initializeAndWatch(THRESHOLD_SETTING, value -> this.threshold = ensureValidThreshold(value)); + clusterSettings.initializeAndWatch(THRESHOLD_SETTING, value -> this.threshold = value); this.writeLoadForecaster = writeLoadForecaster; } - /** - * Clamp threshold to be at least 1, and log a critical deprecation warning if smaller values are given. - * - * Once {@link org.elasticsearch.Version#V_7_17_0} goes out of scope, start to properly reject such bad values. - */ - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - private static float ensureValidThreshold(float threshold) { - if (1.0f <= threshold) { - return threshold; - } else { - DeprecationLogger.getLogger(BalancedShardsAllocator.class) - .critical( - DeprecationCategory.SETTINGS, - "balance_threshold_too_small", - "ignoring value [{}] for [{}] since it is smaller than 1.0; " - + "setting [{}] to a value smaller than 1.0 will be forbidden in a future release", - threshold, - THRESHOLD_SETTING.getKey(), - THRESHOLD_SETTING.getKey() - ); - return 1.0f; - } - } - @Override public void allocate(RoutingAllocation allocation) { assert allocation.ignoreDisable() == false; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java index 41207a2d968b8..9a769567bee1c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java @@ -246,7 +246,7 @@ public void testNodeDecisionsRanking() { // return the same ranking as the current node ClusterState clusterState = ClusterStateCreationUtils.state(randomIntBetween(1, 10), new String[] { "idx" }, 1); ShardRouting shardToRebalance = clusterState.routingTable().index("idx").shardsWithState(ShardRoutingState.STARTED).get(0); - MoveDecision decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet(), -1); + MoveDecision decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet()); int currentRanking = decision.getCurrentNodeRanking(); assertEquals(1, currentRanking); for (NodeAllocationResult result : decision.getNodeDecisions()) { @@ -258,7 +258,7 @@ public void testNodeDecisionsRanking() { clusterState = ClusterStateCreationUtils.state(1, new String[] { "idx" }, randomIntBetween(2, 10)); shardToRebalance = clusterState.routingTable().index("idx").shardsWithState(ShardRoutingState.STARTED).get(0); clusterState = addNodesToClusterState(clusterState, randomIntBetween(1, 10)); - decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet(), 0.01f); + decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet()); for (NodeAllocationResult result : decision.getNodeDecisions()) { assertThat(result.getWeightRanking(), lessThan(decision.getCurrentNodeRanking())); } @@ -285,7 +285,7 @@ public void testNodeDecisionsRanking() { } } clusterState = addNodesToClusterState(clusterState, 1); - decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet(), 0.01f); + decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet()); for (NodeAllocationResult result : decision.getNodeDecisions()) { if (result.getWeightRanking() < decision.getCurrentNodeRanking()) { // highest ranked node should not be any of the initial nodes @@ -298,22 +298,13 @@ public void testNodeDecisionsRanking() { assertTrue(nodesWithTwoShards.contains(result.getNode().getId())); } } - - assertCriticalWarnings(""" - ignoring value [0.01] for [cluster.routing.allocation.balance.threshold] since it is smaller than 1.0; setting \ - [cluster.routing.allocation.balance.threshold] to a value smaller than 1.0 will be forbidden in a future release"""); } private MoveDecision executeRebalanceFor( final ShardRouting shardRouting, final ClusterState clusterState, - final Set noDecisionNodes, - final float threshold + final Set noDecisionNodes ) { - Settings settings = Settings.EMPTY; - if (Float.compare(-1.0f, threshold) != 0) { - settings = Settings.builder().put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), threshold).build(); - } AllocationDecider allocationDecider = new AllocationDecider() { @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { @@ -329,7 +320,7 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca return Decision.YES; } }; - BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings); + BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY); RoutingAllocation routingAllocation = newRoutingAllocation( new AllocationDeciders(Arrays.asList(allocationDecider, rebalanceDecider)), clusterState diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java index 8392b6fe3e148..98c3451329f52 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java @@ -441,15 +441,10 @@ public void testGetIndexDiskUsageInBytes() { public void testThresholdLimit() { final var badValue = (float) randomDoubleBetween(0.0, Math.nextDown(1.0f), true); - assertEquals( - 1.0f, - new BalancedShardsAllocator(Settings.builder().put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), badValue).build()) - .getThreshold(), - 0.0f + expectThrows( + IllegalArgumentException.class, + () -> new BalancedShardsAllocator(Settings.builder().put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), badValue).build()) ); - assertCriticalWarnings("ignoring value [" + badValue + """ - ] for [cluster.routing.allocation.balance.threshold] since it is smaller than 1.0; setting \ - [cluster.routing.allocation.balance.threshold] to a value smaller than 1.0 will be forbidden in a future release"""); final var goodValue = (float) randomDoubleBetween(1.0, 10.0, true); assertEquals( From d18824d4e6bd0beb2e8e5ec854caef64a31594b6 Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Tue, 29 Oct 2024 13:47:32 +0100 Subject: [PATCH 183/324] Set assignment state to "started" in case of zero allocations (#115824) --- .../core/ml/inference/assignment/TrainedModelAssignment.java | 3 +++ .../ml/inference/assignment/TrainedModelAssignmentTests.java | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java index 06c3f75587d62..efd07cceae09b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java @@ -533,6 +533,9 @@ public AssignmentState calculateAssignmentState() { if (assignmentState.equals(AssignmentState.STOPPING)) { return assignmentState; } + if (taskParams.getNumberOfAllocations() == 0) { + return AssignmentState.STARTED; + } if (nodeRoutingTable.values().stream().anyMatch(r -> r.getState().equals(RoutingState.STARTED))) { return AssignmentState.STARTED; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java index c3b6e0089b4ae..dc0a8b52e585a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java @@ -172,6 +172,11 @@ public void testCalculateAssignmentState_GivenNoStartedAssignments() { assertThat(builder.calculateAssignmentState(), equalTo(AssignmentState.STARTING)); } + public void testCalculateAssignmentState_GivenNumAllocationsIsZero() { + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(0), null); + assertThat(builder.calculateAssignmentState(), equalTo(AssignmentState.STARTED)); + } + public void testCalculateAssignmentState_GivenOneStartedAssignment() { TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STARTING, "")); From 6742147d6ada3af42ff73f03eb45fd2486cb64cc Mon Sep 17 00:00:00 2001 From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com> Date: Tue, 29 Oct 2024 08:59:19 -0400 Subject: [PATCH 184/324] [Inference API] Improve chunked results error message (#115807) * Improve chunked results error message * Update RestStatus to conflict * precommit * Update docs/changelog/115807.yaml --- docs/changelog/115807.yaml | 5 +++++ .../xpack/core/inference/results/ResultUtils.java | 5 +++-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/115807.yaml diff --git a/docs/changelog/115807.yaml b/docs/changelog/115807.yaml new file mode 100644 index 0000000000000..d17cabca4bd03 --- /dev/null +++ b/docs/changelog/115807.yaml @@ -0,0 +1,5 @@ +pr: 115807 +summary: "[Inference API] Improve chunked results error message" +area: Machine Learning +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ResultUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ResultUtils.java index 4fe2c9ae486f1..eb68af7589717 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ResultUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ResultUtils.java @@ -14,8 +14,9 @@ public class ResultUtils { public static ElasticsearchStatusException createInvalidChunkedResultException(String expectedResultName, String receivedResultName) { return new ElasticsearchStatusException( - "Expected a chunked inference [{}] received [{}]", - RestStatus.INTERNAL_SERVER_ERROR, + "Received incompatible results. Check that your model_id matches the task_type of this endpoint. " + + "Expected chunked output of type [{}] but received [{}].", + RestStatus.CONFLICT, expectedResultName, receivedResultName ); From 78a531bf4eed313d44b80638ddf015cc586ee2b6 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 29 Oct 2024 14:11:14 +0100 Subject: [PATCH 185/324] Catch and handle disconnect exceptions in search (#115836) Getting a connection can throw an exception for a disconnected node. We failed to handle these in the adjusted spots, leading to a phase failure (and possible memory leaks for outstanding operations) instead of correctly recording a per-shard failure. --- docs/changelog/115836.yaml | 5 ++ .../action/search/DfsQueryPhase.java | 32 +++++++--- .../action/search/FetchSearchPhase.java | 61 +++++++++++-------- .../action/search/RankFeaturePhase.java | 55 ++++++++++------- .../SearchDfsQueryThenFetchAsyncAction.java | 14 +++-- .../SearchQueryThenFetchAsyncAction.java | 9 ++- 6 files changed, 111 insertions(+), 65 deletions(-) create mode 100644 docs/changelog/115836.yaml diff --git a/docs/changelog/115836.yaml b/docs/changelog/115836.yaml new file mode 100644 index 0000000000000..f6da638f1feff --- /dev/null +++ b/docs/changelog/115836.yaml @@ -0,0 +1,5 @@ +pr: 115836 +summary: Catch and handle disconnect exceptions in search +area: Search +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index e0e240be0377a..93c8d66447e34 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -84,15 +84,20 @@ public void run() { for (final DfsSearchResult dfsResult : searchResults) { final SearchShardTarget shardTarget = dfsResult.getSearchShardTarget(); - Transport.Connection connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); - ShardSearchRequest shardRequest = rewriteShardSearchRequest(dfsResult.getShardSearchRequest()); + final int shardIndex = dfsResult.getShardIndex(); QuerySearchRequest querySearchRequest = new QuerySearchRequest( - context.getOriginalIndices(dfsResult.getShardIndex()), + context.getOriginalIndices(shardIndex), dfsResult.getContextId(), - shardRequest, + rewriteShardSearchRequest(dfsResult.getShardSearchRequest()), dfs ); - final int shardIndex = dfsResult.getShardIndex(); + final Transport.Connection connection; + try { + connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); + } catch (Exception e) { + shardFailure(e, querySearchRequest, shardIndex, shardTarget, counter); + return; + } searchTransportService.sendExecuteQuery( connection, querySearchRequest, @@ -112,10 +117,7 @@ protected void innerOnResponse(QuerySearchResult response) { @Override public void onFailure(Exception exception) { try { - context.getLogger() - .debug(() -> "[" + querySearchRequest.contextId() + "] Failed to execute query phase", exception); - progressListener.notifyQueryFailure(shardIndex, shardTarget, exception); - counter.onFailure(shardIndex, shardTarget, exception); + shardFailure(exception, querySearchRequest, shardIndex, shardTarget, counter); } finally { if (context.isPartOfPointInTime(querySearchRequest.contextId()) == false) { // the query might not have been executed at all (for example because thread pool rejected @@ -134,6 +136,18 @@ public void onFailure(Exception exception) { } } + private void shardFailure( + Exception exception, + QuerySearchRequest querySearchRequest, + int shardIndex, + SearchShardTarget shardTarget, + CountedCollector counter + ) { + context.getLogger().debug(() -> "[" + querySearchRequest.contextId() + "] Failed to execute query phase", exception); + progressListener.notifyQueryFailure(shardIndex, shardTarget, exception); + counter.onFailure(shardIndex, shardTarget, exception); + } + // package private for testing ShardSearchRequest rewriteShardSearchRequest(ShardSearchRequest request) { SearchSourceBuilder source = request.source(); diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 99b24bd483fb4..29aba0eee1f55 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -21,6 +21,7 @@ import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.rank.RankDocShardInfo; +import org.elasticsearch.transport.Transport; import java.util.ArrayList; import java.util.HashMap; @@ -214,9 +215,41 @@ private void executeFetch( final ShardSearchContextId contextId = shardPhaseResult.queryResult() != null ? shardPhaseResult.queryResult().getContextId() : shardPhaseResult.rankFeatureResult().getContextId(); + var listener = new SearchActionListener(shardTarget, shardIndex) { + @Override + public void innerOnResponse(FetchSearchResult result) { + try { + progressListener.notifyFetchResult(shardIndex); + counter.onResult(result); + } catch (Exception e) { + context.onPhaseFailure(FetchSearchPhase.this, "", e); + } + } + + @Override + public void onFailure(Exception e) { + try { + logger.debug(() -> "[" + contextId + "] Failed to execute fetch phase", e); + progressListener.notifyFetchFailure(shardIndex, shardTarget, e); + counter.onFailure(shardIndex, shardTarget, e); + } finally { + // the search context might not be cleared on the node where the fetch was executed for example + // because the action was rejected by the thread pool. in this case we need to send a dedicated + // request to clear the search context. + releaseIrrelevantSearchContext(shardPhaseResult, context); + } + } + }; + final Transport.Connection connection; + try { + connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); + } catch (Exception e) { + listener.onFailure(e); + return; + } context.getSearchTransport() .sendExecuteFetch( - context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()), + connection, new ShardFetchSearchRequest( context.getOriginalIndices(shardPhaseResult.getShardIndex()), contextId, @@ -228,31 +261,7 @@ private void executeFetch( aggregatedDfs ), context.getTask(), - new SearchActionListener<>(shardTarget, shardIndex) { - @Override - public void innerOnResponse(FetchSearchResult result) { - try { - progressListener.notifyFetchResult(shardIndex); - counter.onResult(result); - } catch (Exception e) { - context.onPhaseFailure(FetchSearchPhase.this, "", e); - } - } - - @Override - public void onFailure(Exception e) { - try { - logger.debug(() -> "[" + contextId + "] Failed to execute fetch phase", e); - progressListener.notifyFetchFailure(shardIndex, shardTarget, e); - counter.onFailure(shardIndex, shardTarget, e); - } finally { - // the search context might not be cleared on the node where the fetch was executed for example - // because the action was rejected by the thread pool. in this case we need to send a dedicated - // request to clear the search context. - releaseIrrelevantSearchContext(shardPhaseResult, context); - } - } - } + listener ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java index dd3c28bba0fce..e37d6d1729f9f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java @@ -24,6 +24,7 @@ import org.elasticsearch.search.rank.feature.RankFeatureDoc; import org.elasticsearch.search.rank.feature.RankFeatureResult; import org.elasticsearch.search.rank.feature.RankFeatureShardRequest; +import org.elasticsearch.transport.Transport; import java.util.List; @@ -131,9 +132,38 @@ private void executeRankFeatureShardPhase( final SearchShardTarget shardTarget = queryResult.queryResult().getSearchShardTarget(); final ShardSearchContextId contextId = queryResult.queryResult().getContextId(); final int shardIndex = queryResult.getShardIndex(); + var listener = new SearchActionListener(shardTarget, shardIndex) { + @Override + protected void innerOnResponse(RankFeatureResult response) { + try { + progressListener.notifyRankFeatureResult(shardIndex); + rankRequestCounter.onResult(response); + } catch (Exception e) { + context.onPhaseFailure(RankFeaturePhase.this, "", e); + } + } + + @Override + public void onFailure(Exception e) { + try { + logger.debug(() -> "[" + contextId + "] Failed to execute rank phase", e); + progressListener.notifyRankFeatureFailure(shardIndex, shardTarget, e); + rankRequestCounter.onFailure(shardIndex, shardTarget, e); + } finally { + releaseIrrelevantSearchContext(queryResult, context); + } + } + }; + final Transport.Connection connection; + try { + connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); + } catch (Exception e) { + listener.onFailure(e); + return; + } context.getSearchTransport() .sendExecuteRankFeature( - context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()), + connection, new RankFeatureShardRequest( context.getOriginalIndices(queryResult.getShardIndex()), queryResult.getContextId(), @@ -141,28 +171,7 @@ private void executeRankFeatureShardPhase( entry ), context.getTask(), - new SearchActionListener<>(shardTarget, shardIndex) { - @Override - protected void innerOnResponse(RankFeatureResult response) { - try { - progressListener.notifyRankFeatureResult(shardIndex); - rankRequestCounter.onResult(response); - } catch (Exception e) { - context.onPhaseFailure(RankFeaturePhase.this, "", e); - } - } - - @Override - public void onFailure(Exception e) { - try { - logger.debug(() -> "[" + contextId + "] Failed to execute rank phase", e); - progressListener.notifyRankFeatureFailure(shardIndex, shardTarget, e); - rankRequestCounter.onFailure(shardIndex, shardTarget, e); - } finally { - releaseIrrelevantSearchContext(queryResult, context); - } - } - } + listener ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 5b7ee04d020fc..26eb266cd457e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -87,12 +87,14 @@ protected void executePhaseOnShard( final SearchShardTarget shard, final SearchActionListener listener ) { - getSearchTransport().sendExecuteDfs( - getConnection(shard.getClusterAlias(), shard.getNodeId()), - buildShardSearchRequest(shardIt, listener.requestIndex), - getTask(), - listener - ); + final Transport.Connection connection; + try { + connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); + } catch (Exception e) { + listener.onFailure(e); + return; + } + getSearchTransport().sendExecuteDfs(connection, buildShardSearchRequest(shardIt, listener.requestIndex), getTask(), listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index e0ad4691fa991..33b2cdf74cd79 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -94,8 +94,15 @@ protected void executePhaseOnShard( final SearchShardTarget shard, final SearchActionListener listener ) { + final Transport.Connection connection; + try { + connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); + } catch (Exception e) { + listener.onFailure(e); + return; + } ShardSearchRequest request = rewriteShardSearchRequest(super.buildShardSearchRequest(shardIt, listener.requestIndex)); - getSearchTransport().sendExecuteQuery(getConnection(shard.getClusterAlias(), shard.getNodeId()), request, getTask(), listener); + getSearchTransport().sendExecuteQuery(connection, request, getTask(), listener); } @Override From 61829213cf34fc307395d1f1475f652427db6518 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 29 Oct 2024 17:14:50 +0100 Subject: [PATCH 186/324] Tweak Logsdb* and TsdbIndexingRollingUpgradeIT (#115850) Adjust assertion to more losely detect an error that can be ignored. Closes #115817 --- .../upgrades/LogsdbIndexingRollingUpgradeIT.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java index 226cb3dda2ba1..9cb91438e09c0 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java @@ -28,10 +28,7 @@ import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.enableLogsdbByDefault; import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.getWriteBackingIndex; import static org.elasticsearch.upgrades.TsdbIT.formatInstant; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.*; public class LogsdbIndexingRollingUpgradeIT extends AbstractRollingUpgradeTestCase { @@ -240,7 +237,7 @@ protected static void startTrial() throws IOException { } catch (ResponseException e) { var responseBody = entityAsMap(e.getResponse()); String error = ObjectPath.evaluate(responseBody, "error_message"); - assertThat(error, equalTo("Trial was already activated.")); + assertThat(error, containsString("Trial was already activated.")); } } From 23e1116adb1b266157b423d36fb05799a62b79ba Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Tue, 29 Oct 2024 10:57:12 -0600 Subject: [PATCH 187/324] Ensure thread context set for streaming (#115683) Currently the thread context is lost between streaming context switches. This commit ensures that each time the thread context is properly set before providing new data to the stream. --- .../netty4/Netty4HttpPipeliningHandler.java | 5 +- .../netty4/Netty4HttpRequestBodyStream.java | 33 ++++--- .../Netty4HttpRequestBodyStreamTests.java | 69 ++++++++++++- .../action/bulk/IncrementalBulkService.java | 99 ++++++++----------- .../elasticsearch/node/NodeConstruction.java | 6 +- .../elasticsearch/rest/BaseRestHandler.java | 1 + .../rest/action/document/RestBulkAction.java | 3 +- .../action/ActionModuleTests.java | 10 +- .../AbstractHttpServerTransportTests.java | 2 +- .../action/document/RestBulkActionTests.java | 14 ++- .../xpack/security/SecurityTests.java | 2 +- 11 files changed, 148 insertions(+), 96 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java index b08c93a4dc240..1a391a05add58 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -137,7 +137,10 @@ public void channelRead(final ChannelHandlerContext ctx, final Object msg) { netty4HttpRequest = new Netty4HttpRequest(readSequence++, fullHttpRequest); currentRequestStream = null; } else { - var contentStream = new Netty4HttpRequestBodyStream(ctx.channel()); + var contentStream = new Netty4HttpRequestBodyStream( + ctx.channel(), + serverTransport.getThreadPool().getThreadContext() + ); currentRequestStream = contentStream; netty4HttpRequest = new Netty4HttpRequest(readSequence++, request, contentStream); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java index 9a0dc09b7566c..238faa7a9237e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java @@ -16,6 +16,7 @@ import io.netty.handler.codec.http.HttpContent; import io.netty.handler.codec.http.LastHttpContent; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Releasables; import org.elasticsearch.http.HttpBody; import org.elasticsearch.transport.netty4.Netty4Utils; @@ -34,14 +35,18 @@ public class Netty4HttpRequestBodyStream implements HttpBody.Stream { private final Channel channel; private final ChannelFutureListener closeListener = future -> doClose(); private final List tracingHandlers = new ArrayList<>(4); + private final ThreadContext threadContext; private ByteBuf buf; private boolean hasLast = false; private boolean requested = false; private boolean closing = false; private HttpBody.ChunkHandler handler; + private ThreadContext.StoredContext requestContext; - public Netty4HttpRequestBodyStream(Channel channel) { + public Netty4HttpRequestBodyStream(Channel channel, ThreadContext threadContext) { this.channel = channel; + this.threadContext = threadContext; + this.requestContext = threadContext.newStoredContext(); Netty4Utils.addListener(channel.closeFuture(), closeListener); channel.config().setAutoRead(false); } @@ -66,6 +71,7 @@ public void addTracingHandler(ChunkHandler chunkHandler) { public void next() { assert closing == false : "cannot request next chunk on closing stream"; assert handler != null : "handler must be set before requesting next chunk"; + requestContext = threadContext.newStoredContext(); channel.eventLoop().submit(() -> { requested = true; if (buf == null) { @@ -108,11 +114,6 @@ private void addChunk(ByteBuf chunk) { } } - // visible for test - Channel channel() { - return channel; - } - // visible for test ByteBuf buf() { return buf; @@ -129,10 +130,12 @@ private void send() { var bytesRef = Netty4Utils.toReleasableBytesReference(buf); requested = false; buf = null; - for (var tracer : tracingHandlers) { - tracer.onNext(bytesRef, hasLast); + try (var ignored = threadContext.restoreExistingContext(requestContext)) { + for (var tracer : tracingHandlers) { + tracer.onNext(bytesRef, hasLast); + } + handler.onNext(bytesRef, hasLast); } - handler.onNext(bytesRef, hasLast); if (hasLast) { channel.config().setAutoRead(true); channel.closeFuture().removeListener(closeListener); @@ -150,11 +153,13 @@ public void close() { private void doClose() { closing = true; - for (var tracer : tracingHandlers) { - Releasables.closeExpectNoException(tracer); - } - if (handler != null) { - handler.close(); + try (var ignored = threadContext.restoreExistingContext(requestContext)) { + for (var tracer : tracingHandlers) { + Releasables.closeExpectNoException(tracer); + } + if (handler != null) { + handler.close(); + } } if (buf != null) { buf.release(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java index f495883631a4e..5ff5a27e2d551 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java @@ -19,24 +19,33 @@ import io.netty.handler.flow.FlowControlHandler; import org.elasticsearch.common.bytes.ReleasableBytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpBody; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.hasEntry; public class Netty4HttpRequestBodyStreamTests extends ESTestCase { - EmbeddedChannel channel; - Netty4HttpRequestBodyStream stream; + private final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + private EmbeddedChannel channel; + private Netty4HttpRequestBodyStream stream; static HttpBody.ChunkHandler discardHandler = (chunk, isLast) -> chunk.close(); @Override public void setUp() throws Exception { super.setUp(); channel = new EmbeddedChannel(); - stream = new Netty4HttpRequestBodyStream(channel); + threadContext.putHeader("header1", "value1"); + stream = new Netty4HttpRequestBodyStream(channel, threadContext); stream.setHandler(discardHandler); // set default handler, each test might override one channel.pipeline().addLast(new SimpleChannelInboundHandler(false) { @Override @@ -118,6 +127,60 @@ public void testReadFromChannel() { assertTrue("should receive last content", gotLast.get()); } + public void testReadFromHasCorrectThreadContext() throws InterruptedException { + var gotLast = new AtomicBoolean(false); + AtomicReference> headers = new AtomicReference<>(); + stream.setHandler(new HttpBody.ChunkHandler() { + @Override + public void onNext(ReleasableBytesReference chunk, boolean isLast) { + headers.set(threadContext.getHeaders()); + gotLast.set(isLast); + chunk.close(); + } + + @Override + public void close() { + headers.set(threadContext.getHeaders()); + } + }); + channel.pipeline().addFirst(new FlowControlHandler()); // block all incoming messages, need explicit channel.read() + var chunkSize = 1024; + + channel.writeInbound(randomContent(chunkSize)); + channel.writeInbound(randomLastContent(chunkSize)); + + threadContext.putHeader("header2", "value2"); + stream.next(); + + Thread thread = new Thread(() -> channel.runPendingTasks()); + thread.start(); + thread.join(); + + assertThat(headers.get(), hasEntry("header1", "value1")); + assertThat(headers.get(), hasEntry("header2", "value2")); + + threadContext.putHeader("header3", "value3"); + stream.next(); + + thread = new Thread(() -> channel.runPendingTasks()); + thread.start(); + thread.join(); + + assertThat(headers.get(), hasEntry("header1", "value1")); + assertThat(headers.get(), hasEntry("header2", "value2")); + assertThat(headers.get(), hasEntry("header3", "value3")); + + assertTrue("should receive last content", gotLast.get()); + + headers.set(new HashMap<>()); + + stream.close(); + + assertThat(headers.get(), hasEntry("header1", "value1")); + assertThat(headers.get(), hasEntry("header2", "value2")); + assertThat(headers.get(), hasEntry("header3", "value3")); + } + HttpContent randomContent(int size, boolean isLast) { var buf = Unpooled.wrappedBuffer(randomByteArrayOfLength(size)); if (isLast) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/IncrementalBulkService.java b/server/src/main/java/org/elasticsearch/action/bulk/IncrementalBulkService.java index 2e7c87301b2f6..6ce198260ba3c 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/IncrementalBulkService.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/IncrementalBulkService.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -43,12 +42,10 @@ public class IncrementalBulkService { private final Client client; private final AtomicBoolean enabledForTests = new AtomicBoolean(true); private final IndexingPressure indexingPressure; - private final ThreadContext threadContext; - public IncrementalBulkService(Client client, IndexingPressure indexingPressure, ThreadContext threadContext) { + public IncrementalBulkService(Client client, IndexingPressure indexingPressure) { this.client = client; this.indexingPressure = indexingPressure; - this.threadContext = threadContext; } public Handler newBulkRequest() { @@ -58,7 +55,7 @@ public Handler newBulkRequest() { public Handler newBulkRequest(@Nullable String waitForActiveShards, @Nullable TimeValue timeout, @Nullable String refresh) { ensureEnabled(); - return new Handler(client, threadContext, indexingPressure, waitForActiveShards, timeout, refresh); + return new Handler(client, indexingPressure, waitForActiveShards, timeout, refresh); } private void ensureEnabled() { @@ -94,7 +91,6 @@ public static class Handler implements Releasable { public static final BulkRequest.IncrementalState EMPTY_STATE = new BulkRequest.IncrementalState(Collections.emptyMap(), true); private final Client client; - private final ThreadContext threadContext; private final IndexingPressure indexingPressure; private final ActiveShardCount waitForActiveShards; private final TimeValue timeout; @@ -106,22 +102,18 @@ public static class Handler implements Releasable { private boolean globalFailure = false; private boolean incrementalRequestSubmitted = false; private boolean bulkInProgress = false; - private ThreadContext.StoredContext requestContext; private Exception bulkActionLevelFailure = null; private long currentBulkSize = 0L; private BulkRequest bulkRequest = null; protected Handler( Client client, - ThreadContext threadContext, IndexingPressure indexingPressure, @Nullable String waitForActiveShards, @Nullable TimeValue timeout, @Nullable String refresh ) { this.client = client; - this.threadContext = threadContext; - this.requestContext = threadContext.newStoredContext(); this.indexingPressure = indexingPressure; this.waitForActiveShards = waitForActiveShards != null ? ActiveShardCount.parseString(waitForActiveShards) : null; this.timeout = timeout; @@ -141,31 +133,28 @@ public void addItems(List> items, Releasable releasable, Runn if (shouldBackOff()) { final boolean isFirstRequest = incrementalRequestSubmitted == false; incrementalRequestSubmitted = true; - try (var ignored = threadContext.restoreExistingContext(requestContext)) { - final ArrayList toRelease = new ArrayList<>(releasables); - releasables.clear(); - bulkInProgress = true; - client.bulk(bulkRequest, ActionListener.runAfter(new ActionListener<>() { - - @Override - public void onResponse(BulkResponse bulkResponse) { - handleBulkSuccess(bulkResponse); - createNewBulkRequest( - new BulkRequest.IncrementalState(bulkResponse.getIncrementalState().shardLevelFailures(), true) - ); - } - - @Override - public void onFailure(Exception e) { - handleBulkFailure(isFirstRequest, e); - } - }, () -> { - bulkInProgress = false; - requestContext = threadContext.newStoredContext(); - toRelease.forEach(Releasable::close); - nextItems.run(); - })); - } + final ArrayList toRelease = new ArrayList<>(releasables); + releasables.clear(); + bulkInProgress = true; + client.bulk(bulkRequest, ActionListener.runAfter(new ActionListener<>() { + + @Override + public void onResponse(BulkResponse bulkResponse) { + handleBulkSuccess(bulkResponse); + createNewBulkRequest( + new BulkRequest.IncrementalState(bulkResponse.getIncrementalState().shardLevelFailures(), true) + ); + } + + @Override + public void onFailure(Exception e) { + handleBulkFailure(isFirstRequest, e); + } + }, () -> { + bulkInProgress = false; + toRelease.forEach(Releasable::close); + nextItems.run(); + })); } else { nextItems.run(); } @@ -187,28 +176,26 @@ public void lastItems(List> items, Releasable releasable, Act } else { assert bulkRequest != null; if (internalAddItems(items, releasable)) { - try (var ignored = threadContext.restoreExistingContext(requestContext)) { - final ArrayList toRelease = new ArrayList<>(releasables); - releasables.clear(); - // We do not need to set this back to false as this will be the last request. - bulkInProgress = true; - client.bulk(bulkRequest, ActionListener.runBefore(new ActionListener<>() { - - private final boolean isFirstRequest = incrementalRequestSubmitted == false; - - @Override - public void onResponse(BulkResponse bulkResponse) { - handleBulkSuccess(bulkResponse); - listener.onResponse(combineResponses()); - } + final ArrayList toRelease = new ArrayList<>(releasables); + releasables.clear(); + // We do not need to set this back to false as this will be the last request. + bulkInProgress = true; + client.bulk(bulkRequest, ActionListener.runBefore(new ActionListener<>() { + + private final boolean isFirstRequest = incrementalRequestSubmitted == false; + + @Override + public void onResponse(BulkResponse bulkResponse) { + handleBulkSuccess(bulkResponse); + listener.onResponse(combineResponses()); + } - @Override - public void onFailure(Exception e) { - handleBulkFailure(isFirstRequest, e); - errorResponse(listener); - } - }, () -> toRelease.forEach(Releasable::close))); - } + @Override + public void onFailure(Exception e) { + handleBulkFailure(isFirstRequest, e); + errorResponse(listener); + } + }, () -> toRelease.forEach(Releasable::close))); } else { errorResponse(listener); } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 0a88a202ac8d3..5354b1097326b 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -915,11 +915,7 @@ private void construct( terminationHandler = getSinglePlugin(terminationHandlers, TerminationHandler.class).orElse(null); final IndexingPressure indexingLimits = new IndexingPressure(settings); - final IncrementalBulkService incrementalBulkService = new IncrementalBulkService( - client, - indexingLimits, - threadPool.getThreadContext() - ); + final IncrementalBulkService incrementalBulkService = new IncrementalBulkService(client, indexingLimits); ActionModule actionModule = new ActionModule( settings, diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index c8cf0bf93879b..f1b59ed14cefb 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -125,6 +125,7 @@ public final void handleRequest(RestRequest request, RestChannel channel, NodeCl if (request.isStreamedContent()) { assert action instanceof RequestBodyChunkConsumer; var chunkConsumer = (RequestBodyChunkConsumer) action; + request.contentStream().setHandler(new HttpBody.ChunkHandler() { @Override public void onNext(ReleasableBytesReference chunk, boolean isLast) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index 1e80e6de60d65..7b82481d3d283 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -173,8 +173,7 @@ static class ChunkHandler implements BaseRestHandler.RequestBodyChunkConsumer { this.defaultListExecutedPipelines = request.paramAsBoolean("list_executed_pipelines", false); this.defaultRequireAlias = request.paramAsBoolean(DocWriteRequest.REQUIRE_ALIAS, false); this.defaultRequireDataStream = request.paramAsBoolean(DocWriteRequest.REQUIRE_DATA_STREAM, false); - // TODO: Fix type deprecation logging - this.parser = new BulkRequestParser(false, request.getRestApiVersion()); + this.parser = new BulkRequestParser(true, request.getRestApiVersion()); this.handlerSupplier = handlerSupplier; } diff --git a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java index 871062a687429..8d3561f2179cd 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java @@ -131,7 +131,7 @@ public void testSetupRestHandlerContainsKnownBuiltin() { null, List.of(), RestExtension.allowAll(), - new IncrementalBulkService(null, null, new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(null, null) ); actionModule.initRestHandlers(null, null); // At this point the easiest way to confirm that a handler is loaded is to try to register another one on top of it and to fail @@ -196,7 +196,7 @@ public String getName() { null, List.of(), RestExtension.allowAll(), - new IncrementalBulkService(null, null, new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(null, null) ); Exception e = expectThrows(IllegalArgumentException.class, () -> actionModule.initRestHandlers(null, null)); assertThat(e.getMessage(), startsWith("Cannot replace existing handler for [/_nodes] for method: GET")); @@ -254,7 +254,7 @@ public List getRestHandlers( null, List.of(), RestExtension.allowAll(), - new IncrementalBulkService(null, null, new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(null, null) ); actionModule.initRestHandlers(null, null); // At this point the easiest way to confirm that a handler is loaded is to try to register another one on top of it and to fail @@ -305,7 +305,7 @@ public void test3rdPartyHandlerIsNotInstalled() { null, List.of(), RestExtension.allowAll(), - new IncrementalBulkService(null, null, new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(null, null) ) ); assertThat( @@ -347,7 +347,7 @@ public void test3rdPartyRestControllerIsNotInstalled() { null, List.of(), RestExtension.allowAll(), - new IncrementalBulkService(null, null, new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(null, null) ) ); assertThat( diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index 77133516f37d5..cf623e77f740a 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -1179,7 +1179,7 @@ public Collection getRestHeaders() { null, List.of(), RestExtension.allowAll(), - new IncrementalBulkService(null, null, new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(null, null) ); } diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java index d3cd6dd9ca420..25cfd1e56514c 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java @@ -20,8 +20,6 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.ReleasableBytesReference; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Releasable; import org.elasticsearch.http.HttpBody; import org.elasticsearch.index.IndexVersion; @@ -67,7 +65,7 @@ public void bulk(BulkRequest request, ActionListener listener) { params.put("pipeline", "timestamps"); new RestBulkAction( settings(IndexVersion.current()).build(), - new IncrementalBulkService(mock(Client.class), mock(IndexingPressure.class), new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(mock(Client.class), mock(IndexingPressure.class)) ).handleRequest( new FakeRestRequest.Builder(xContentRegistry()).withPath("my_index/_bulk").withParams(params).withContent(new BytesArray(""" {"index":{"_id":"1"}} @@ -102,7 +100,7 @@ public void bulk(BulkRequest request, ActionListener listener) { { new RestBulkAction( settings(IndexVersion.current()).build(), - new IncrementalBulkService(mock(Client.class), mock(IndexingPressure.class), new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(mock(Client.class), mock(IndexingPressure.class)) ).handleRequest( new FakeRestRequest.Builder(xContentRegistry()).withPath("my_index/_bulk") .withParams(params) @@ -126,7 +124,7 @@ public void bulk(BulkRequest request, ActionListener listener) { bulkCalled.set(false); new RestBulkAction( settings(IndexVersion.current()).build(), - new IncrementalBulkService(mock(Client.class), mock(IndexingPressure.class), new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(mock(Client.class), mock(IndexingPressure.class)) ).handleRequest( new FakeRestRequest.Builder(xContentRegistry()).withPath("my_index/_bulk") .withParams(params) @@ -149,7 +147,7 @@ public void bulk(BulkRequest request, ActionListener listener) { bulkCalled.set(false); new RestBulkAction( settings(IndexVersion.current()).build(), - new IncrementalBulkService(mock(Client.class), mock(IndexingPressure.class), new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(mock(Client.class), mock(IndexingPressure.class)) ).handleRequest( new FakeRestRequest.Builder(xContentRegistry()).withPath("my_index/_bulk") .withParams(params) @@ -173,7 +171,7 @@ public void bulk(BulkRequest request, ActionListener listener) { bulkCalled.set(false); new RestBulkAction( settings(IndexVersion.current()).build(), - new IncrementalBulkService(mock(Client.class), mock(IndexingPressure.class), new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(mock(Client.class), mock(IndexingPressure.class)) ).handleRequest( new FakeRestRequest.Builder(xContentRegistry()).withPath("my_index/_bulk") .withParams(params) @@ -229,7 +227,7 @@ public void next() { RestBulkAction.ChunkHandler chunkHandler = new RestBulkAction.ChunkHandler( true, request, - () -> new IncrementalBulkService.Handler(null, new ThreadContext(Settings.EMPTY), null, null, null, null) { + () -> new IncrementalBulkService.Handler(null, null, null, null, null) { @Override public void addItems(List> items, Releasable releasable, Runnable nextItems) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 8d580f10e5137..c0e55992df88f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -824,7 +824,7 @@ public void testSecurityRestHandlerInterceptorCanBeInstalled() throws IllegalAcc null, List.of(), RestExtension.allowAll(), - new IncrementalBulkService(null, null, new ThreadContext(Settings.EMPTY)) + new IncrementalBulkService(null, null) ); actionModule.initRestHandlers(null, null); From 812e43849235507409880cdeba53707c3f22ddcc Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Tue, 29 Oct 2024 14:11:37 -0400 Subject: [PATCH 188/324] [CI] Stop gradle processes between tasks in packer cache --- .buildkite/packer_cache.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.buildkite/packer_cache.sh b/.buildkite/packer_cache.sh index 01e1ad5cd7823..e4a80d439741d 100755 --- a/.buildkite/packer_cache.sh +++ b/.buildkite/packer_cache.sh @@ -30,5 +30,7 @@ for branch in "${branches[@]}"; do export JAVA_HOME="$HOME/.java/$ES_BUILD_JAVA" "checkout/${branch}/gradlew" --project-dir "$CHECKOUT_DIR" --parallel -s resolveAllDependencies -Dorg.gradle.warning.mode=none -DisCI --max-workers=4 + "checkout/${branch}/gradlew" --stop + pkill -f '.*GradleDaemon.*' rm -rf "checkout/${branch}" done From 18c3bcbd6cda661a179f7eacd29d13d592d692a5 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Tue, 29 Oct 2024 19:40:23 +0100 Subject: [PATCH 189/324] Fix cloud deploy PR job after removing cloud image (#115857) * Fix cloud deploy PR job after removing cloud image * Fix task name for building cloud ess docker image in cloud deploy --- .buildkite/scripts/cloud-deploy.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.buildkite/scripts/cloud-deploy.sh b/.buildkite/scripts/cloud-deploy.sh index 2b98aa224406b..045b05ce16dee 100755 --- a/.buildkite/scripts/cloud-deploy.sh +++ b/.buildkite/scripts/cloud-deploy.sh @@ -2,11 +2,11 @@ set -euo pipefail -.ci/scripts/run-gradle.sh buildCloudDockerImage +.ci/scripts/run-gradle.sh buildCloudEssDockerImage ES_VERSION=$(grep 'elasticsearch' build-tools-internal/version.properties | awk '{print $3}') -DOCKER_TAG="docker.elastic.co/elasticsearch-ci/elasticsearch-cloud:${ES_VERSION}-${BUILDKITE_COMMIT:0:7}" -docker tag elasticsearch-cloud:test "$DOCKER_TAG" +DOCKER_TAG="docker.elastic.co/elasticsearch-ci/elasticsearch-cloud-ess:${ES_VERSION}-${BUILDKITE_COMMIT:0:7}" +docker tag elasticsearch-cloud-ess:test "$DOCKER_TAG" echo "$DOCKER_REGISTRY_PASSWORD" | docker login -u "$DOCKER_REGISTRY_USERNAME" --password-stdin docker.elastic.co unset DOCKER_REGISTRY_USERNAME DOCKER_REGISTRY_PASSWORD From 853f51fa05b1808d8741a20e2f009b06cbcf87b9 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 30 Oct 2024 05:53:18 +1100 Subject: [PATCH 190/324] Mute org.elasticsearch.reservedstate.service.FileSettingsServiceTests testProcessFileChanges #115280 --- muted-tests.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/muted-tests.yml b/muted-tests.yml index 419e8fbb68566..22e57a524f0bc 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -287,6 +287,9 @@ tests: - class: org.elasticsearch.search.StressSearchServiceReaperIT method: testStressReaper issue: https://github.com/elastic/elasticsearch/issues/115816 +- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests + method: testProcessFileChanges + issue: https://github.com/elastic/elasticsearch/issues/115280 # Examples: # @@ -325,4 +328,4 @@ tests: # issue: "https://github.com/elastic/elasticsearch/..." # - class: "org.elasticsearch.xpack.esql.**" # method: "test {union_types.MultiIndexIpStringStatsInline *}" -# issue: "https://github.com/elastic/elasticsearch/..." \ No newline at end of file +# issue: "https://github.com/elastic/elasticsearch/..." From 06eb0727c22db5559cc9cc9da46ba423bd3663c5 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 29 Oct 2024 21:12:43 +0200 Subject: [PATCH 191/324] Use flattened names in ignored source (#115822) * Use flattened names in ignored source * spotless * fix rest compat * fix unittests * expand dots --- rest-api-spec/build.gradle | 4 + .../indices.create/20_synthetic_source.yml | 20 +-- .../21_synthetic_source_stored.yml | 49 +++++++ .../index/mapper/DocumentParserContext.java | 6 +- .../mapper/DotExpandingXContentParser.java | 4 + .../index/mapper/XContentDataHelper.java | 15 +- .../mapper/IgnoredSourceFieldMapperTests.java | 130 ++++++++++++++++++ 7 files changed, 211 insertions(+), 17 deletions(-) diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 6cc2028bffa39..b9064ab1d79ad 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -59,6 +59,10 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.replaceValueInMatch("profile.shards.0.dfs.knn.0.query.0.description", "DocAndScoreQuery[0,...][0.009673266,...],0.009673266", "dfs knn vector profiling with vector_operations_count") task.skipTest("indices.sort/10_basic/Index Sort", "warning does not exist for compatibility") task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") + task.skipTest("indices.create/20_synthetic_source/object with dynamic override", "temporary until backported") + task.skipTest("indices.create/20_synthetic_source/object with unmapped fields", "temporary until backported") + task.skipTest("indices.create/20_synthetic_source/empty object with unmapped fields", "temporary until backported") + task.skipTest("indices.create/20_synthetic_source/nested object with unmapped fields", "temporary until backported") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index a871d2ac0ae15..258dfeb57e00c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -1,6 +1,6 @@ object with unmapped fields: - requires: - cluster_features: ["mapper.track_ignored_source"] + cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -41,13 +41,13 @@ object with unmapped fields: - match: { hits.hits.0._source.some_string: AaAa } - match: { hits.hits.0._source.some_int: 1000 } - match: { hits.hits.0._source.some_double: 123.456789 } - - match: { hits.hits.0._source.a.very.deeply.nested.field: AAAA } + - match: { hits.hits.0._source.a: { very.deeply.nested.field: AAAA } } - match: { hits.hits.0._source.some_bool: true } - match: { hits.hits.1._source.name: bbbb } - match: { hits.hits.1._source.some_string: BbBb } - match: { hits.hits.1._source.some_int: 2000 } - match: { hits.hits.1._source.some_double: 321.987654 } - - match: { hits.hits.1._source.a.very.deeply.nested.field: BBBB } + - match: { hits.hits.1._source.a: { very.deeply.nested.field: BBBB } } --- @@ -100,7 +100,7 @@ unmapped arrays: --- nested object with unmapped fields: - requires: - cluster_features: ["mapper.track_ignored_source"] + cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -143,16 +143,16 @@ nested object with unmapped fields: - match: { hits.total.value: 2 } - match: { hits.hits.0._source.path.to.name: aaaa } - match: { hits.hits.0._source.path.to.surname: AaAa } - - match: { hits.hits.0._source.path.some.other.name: AaAaAa } + - match: { hits.hits.0._source.path.some.other\.name: AaAaAa } - match: { hits.hits.1._source.path.to.name: bbbb } - match: { hits.hits.1._source.path.to.surname: BbBb } - - match: { hits.hits.1._source.path.some.other.name: BbBbBb } + - match: { hits.hits.1._source.path.some.other\.name: BbBbBb } --- empty object with unmapped fields: - requires: - cluster_features: ["mapper.track_ignored_source"] + cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -191,7 +191,7 @@ empty object with unmapped fields: - match: { hits.total.value: 1 } - match: { hits.hits.0._source.path.to.surname: AaAa } - - match: { hits.hits.0._source.path.some.other.name: AaAaAa } + - match: { hits.hits.0._source.path.some.other\.name: AaAaAa } --- @@ -434,7 +434,7 @@ mixed disabled and enabled objects: --- object with dynamic override: - requires: - cluster_features: ["mapper.ignored_source.dont_expand_dots"] + cluster_features: ["mapper.ignored_source.dont_expand_dots", "mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -475,7 +475,7 @@ object with dynamic override: - match: { hits.hits.0._source.path_no.to: { a.very.deeply.nested.field: A } } - match: { hits.hits.0._source.path_runtime.name: bar } - match: { hits.hits.0._source.path_runtime.some_int: 20 } - - match: { hits.hits.0._source.path_runtime.to.a.very.deeply.nested.field: B } + - match: { hits.hits.0._source.path_runtime.to: { a.very.deeply.nested.field: B } } --- diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml index 6a4e92f694220..f3545bb0a3f0e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -1249,3 +1249,52 @@ index param - nested object with stored array: - match: { hits.hits.1._source.nested.0.b.1.c: 300 } - match: { hits.hits.1._source.nested.1.b.0.c: 40 } - match: { hits.hits.1._source.nested.1.b.1.c: 400 } + + +--- +index param - flattened fields: + - requires: + cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] + reason: requires keeping array source + + - do: + indices.create: + index: test + body: + settings: + index: + mapping: + synthetic_source_keep: arrays + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + outer: + properties: + inner: + type: object + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "name": "A", "outer": { "inner": [ { "a.b": "AA", "a.c": "AAA" } ] } }' + - '{ "create": { } }' + - '{ "name": "B", "outer": { "inner": [ { "a.x.y.z": "BB", "a.z.y.x": "BBB" } ] } }' + + + - match: { errors: false } + + - do: + search: + index: test + sort: name + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.outer.inner: [{ a.b: AA, a.c: AAA }] } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.outer.inner: [{ a.x.y.z: BB, a.z.y.x: BBB }] } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 3b1f1a6d2809a..c884d68c8f0ee 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -528,11 +528,7 @@ public final boolean addDynamicMapper(Mapper mapper) { if (canAddIgnoredField()) { try { addIgnoredField( - IgnoredSourceFieldMapper.NameValue.fromContext( - this, - mapper.fullPath(), - XContentDataHelper.encodeToken(parser()) - ) + IgnoredSourceFieldMapper.NameValue.fromContext(this, mapper.fullPath(), encodeFlattenedToken()) ); } catch (IOException e) { throw new IllegalArgumentException("failed to parse field [" + mapper.fullPath() + " ]", e); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DotExpandingXContentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DotExpandingXContentParser.java index fc003e709cbca..42784e0974417 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DotExpandingXContentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DotExpandingXContentParser.java @@ -34,6 +34,10 @@ */ class DotExpandingXContentParser extends FilterXContentParserWrapper { + static boolean isInstance(XContentParser parser) { + return parser instanceof WrappingParser; + } + private static final class WrappingParser extends FilterXContentParser { private final ContentPath contentPath; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java index 8bacaf8505f91..dee5ff92040a9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java @@ -221,8 +221,11 @@ static Tuple cloneSubContextWithParser(Do private static Tuple cloneSubContextParserConfiguration(DocumentParserContext context) throws IOException { XContentParser parser = context.parser(); + var oldValue = context.path().isWithinLeafObject(); + context.path().setWithinLeafObject(true); XContentBuilder builder = XContentBuilder.builder(parser.contentType().xContent()); builder.copyCurrentStructure(parser); + context.path().setWithinLeafObject(oldValue); XContentParserConfiguration configuration = XContentParserConfiguration.EMPTY.withRegistry(parser.getXContentRegistry()) .withDeprecationHandler(parser.getDeprecationHandler()) @@ -235,9 +238,17 @@ private static DocumentParserContext cloneDocumentParserContext( XContentParserConfiguration configuration, XContentBuilder builder ) throws IOException { - DocumentParserContext subcontext = context.switchParser( - XContentHelper.createParserNotCompressed(configuration, BytesReference.bytes(builder), context.parser().contentType()) + XContentParser newParser = XContentHelper.createParserNotCompressed( + configuration, + BytesReference.bytes(builder), + context.parser().contentType() ); + if (DotExpandingXContentParser.isInstance(context.parser())) { + // If we performed dot expanding originally we need to continue to do so when we replace the parser. + newParser = DotExpandingXContentParser.expandDots(newParser, context.path()); + } + + DocumentParserContext subcontext = context.switchParser(newParser); subcontext.setRecordedSource(); // Avoids double-storing parts of the source for the same parser subtree. subcontext.parser().nextToken(); return subcontext; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index 7a4ce8bcb03fa..884372d249287 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -2075,6 +2075,136 @@ public void testDisabledObjectWithFlatFields() throws IOException { {"top":[{"file.name":"A","file.line":10},{"file.name":"B","file.line":20}]}""", syntheticSourceWithArray); } + public void testRegularObjectWithFlatFields() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("top").field("type", "object").field("synthetic_source_keep", "all").endObject(); + })).documentMapper(); + + CheckedConsumer document = b -> { + b.startObject("top"); + b.field("file.name", "A"); + b.field("file.line", 10); + b.endObject(); + }; + + var syntheticSource = syntheticSource(documentMapper, document); + assertEquals("{\"top\":{\"file.name\":\"A\",\"file.line\":10}}", syntheticSource); + + CheckedConsumer documentWithArray = b -> { + b.startArray("top"); + b.startObject(); + b.field("file.name", "A"); + b.field("file.line", 10); + b.endObject(); + b.startObject(); + b.field("file.name", "B"); + b.field("file.line", 20); + b.endObject(); + b.endArray(); + }; + + var syntheticSourceWithArray = syntheticSource(documentMapper, documentWithArray); + assertEquals(""" + {"top":[{"file.name":"A","file.line":10},{"file.name":"B","file.line":20}]}""", syntheticSourceWithArray); + } + + public void testRegularObjectWithFlatFieldsInsideAnArray() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("top"); + b.startObject("properties"); + { + b.startObject("inner").field("type", "object").field("synthetic_source_keep", "all").endObject(); + } + b.endObject(); + b.endObject(); + })).documentMapper(); + + CheckedConsumer document = b -> { + b.startArray("top"); + b.startObject(); + { + b.startObject("inner"); + b.field("file.name", "A"); + b.field("file.line", 10); + b.endObject(); + } + b.endObject(); + b.endArray(); + }; + + var syntheticSource = syntheticSource(documentMapper, document); + assertEquals("{\"top\":{\"inner\":{\"file.name\":\"A\",\"file.line\":10}}}", syntheticSource); + + CheckedConsumer documentWithArray = b -> { + b.startArray("top"); + b.startObject(); + { + b.startObject("inner"); + b.field("file.name", "A"); + b.field("file.line", 10); + b.endObject(); + } + b.endObject(); + b.startObject(); + { + b.startObject("inner"); + b.field("file.name", "B"); + b.field("file.line", 20); + b.endObject(); + } + b.endObject(); + b.endArray(); + }; + + var syntheticSourceWithArray = syntheticSource(documentMapper, documentWithArray); + assertEquals(""" + {"top":{"inner":[{"file.name":"A","file.line":10},{"file.name":"B","file.line":20}]}}""", syntheticSourceWithArray); + } + + public void testIgnoredDynamicObjectWithFlatFields() throws IOException { + var syntheticSource = getSyntheticSourceWithFieldLimit(b -> { + b.startObject("top"); + b.field("file.name", "A"); + b.field("file.line", 10); + b.endObject(); + }); + assertEquals("{\"top\":{\"file.name\":\"A\",\"file.line\":10}}", syntheticSource); + + var syntheticSourceWithArray = getSyntheticSourceWithFieldLimit(b -> { + b.startArray("top"); + b.startObject(); + b.field("file.name", "A"); + b.field("file.line", 10); + b.endObject(); + b.startObject(); + b.field("file.name", "B"); + b.field("file.line", 20); + b.endObject(); + b.endArray(); + }); + assertEquals(""" + {"top":[{"file.name":"A","file.line":10},{"file.name":"B","file.line":20}]}""", syntheticSourceWithArray); + } + + public void testStoredArrayWithFlatFields() throws IOException { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + b.startObject("outer").startObject("properties"); + { + b.startObject("inner").field("type", "object").endObject(); + } + b.endObject().endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("outer").startArray("inner"); + { + b.startObject().field("a.b", "a.b").field("a.c", "a.c").endObject(); + } + b.endArray().endObject(); + }); + assertEquals(""" + {"outer":{"inner":[{"a.b":"a.b","a.c":"a.c"}]}}""", syntheticSource); + } + protected void validateRoundTripReader(String syntheticSource, DirectoryReader reader, DirectoryReader roundTripReader) throws IOException { // We exclude ignored source field since in some cases it contains an exact copy of a part of document source. From e5d5c17c99c476e9820ed141edd87af0c3adbef5 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 29 Oct 2024 13:02:28 -0700 Subject: [PATCH 192/324] Use directory name as project name for libs (#115720) The libs projects are configured to all begin with `elasticsearch-`. While this is desireable for the artifacts to contain this consistent prefix, it means the project names don't match up with their directories. Additionally, it creates complexities for subproject naming that must be manually adjusted. This commit adjusts the project names for those under libs to be their directory names. The resulting artifacts for these libs are kept the same, all beginning with `elasticsearch-`. --- benchmarks/build.gradle | 6 +-- .../internal/PublishPluginFuncTest.groovy | 2 +- .../src/main/groovy/elasticsearch.ide.gradle | 2 +- .../groovy/elasticsearch.stable-api.gradle | 6 +-- .../internal/ElasticsearchJavaBasePlugin.java | 2 +- .../InternalDistributionBwcSetupPlugin.java | 13 ++++-- .../precommit/JarHellPrecommitPlugin.java | 4 +- .../ThirdPartyAuditPrecommitPlugin.java | 2 +- .../fixtures/AbstractGradleFuncTest.groovy | 2 +- client/rest/build.gradle | 2 +- client/sniffer/build.gradle | 2 +- client/test/build.gradle | 2 +- distribution/build.gradle | 4 +- .../tools/entitlement-agent/build.gradle | 2 +- .../tools/entitlement-runtime/build.gradle | 4 +- distribution/tools/geoip-cli/build.gradle | 4 +- distribution/tools/keystore-cli/build.gradle | 2 +- distribution/tools/plugin-cli/build.gradle | 6 +-- distribution/tools/server-cli/build.gradle | 2 +- .../tools/windows-service-cli/build.gradle | 2 +- libs/build.gradle | 40 +++++++++++++++++-- libs/cli/build.gradle | 4 +- libs/core/build.gradle | 6 +-- libs/dissect/build.gradle | 2 +- libs/geo/build.gradle | 2 +- libs/grok/build.gradle | 2 +- libs/h3/build.gradle | 4 +- libs/logging/build.gradle | 4 +- libs/logstash-bridge/build.gradle | 6 +-- libs/lz4/build.gradle | 4 +- libs/native/build.gradle | 6 +-- libs/plugin-analysis-api/build.gradle | 4 +- libs/plugin-scanner/build.gradle | 8 ++-- libs/secure-sm/build.gradle | 2 +- libs/simdvec/build.gradle | 6 +-- libs/ssl-config/build.gradle | 4 +- libs/tdigest/build.gradle | 4 +- libs/x-content/build.gradle | 6 +-- libs/x-content/impl/build.gradle | 6 +-- modules/ingest-common/build.gradle | 4 +- modules/reindex/build.gradle | 2 +- modules/runtime-fields-common/build.gradle | 4 +- modules/systemd/build.gradle | 2 +- modules/transport-netty4/build.gradle | 4 +- qa/logging-config/build.gradle | 2 +- qa/packaging/build.gradle | 2 +- server/build.gradle | 26 ++++++------ settings.gradle | 12 +----- .../apm-integration/build.gradle | 2 +- test/fixtures/geoip-fixture/build.gradle | 4 +- test/framework/build.gradle | 4 +- test/x-content/build.gradle | 2 +- x-pack/plugin/blob-cache/build.gradle | 2 +- x-pack/plugin/core/build.gradle | 4 +- x-pack/plugin/esql/build.gradle | 4 +- .../plugin/esql/qa/testFixtures/build.gradle | 4 +- x-pack/plugin/inference/build.gradle | 2 +- x-pack/plugin/ml-package-loader/build.gradle | 2 +- x-pack/plugin/ml/build.gradle | 2 +- .../plugin/searchable-snapshots/build.gradle | 2 +- x-pack/plugin/spatial/build.gradle | 2 +- x-pack/plugin/sql/sql-action/build.gradle | 6 +-- x-pack/plugin/sql/sql-cli/build.gradle | 2 +- x-pack/plugin/sql/sql-proto/build.gradle | 4 +- x-pack/plugin/text-structure/build.gradle | 2 +- .../plugin/transform/qa/common/build.gradle | 2 +- 66 files changed, 165 insertions(+), 136 deletions(-) diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 3a03cbe2d934d..f3ced9f16d327 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -40,15 +40,15 @@ dependencies { // us to invoke the JMH uberjar as usual. exclude group: 'net.sf.jopt-simple', module: 'jopt-simple' } - api(project(':libs:elasticsearch-h3')) + api(project(':libs:h3')) api(project(':modules:aggregations')) api(project(':x-pack:plugin:esql-core')) api(project(':x-pack:plugin:esql')) api(project(':x-pack:plugin:esql:compute')) - implementation project(path: ':libs:elasticsearch-simdvec') + implementation project(path: ':libs:simdvec') expression(project(path: ':modules:lang-expression', configuration: 'zip')) painless(project(path: ':modules:lang-painless', configuration: 'zip')) - nativeLib(project(':libs:elasticsearch-native')) + nativeLib(project(':libs:native')) api "org.openjdk.jmh:jmh-core:$versions.jmh" annotationProcessor "org.openjdk.jmh:jmh-generator-annprocess:$versions.jmh" // Dependencies of JMH diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy index 99d451116dbe7..6e403c85a23f4 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy @@ -18,7 +18,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { def setup() { // required for JarHell to work - subProject(":libs:elasticsearch-core") << "apply plugin:'java'" + subProject(":libs:core") << "apply plugin:'java'" configurationCacheCompatible = false } diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 67878181a005d..86b48f744e16e 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -161,7 +161,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { ':server:generateModulesList', ':server:generatePluginsList', ':generateProviderImpls', - ':libs:elasticsearch-native:elasticsearch-native-libraries:extractLibs', + ':libs:native:native-libraries:extractLibs', ':x-pack:libs:es-opensaml-security-api:shadowJar'].collect { elasticsearchProject.right()?.task(it) ?: it }) } diff --git a/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle index 0148caf8983ef..1fab4d035177a 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle @@ -33,12 +33,12 @@ BuildParams.bwcVersions.withIndexCompatible({ it.onOrAfter(Version.fromString(ex if (unreleasedVersion) { // For unreleased snapshot versions, build them from source "oldJar${baseName}"(files(project(unreleasedVersion.gradleProjectPath).tasks.named(buildBwcTaskName(project.name)))) - } else if(bwcVersion.onOrAfter('8.7.0') && project.name.endsWith("elasticsearch-logging")==false) { + } else if(bwcVersion.onOrAfter('8.7.0') && project.name.endsWith("logging")==false) { //there was a package rename in 8.7.0, except for es-logging - "oldJar${baseName}"("org.elasticsearch.plugin:${project.name}:${bwcVersion}") + "oldJar${baseName}"("org.elasticsearch.plugin:elasticsearch-${project.name}:${bwcVersion}") } else { // For released versions, download it - "oldJar${baseName}"("org.elasticsearch:${project.name}:${bwcVersion}") + "oldJar${baseName}"("org.elasticsearch:elasticsearch-${project.name}:${bwcVersion}") } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index 5913339e32f47..05b7af83aa8e4 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -177,7 +177,7 @@ public static void configureInputNormalization(Project project) { } private static void configureNativeLibraryPath(Project project) { - String nativeProject = ":libs:elasticsearch-native:elasticsearch-native-libraries"; + String nativeProject = ":libs:native:native-libraries"; Configuration nativeConfig = project.getConfigurations().create("nativeLibs"); nativeConfig.defaultDependencies(deps -> { deps.add(project.getDependencies().project(Map.of("path", nativeProject, "configuration", "default"))); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 90b9c0d395f43..fcf286ed471dd 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -165,7 +165,12 @@ private static void configureBwcProject( DistributionProjectArtifact stableAnalysisPluginProjectArtifact = new DistributionProjectArtifact( new File( checkoutDir.get(), - relativeDir + "/build/distributions/" + stableApiProject.getName() + "-" + bwcVersion.get() + "-SNAPSHOT.jar" + relativeDir + + "/build/distributions/elasticsearch-" + + stableApiProject.getName() + + "-" + + bwcVersion.get() + + "-SNAPSHOT.jar" ), null ); @@ -275,7 +280,7 @@ private static List resolveArchiveProjects(File checkoutDir } private static List resolveStableProjects(Project project) { - Set stableProjectNames = Set.of("elasticsearch-logging", "elasticsearch-plugin-api", "elasticsearch-plugin-analysis-api"); + Set stableProjectNames = Set.of("logging", "plugin-api", "plugin-analysis-api"); return project.findProject(":libs") .getSubprojects() .stream() @@ -312,7 +317,9 @@ static void createBuildBwcTask( c.getOutputs().files(expectedOutputFile); } c.getOutputs().doNotCacheIf("BWC distribution caching is disabled for local builds", task -> BuildParams.isCi() == false); - c.getArgs().add(projectPath.replace('/', ':') + ":" + assembleTaskName); + c.getArgs().add("-p"); + c.getArgs().add(projectPath); + c.getArgs().add(assembleTaskName); if (project.getGradle().getStartParameter().isBuildCacheEnabled()) { c.getArgs().add("--build-cache"); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/JarHellPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/JarHellPrecommitPlugin.java index 56434cf1f4eda..0a22a2b61c953 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/JarHellPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/JarHellPrecommitPlugin.java @@ -21,11 +21,11 @@ public class JarHellPrecommitPlugin extends PrecommitPlugin { public TaskProvider createTask(Project project) { project.getPluginManager().apply(JarHellPlugin.class); - if (project.getPath().equals(":libs:elasticsearch-core") == false) { + if (project.getPath().equals(":libs:core") == false) { // ideally we would configure this as a default dependency. But Default dependencies do not work correctly // with gradle project dependencies as they're resolved to late in the build and don't setup according task // dependencies properly - var elasticsearchCoreProject = project.findProject(":libs:elasticsearch-core"); + var elasticsearchCoreProject = project.findProject(":libs:core"); if (elasticsearchCoreProject != null) { project.getDependencies().add("jarHell", elasticsearchCoreProject); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java index f0eefe1f81a8c..80cece6074ab7 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java @@ -27,7 +27,7 @@ public class ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin { public static final String JDK_JAR_HELL_CONFIG_NAME = "jdkJarHell"; - public static final String LIBS_ELASTICSEARCH_CORE_PROJECT_PATH = ":libs:elasticsearch-core"; + public static final String LIBS_ELASTICSEARCH_CORE_PROJECT_PATH = ":libs:core"; @Override public TaskProvider createTask(Project project) { diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy index 567fb048fad54..d3d06b2de3575 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy @@ -56,7 +56,7 @@ abstract class AbstractGradleFuncTest extends Specification { propertiesFile << "org.gradle.java.installations.fromEnv=JAVA_HOME,RUNTIME_JAVA_HOME,JAVA15_HOME,JAVA14_HOME,JAVA13_HOME,JAVA12_HOME,JAVA11_HOME,JAVA8_HOME" - def nativeLibsProject = subProject(":libs:elasticsearch-native:elasticsearch-native-libraries") + def nativeLibsProject = subProject(":libs:native:native-libraries") nativeLibsProject << """ plugins { id 'base' diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 6006fae1c2d84..003c251186510 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -79,7 +79,7 @@ tasks.named('forbiddenApisTest').configure { } // JarHell is part of es server, which we don't want to pull in -// TODO: Not anymore. Now in :libs:elasticsearch-core +// TODO: Not anymore. Now in :libs:core tasks.named("jarHell").configure { enabled = false } diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index 901917c7b25f8..f6f26c8f7c0d5 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -73,7 +73,7 @@ tasks.named("dependencyLicenses").configure { } // JarHell is part of es server, which we don't want to pull in -// TODO: Not anymore. Now in :libs:elasticsearch-core +// TODO: Not anymore. Now in :libs:core tasks.named("jarHell").configure { enabled = false } tasks.named("testTestingConventions").configure { diff --git a/client/test/build.gradle b/client/test/build.gradle index 3a3a9e3c03264..8de6b3dbf92be 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -54,7 +54,7 @@ tasks.named('forbiddenApisTest').configure { tasks.named("thirdPartyAudit").configure { enabled = false } // JarHell is part of es server, which we don't want to pull in -// TODO: Not anymore. Now in :libs:elasticsearch-core +// TODO: Not anymore. Now in :libs:core tasks.named("jarHell").configure { enabled = false } // TODO: should we have licenses for our test deps? diff --git a/distribution/build.gradle b/distribution/build.gradle index 72dea714fdcdb..f7b6f7bc1c7d0 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -275,7 +275,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } all { resolutionStrategy.dependencySubstitution { - substitute module("org.apache.logging.log4j:log4j-core") using project(":libs:elasticsearch-log4j") because "patched to remove JndiLookup clas"} + substitute module("org.apache.logging.log4j:log4j-core") using project(":libs:log4j") because "patched to remove JndiLookup clas"} } } @@ -291,7 +291,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { libsKeystoreCli project(path: ':distribution:tools:keystore-cli') libsSecurityCli project(':x-pack:plugin:security:cli') libsGeoIpCli project(':distribution:tools:geoip-cli') - libsNative project(':libs:elasticsearch-native:elasticsearch-native-libraries') + libsNative project(':libs:native:native-libraries') } project.ext { diff --git a/distribution/tools/entitlement-agent/build.gradle b/distribution/tools/entitlement-agent/build.gradle index 3fa9d0f5ef83a..d3e7ae10dcc6d 100644 --- a/distribution/tools/entitlement-agent/build.gradle +++ b/distribution/tools/entitlement-agent/build.gradle @@ -22,7 +22,7 @@ configurations { dependencies { entitlementBridge project(":distribution:tools:entitlement-bridge") - compileOnly project(":libs:elasticsearch-core") + compileOnly project(":libs:core") compileOnly project(":distribution:tools:entitlement-runtime") testImplementation project(":test:framework") testImplementation project(":distribution:tools:entitlement-bridge") diff --git a/distribution/tools/entitlement-runtime/build.gradle b/distribution/tools/entitlement-runtime/build.gradle index 55471272c1b5f..aaeee76d8bc57 100644 --- a/distribution/tools/entitlement-runtime/build.gradle +++ b/distribution/tools/entitlement-runtime/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.publish' dependencies { - compileOnly project(':libs:elasticsearch-core') // For @SuppressForbidden - compileOnly project(":libs:elasticsearch-x-content") // for parsing policy files + compileOnly project(':libs:core') // For @SuppressForbidden + compileOnly project(":libs:x-content") // for parsing policy files compileOnly project(':server') // To access the main server module for special permission checks compileOnly project(':distribution:tools:entitlement-bridge') testImplementation project(":test:framework") diff --git a/distribution/tools/geoip-cli/build.gradle b/distribution/tools/geoip-cli/build.gradle index ee20d5e1bd88e..26af3bb4f9911 100644 --- a/distribution/tools/geoip-cli/build.gradle +++ b/distribution/tools/geoip-cli/build.gradle @@ -15,8 +15,8 @@ base { dependencies { compileOnly project(":server") - compileOnly project(":libs:elasticsearch-cli") - compileOnly project(":libs:elasticsearch-x-content") + compileOnly project(":libs:cli") + compileOnly project(":libs:x-content") testImplementation project(":test:framework") testImplementation "org.apache.commons:commons-compress:1.26.1" testImplementation "commons-io:commons-io:2.15.1" diff --git a/distribution/tools/keystore-cli/build.gradle b/distribution/tools/keystore-cli/build.gradle index 07aa92151171a..0140cd9d8eedf 100644 --- a/distribution/tools/keystore-cli/build.gradle +++ b/distribution/tools/keystore-cli/build.gradle @@ -11,7 +11,7 @@ apply plugin: 'elasticsearch.build' dependencies { compileOnly project(":server") - compileOnly project(":libs:elasticsearch-cli") + compileOnly project(":libs:cli") testImplementation project(":test:framework") testImplementation "com.google.jimfs:jimfs:${versions.jimfs}" testRuntimeOnly "com.google.guava:guava:${versions.jimfs_guava}" diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 16932df96e223..ac8ade89c9014 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -21,9 +21,9 @@ tasks.named("dependencyLicenses").configure { dependencies { compileOnly project(":server") - compileOnly project(":libs:elasticsearch-cli") - implementation project(":libs:elasticsearch-plugin-api") - implementation project(":libs:elasticsearch-plugin-scanner") + compileOnly project(":libs:cli") + implementation project(":libs:plugin-api") + implementation project(":libs:plugin-scanner") // TODO: asm is picked up from the plugin scanner, we should consolidate so it is not defined twice implementation 'org.ow2.asm:asm:9.7' implementation 'org.ow2.asm:asm-tree:9.7' diff --git a/distribution/tools/server-cli/build.gradle b/distribution/tools/server-cli/build.gradle index e8f70e9053d7c..299d511ba5dbe 100644 --- a/distribution/tools/server-cli/build.gradle +++ b/distribution/tools/server-cli/build.gradle @@ -12,7 +12,7 @@ apply plugin: 'elasticsearch.build' dependencies { compileOnly project(":server") - compileOnly project(":libs:elasticsearch-cli") + compileOnly project(":libs:cli") testImplementation project(":test:framework") } diff --git a/distribution/tools/windows-service-cli/build.gradle b/distribution/tools/windows-service-cli/build.gradle index 77da0d407a40d..dcfaf244b7eec 100644 --- a/distribution/tools/windows-service-cli/build.gradle +++ b/distribution/tools/windows-service-cli/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.build' dependencies { compileOnly project(":server") - compileOnly project(":libs:elasticsearch-cli") + compileOnly project(":libs:cli") compileOnly project(":distribution:tools:server-cli") testImplementation project(":test:framework") diff --git a/libs/build.gradle b/libs/build.gradle index d0dfabd9c4fc5..efd2329ca2b5e 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -7,10 +7,42 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ -configure(childProjects.values() - project('elasticsearch-log4j')) { +configure(childProjects.values()) { + + apply plugin: 'base' + /* - * All subprojects are java projects using Elasticsearch's standard build - * tools. + * Although these libs are local to Elasticsearch, they can conflict with other similarly + * named libraries when downloaded into a single directory via maven. Here we set the + * name of all libs to begin with the "elasticsearch-" prefix. Additionally, subprojects + * of libs begin with their parents artifactId. */ - apply plugin: 'elasticsearch.build' + def baseProject = project + def baseArtifactId = "elasticsearch-${it.name}" + base { + archivesName = baseArtifactId + } + subprojects { + apply plugin: 'base' + + def subArtifactId = baseArtifactId + def currentProject = project + while (currentProject != baseProject) { + subArtifactId += "-${currentProject.name}" + currentProject = currentProject.parent + } + base { + archivesName = subArtifactId + } + } + + // log4j is a hack, and not really a full elasticsearch built jar + if (project.name != 'log4j') { + + /* + * All subprojects are java projects using Elasticsearch's standard build + * tools. + */ + apply plugin: 'elasticsearch.build' + } } diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index b6ae962eaa603..d5842d4a2c59c 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -11,10 +11,10 @@ apply plugin: 'elasticsearch.publish' dependencies { api 'net.sf.jopt-simple:jopt-simple:5.0.2' - api project(':libs:elasticsearch-core') + api project(':libs:core') testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-cli' + exclude group: 'org.elasticsearch', module: 'cli' } } diff --git a/libs/core/build.gradle b/libs/core/build.gradle index ebbeac141e4bd..e24417e09a53d 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -13,19 +13,19 @@ apply plugin: 'elasticsearch.mrjar' dependencies { // This dependency is used only by :libs:core for null-checking interop with other tools compileOnly "com.google.code.findbugs:jsr305:3.0.2" - compileOnly project(':libs:elasticsearch-logging') + compileOnly project(':libs:logging') testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testImplementation "junit:junit:${versions.junit}" testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-core' + exclude group: 'org.elasticsearch', module: 'core' } } tasks.named('forbiddenApisMain').configure { - // :libs:elasticsearch-core does not depend on server + // :libs:core does not depend on server // TODO: Need to decide how we want to handle for forbidden signatures with the changes to server replaceSignatureFiles 'jdk-signatures' } diff --git a/libs/dissect/build.gradle b/libs/dissect/build.gradle index be2691bfd332f..f1a09cc0ba0e6 100644 --- a/libs/dissect/build.gradle +++ b/libs/dissect/build.gradle @@ -9,7 +9,7 @@ dependencies { testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-dissect' + exclude group: 'org.elasticsearch', module: 'dissect' } testImplementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testImplementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" diff --git a/libs/geo/build.gradle b/libs/geo/build.gradle index 37dd65cb19262..c753ba814a5f9 100644 --- a/libs/geo/build.gradle +++ b/libs/geo/build.gradle @@ -12,7 +12,7 @@ apply plugin: 'elasticsearch.publish' dependencies { testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-geo' + exclude group: 'org.elasticsearch', module: 'geo' } } diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index ce4be699953c7..2a74927fedd83 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -14,7 +14,7 @@ dependencies { api 'org.jruby.jcodings:jcodings:1.0.44' testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-grok' + exclude group: 'org.elasticsearch', module: 'grok' } } diff --git a/libs/h3/build.gradle b/libs/h3/build.gradle index 0eb1aea09d49c..81a0d56ed4606 100644 --- a/libs/h3/build.gradle +++ b/libs/h3/build.gradle @@ -23,7 +23,7 @@ apply plugin: 'elasticsearch.publish' dependencies { testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-geo' + exclude group: 'org.elasticsearch', module: 'geo' } // lucene topology library that uses spherical geometry testImplementation "org.apache.lucene:lucene-spatial3d:${versions.lucene}" @@ -40,4 +40,4 @@ licenseFile.set(rootProject.file('licenses/APACHE-LICENSE-2.0.txt')) tasks.withType(LicenseHeadersTask.class).configureEach { approvedLicenses = ['Apache', 'Generated', 'Vendored'] -} \ No newline at end of file +} diff --git a/libs/logging/build.gradle b/libs/logging/build.gradle index 4222d89ebe2da..f52c2629176a7 100644 --- a/libs/logging/build.gradle +++ b/libs/logging/build.gradle @@ -14,12 +14,12 @@ tasks.named("loggerUsageCheck").configure {enabled = false } dependencies { testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-logging' + exclude group: 'org.elasticsearch', module: 'logging' } } tasks.named('forbiddenApisMain').configure { - // :libs:elasticsearch-logging does not depend on server + // :libs:logging does not depend on server replaceSignatureFiles 'jdk-signatures' } diff --git a/libs/logstash-bridge/build.gradle b/libs/logstash-bridge/build.gradle index e4b2728f693a0..117bed1e98105 100644 --- a/libs/logstash-bridge/build.gradle +++ b/libs/logstash-bridge/build.gradle @@ -10,9 +10,9 @@ apply plugin: 'elasticsearch.build' dependencies { compileOnly project(':server') - compileOnly project(':libs:elasticsearch-core') - compileOnly project(':libs:elasticsearch-plugin-api') - compileOnly project(':libs:elasticsearch-x-content') + compileOnly project(':libs:core') + compileOnly project(':libs:plugin-api') + compileOnly project(':libs:x-content') compileOnly project(':modules:lang-painless') compileOnly project(':modules:lang-painless:spi') compileOnly project(':modules:lang-mustache') diff --git a/libs/lz4/build.gradle b/libs/lz4/build.gradle index d9f1175248121..72e1bb50187a7 100644 --- a/libs/lz4/build.gradle +++ b/libs/lz4/build.gradle @@ -10,10 +10,10 @@ apply plugin: 'elasticsearch.publish' dependencies { api 'org.lz4:lz4-java:1.8.0' - api project(':libs:elasticsearch-core') + api project(':libs:core') testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-lz4' + exclude group: 'org.elasticsearch', module: 'lz4' } } diff --git a/libs/native/build.gradle b/libs/native/build.gradle index 0c889d47566fb..eff8f82434461 100644 --- a/libs/native/build.gradle +++ b/libs/native/build.gradle @@ -14,10 +14,10 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.mrjar' dependencies { - api project(':libs:elasticsearch-core') - api project(':libs:elasticsearch-logging') + api project(':libs:core') + api project(':libs:logging') testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-native' + exclude group: 'org.elasticsearch', module: 'native' } } diff --git a/libs/plugin-analysis-api/build.gradle b/libs/plugin-analysis-api/build.gradle index e240f18a88e0a..3f1670d76a0c1 100644 --- a/libs/plugin-analysis-api/build.gradle +++ b/libs/plugin-analysis-api/build.gradle @@ -18,12 +18,12 @@ tasks.named("loggerUsageCheck").configure {enabled = false } dependencies { api "org.apache.lucene:lucene-core:${versions.lucene}" - api project(':libs:elasticsearch-plugin-api') + api project(':libs:plugin-api') } tasks.named('forbiddenApisMain').configure { - // :libs:elasticsearch-logging does not depend on server + // :libs:logging does not depend on server replaceSignatureFiles 'jdk-signatures' } diff --git a/libs/plugin-scanner/build.gradle b/libs/plugin-scanner/build.gradle index b8cd224eba46a..d04af0624b3b1 100644 --- a/libs/plugin-scanner/build.gradle +++ b/libs/plugin-scanner/build.gradle @@ -16,16 +16,16 @@ tasks.named("dependencyLicenses").configure { } dependencies { - api project(':libs:elasticsearch-core') - api project(':libs:elasticsearch-plugin-api') - api project(":libs:elasticsearch-x-content") + api project(':libs:core') + api project(':libs:plugin-api') + api project(":libs:x-content") api 'org.ow2.asm:asm:9.7' api 'org.ow2.asm:asm-tree:9.7' testImplementation "junit:junit:${versions.junit}" testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-plugin-scanner' + exclude group: 'org.elasticsearch', module: 'plugin-scanner' } } tasks.named('forbiddenApisMain').configure { diff --git a/libs/secure-sm/build.gradle b/libs/secure-sm/build.gradle index 5e35f3ac7126f..473a86215e91e 100644 --- a/libs/secure-sm/build.gradle +++ b/libs/secure-sm/build.gradle @@ -16,7 +16,7 @@ dependencies { testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-secure-sm' + exclude group: 'org.elasticsearch', module: 'secure-sm' } } diff --git a/libs/simdvec/build.gradle b/libs/simdvec/build.gradle index eee56be72d0bf..02f960130e690 100644 --- a/libs/simdvec/build.gradle +++ b/libs/simdvec/build.gradle @@ -15,12 +15,12 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.mrjar' dependencies { - implementation project(':libs:elasticsearch-native') - implementation project(':libs:elasticsearch-logging') + implementation project(':libs:native') + implementation project(':libs:logging') implementation "org.apache.lucene:lucene-core:${versions.lucene}" testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-native' + exclude group: 'org.elasticsearch', module: 'native' } } diff --git a/libs/ssl-config/build.gradle b/libs/ssl-config/build.gradle index 3c0eb7c440510..d63df95003ab6 100644 --- a/libs/ssl-config/build.gradle +++ b/libs/ssl-config/build.gradle @@ -9,10 +9,10 @@ apply plugin: "elasticsearch.publish" dependencies { - api project(':libs:elasticsearch-core') + api project(':libs:core') testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-ssl-config' + exclude group: 'org.elasticsearch', module: 'ssl-config' } testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" diff --git a/libs/tdigest/build.gradle b/libs/tdigest/build.gradle index 231eb845339aa..2713df701fb44 100644 --- a/libs/tdigest/build.gradle +++ b/libs/tdigest/build.gradle @@ -22,11 +22,11 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.publish' dependencies { - api project(':libs:elasticsearch-core') + api project(':libs:core') api "org.apache.lucene:lucene-core:${versions.lucene}" testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-tdigest' + exclude group: 'org.elasticsearch', module: 'tdigest' } testImplementation 'org.junit.jupiter:junit-jupiter:5.8.1' } diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle index 7540bd0fb68f0..1cf18d46e7610 100644 --- a/libs/x-content/build.gradle +++ b/libs/x-content/build.gradle @@ -12,14 +12,14 @@ apply plugin: 'elasticsearch.publish' apply plugin: 'elasticsearch.embedded-providers' embeddedProviders { - impl 'x-content', project(':libs:elasticsearch-x-content:impl') + impl 'x-content', project(':libs:x-content:impl') } dependencies { - api project(':libs:elasticsearch-core') + api project(':libs:core') testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content' + exclude group: 'org.elasticsearch', module: 'x-content' } } diff --git a/libs/x-content/impl/build.gradle b/libs/x-content/impl/build.gradle index 753d2c3d5fe1e..35e122d336c68 100644 --- a/libs/x-content/impl/build.gradle +++ b/libs/x-content/impl/build.gradle @@ -16,8 +16,8 @@ base { String jacksonVersion = "2.17.2" dependencies { - compileOnly project(':libs:elasticsearch-core') - compileOnly project(':libs:elasticsearch-x-content') + compileOnly project(':libs:core') + compileOnly project(':libs:x-content') implementation "com.fasterxml.jackson.core:jackson-core:${jacksonVersion}" implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${jacksonVersion}" implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${jacksonVersion}" @@ -25,7 +25,7 @@ dependencies { implementation "org.yaml:snakeyaml:${versions.snakeyaml}" testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content' + exclude group: 'org.elasticsearch', module: 'x-content' } } diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 98dacce01fba4..7cfdba4d33744 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -20,8 +20,8 @@ esplugin { dependencies { compileOnly project(':modules:lang-painless:spi') - api project(':libs:elasticsearch-grok') - api project(':libs:elasticsearch-dissect') + api project(':libs:grok') + api project(':libs:dissect') implementation "org.apache.httpcomponents:httpclient:${versions.httpclient}" implementation "org.apache.httpcomponents:httpcore:${versions.httpcore}" } diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index ac68b565a0fbe..14a6b1e3f5b82 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -38,7 +38,7 @@ testClusters.configureEach { dependencies { api project(":client:rest") - api project(":libs:elasticsearch-ssl-config") + api project(":libs:ssl-config") // for parent/child testing testImplementation project(':modules:parent-join') testImplementation project(':modules:rest-root') diff --git a/modules/runtime-fields-common/build.gradle b/modules/runtime-fields-common/build.gradle index 00bb17df8665e..e743939cbf79e 100644 --- a/modules/runtime-fields-common/build.gradle +++ b/modules/runtime-fields-common/build.gradle @@ -19,8 +19,8 @@ esplugin { dependencies { compileOnly project(':modules:lang-painless:spi') - api project(':libs:elasticsearch-grok') - api project(':libs:elasticsearch-dissect') + api project(':libs:grok') + api project(':libs:dissect') } tasks.named("yamlRestCompatTestTransform").configure({ task -> diff --git a/modules/systemd/build.gradle b/modules/systemd/build.gradle index 28fd36160936a..8eb48e1d5f638 100644 --- a/modules/systemd/build.gradle +++ b/modules/systemd/build.gradle @@ -13,6 +13,6 @@ esplugin { } dependencies { - implementation project(':libs:elasticsearch-native') + implementation project(':libs:native') } diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index d80b63bec53d8..8dc718a818cec 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -35,7 +35,7 @@ configurations { } dependencies { - api project(":libs:elasticsearch-ssl-config") + api project(":libs:ssl-config") // network stack api "io.netty:netty-buffer:${versions.netty}" @@ -244,4 +244,4 @@ tasks.named("thirdPartyAudit").configure { tasks.named('forbiddenApisMain').configure { signaturesFiles += files('forbidden/netty-signatures.txt') -} \ No newline at end of file +} diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle index 78da8590660f7..4d65c4384afa1 100644 --- a/qa/logging-config/build.gradle +++ b/qa/logging-config/build.gradle @@ -10,7 +10,7 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.legacy-java-rest-test' dependencies { - testImplementation project(":libs:elasticsearch-x-content") + testImplementation project(":libs:x-content") testImplementation project(":test:framework") } diff --git a/qa/packaging/build.gradle b/qa/packaging/build.gradle index 73b6507490185..f9a903223c88a 100644 --- a/qa/packaging/build.gradle +++ b/qa/packaging/build.gradle @@ -13,7 +13,7 @@ plugins { dependencies { testImplementation project(':server') - testImplementation project(':libs:elasticsearch-core') + testImplementation project(':libs:core') testImplementation(testArtifact(project(':x-pack:plugin:core'))) testImplementation "junit:junit:${versions.junit}" testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/server/build.gradle b/server/build.gradle index 963b3cfb2e747..e8493751cb327 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -28,17 +28,17 @@ base { dependencies { - api project(':libs:elasticsearch-core') - api project(':libs:elasticsearch-logging') - api project(':libs:elasticsearch-secure-sm') - api project(':libs:elasticsearch-x-content') - api project(":libs:elasticsearch-geo") - api project(":libs:elasticsearch-lz4") - api project(":libs:elasticsearch-plugin-api") - api project(":libs:elasticsearch-plugin-analysis-api") - api project(':libs:elasticsearch-grok') - api project(":libs:elasticsearch-tdigest") - implementation project(":libs:elasticsearch-simdvec") + api project(':libs:core') + api project(':libs:logging') + api project(':libs:secure-sm') + api project(':libs:x-content') + api project(":libs:geo") + api project(":libs:lz4") + api project(":libs:plugin-api") + api project(":libs:plugin-analysis-api") + api project(':libs:grok') + api project(":libs:tdigest") + implementation project(":libs:simdvec") // lucene api "org.apache.lucene:lucene-core:${versions.lucene}" @@ -56,7 +56,7 @@ dependencies { api "org.apache.lucene:lucene-suggest:${versions.lucene}" // utilities - api project(":libs:elasticsearch-cli") + api project(":libs:cli") implementation 'com.carrotsearch:hppc:0.8.1' // precentil ranks aggregation @@ -67,7 +67,7 @@ dependencies { api "org.apache.logging.log4j:log4j-core:${versions.log4j}" // access to native functions - implementation project(':libs:elasticsearch-native') + implementation project(':libs:native') api "co.elastic.logging:log4j2-ecs-layout:${versions.ecsLogging}" api "co.elastic.logging:ecs-logging-core:${versions.ecsLogging}" diff --git a/settings.gradle b/settings.gradle index 39453e8d0935a..25ed048d57253 100644 --- a/settings.gradle +++ b/settings.gradle @@ -155,17 +155,7 @@ addSubProjects('', new File(rootProject.projectDir, 'x-pack/libs')) include projects.toArray(new String[0]) -project(":libs").children.each { libsProject -> - libsProject.name = "elasticsearch-${libsProject.name}" - libsProject.children.each { lp -> - lp.name = lp.name // for :libs:elasticsearch-x-content:impl - } -} -project(":libs:elasticsearch-native:libraries").name = "elasticsearch-native-libraries" - -project(":qa:stable-api").children.each { libsProject -> - libsProject.name = "elasticsearch-${libsProject.name}" -} +project(":libs:native:libraries").name = "native-libraries" project(":test:external-modules").children.each { testProject -> testProject.name = "test-${testProject.name}" diff --git a/test/external-modules/apm-integration/build.gradle b/test/external-modules/apm-integration/build.gradle index 98090f33ee2c7..d0f5f889e9b30 100644 --- a/test/external-modules/apm-integration/build.gradle +++ b/test/external-modules/apm-integration/build.gradle @@ -22,5 +22,5 @@ tasks.named('javaRestTest').configure { dependencies { clusterModules project(':modules:apm') - implementation project(':libs:elasticsearch-logging') + implementation project(':libs:logging') } diff --git a/test/fixtures/geoip-fixture/build.gradle b/test/fixtures/geoip-fixture/build.gradle index f20db481814ea..13d2b6ae88e9c 100644 --- a/test/fixtures/geoip-fixture/build.gradle +++ b/test/fixtures/geoip-fixture/build.gradle @@ -13,8 +13,8 @@ description = 'Fixture for GeoIPv2 service' dependencies { api project(':server') api project(':distribution:tools:geoip-cli') - api project(":libs:elasticsearch-cli") - api project(":libs:elasticsearch-x-content") + api project(":libs:cli") + api project(":libs:x-content") api("junit:junit:${versions.junit}") { exclude module: 'hamcrest-core' } diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 72a8eade3bce0..f130ecf131848 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -14,9 +14,9 @@ apply plugin: 'elasticsearch.publish' dependencies { api project(":client:rest") api project(':modules:transport-netty4') - api project(':libs:elasticsearch-ssl-config') + api project(':libs:ssl-config') api project(":server") - api project(":libs:elasticsearch-cli") + api project(":libs:cli") api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" api "junit:junit:${versions.junit}" api "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/test/x-content/build.gradle b/test/x-content/build.gradle index 48667eeb58735..281148a0fe819 100644 --- a/test/x-content/build.gradle +++ b/test/x-content/build.gradle @@ -12,7 +12,7 @@ apply plugin: 'elasticsearch.publish' dependencies { api project(":test:framework") - api project(":libs:elasticsearch-x-content") + api project(":libs:x-content") // json schema validation dependencies implementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" diff --git a/x-pack/plugin/blob-cache/build.gradle b/x-pack/plugin/blob-cache/build.gradle index ff21b64def768..c5c91a5ef87e3 100644 --- a/x-pack/plugin/blob-cache/build.gradle +++ b/x-pack/plugin/blob-cache/build.gradle @@ -15,5 +15,5 @@ esplugin { } dependencies { - compileOnly project(path: ':libs:elasticsearch-native') + compileOnly project(path: ':libs:native') } diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index 1ed59d6fe3581..fb4acb0055a8c 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -36,8 +36,8 @@ configurations { dependencies { compileOnly project(":server") - api project(':libs:elasticsearch-grok') - api project(":libs:elasticsearch-ssl-config") + api project(':libs:grok') + api project(":libs:ssl-config") api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" diff --git a/x-pack/plugin/esql/build.gradle b/x-pack/plugin/esql/build.gradle index 766d0c0f13892..150017ce9e955 100644 --- a/x-pack/plugin/esql/build.gradle +++ b/x-pack/plugin/esql/build.gradle @@ -26,8 +26,8 @@ dependencies { compileOnly project(xpackModule('ml')) implementation project('compute') implementation project('compute:ann') - implementation project(':libs:elasticsearch-dissect') - implementation project(':libs:elasticsearch-grok') + implementation project(':libs:dissect') + implementation project(':libs:grok') implementation project('arrow') // Also contains a dummy processor to allow compilation with unused annotations. diff --git a/x-pack/plugin/esql/qa/testFixtures/build.gradle b/x-pack/plugin/esql/qa/testFixtures/build.gradle index b6ed610406631..903986466b77f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/build.gradle +++ b/x-pack/plugin/esql/qa/testFixtures/build.gradle @@ -5,9 +5,9 @@ dependencies { implementation project(':x-pack:plugin:esql:compute') implementation project(':x-pack:plugin:esql') compileOnly project(path: xpackModule('core')) - implementation project(":libs:elasticsearch-x-content") + implementation project(":libs:x-content") implementation project(':client:rest') - implementation project(':libs:elasticsearch-logging') + implementation project(':libs:logging') implementation project(':test:framework') api(testArtifact(project(xpackModule('esql-core')))) implementation project(':server') diff --git a/x-pack/plugin/inference/build.gradle b/x-pack/plugin/inference/build.gradle index 28e1405cf7b97..6791aad6619d3 100644 --- a/x-pack/plugin/inference/build.gradle +++ b/x-pack/plugin/inference/build.gradle @@ -32,7 +32,7 @@ versions << [ ] dependencies { - implementation project(path: ':libs:elasticsearch-logging') + implementation project(path: ':libs:logging') compileOnly project(":server") compileOnly project(path: xpackModule('core')) testImplementation(testArtifact(project(xpackModule('core')))) diff --git a/x-pack/plugin/ml-package-loader/build.gradle b/x-pack/plugin/ml-package-loader/build.gradle index bdd1e54f20c86..122ad396b507d 100644 --- a/x-pack/plugin/ml-package-loader/build.gradle +++ b/x-pack/plugin/ml-package-loader/build.gradle @@ -18,7 +18,7 @@ esplugin { } dependencies { - implementation project(path: ':libs:elasticsearch-logging') + implementation project(path: ':libs:logging') compileOnly project(":server") compileOnly project(path: xpackModule('core')) testImplementation(testArtifact(project(xpackModule('core')))) diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index 706d7ea73aea9..e79a771293392 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -76,7 +76,7 @@ dependencies { testImplementation(testArtifact(project(xpackModule('security')))) testImplementation project(path: xpackModule('wildcard')) // ml deps - api project(':libs:elasticsearch-grok') + api project(':libs:grok') api project(':modules:lang-mustache') api "org.apache.commons:commons-math3:3.6.1" api "com.ibm.icu:icu4j:${versions.icu4j}" diff --git a/x-pack/plugin/searchable-snapshots/build.gradle b/x-pack/plugin/searchable-snapshots/build.gradle index 4e309499445e6..747e94b0e8d8d 100644 --- a/x-pack/plugin/searchable-snapshots/build.gradle +++ b/x-pack/plugin/searchable-snapshots/build.gradle @@ -15,7 +15,7 @@ base { dependencies { compileOnly project(path: xpackModule('core')) compileOnly project(path: xpackModule('blob-cache')) - compileOnly project(path: ':libs:elasticsearch-native') + compileOnly project(path: ':libs:native') testImplementation(testArtifact(project(xpackModule('blob-cache')))) internalClusterTestImplementation(testArtifact(project(xpackModule('core')))) internalClusterTestImplementation(project(path: xpackModule('shutdown'))) diff --git a/x-pack/plugin/spatial/build.gradle b/x-pack/plugin/spatial/build.gradle index e111949724844..5bcec68c227ce 100644 --- a/x-pack/plugin/spatial/build.gradle +++ b/x-pack/plugin/spatial/build.gradle @@ -15,7 +15,7 @@ dependencies { compileOnly project(':modules:lang-painless:spi') compileOnly project(path: xpackModule('core')) api "org.apache.lucene:lucene-spatial3d:${versions.lucene}" - api project(":libs:elasticsearch-h3") + api project(":libs:h3") testImplementation(testArtifact(project(xpackModule('core')))) testImplementation project(path: ':modules:percolator') testImplementation project(path: xpackModule('vector-tile')) diff --git a/x-pack/plugin/sql/sql-action/build.gradle b/x-pack/plugin/sql/sql-action/build.gradle index 9a0aefac4e434..60e809df00ae0 100644 --- a/x-pack/plugin/sql/sql-action/build.gradle +++ b/x-pack/plugin/sql/sql-action/build.gradle @@ -11,10 +11,10 @@ dependencies { api(project(':server')) { transitive = false } - api(project(':libs:elasticsearch-core')) { + api(project(':libs:core')) { transitive = false } - api(project(':libs:elasticsearch-x-content')) { + api(project(':libs:x-content')) { transitive = false } api project(':x-pack:plugin:core') @@ -33,4 +33,4 @@ tasks.named('forbiddenApisMain').configure { tasks.named("dependencyLicenses").configure { mapping from: /jackson-.*/, to: 'jackson' mapping from: /lucene-.*/, to: 'lucene' -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index 1d3a63ec13c98..b9713bcb8e7a3 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -29,7 +29,7 @@ dependencies { api "org.jline:jline-style:${jlineVersion}" api project(':x-pack:plugin:sql:sql-client') - api project(":libs:elasticsearch-cli") + api project(":libs:cli") implementation "net.java.dev.jna:jna:${versions.jna}" testImplementation project(":test:framework") } diff --git a/x-pack/plugin/sql/sql-proto/build.gradle b/x-pack/plugin/sql/sql-proto/build.gradle index de3f3462da85e..2cb1cfa89f033 100644 --- a/x-pack/plugin/sql/sql-proto/build.gradle +++ b/x-pack/plugin/sql/sql-proto/build.gradle @@ -10,9 +10,9 @@ dependencies { api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" api "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" - testImplementation project(":libs:elasticsearch-x-content") + testImplementation project(":libs:x-content") testImplementation(project(":test:framework")) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content' + exclude group: 'org.elasticsearch', module: 'x-content' } } diff --git a/x-pack/plugin/text-structure/build.gradle b/x-pack/plugin/text-structure/build.gradle index cab7f3ceeaa13..5bb6d8ef50274 100644 --- a/x-pack/plugin/text-structure/build.gradle +++ b/x-pack/plugin/text-structure/build.gradle @@ -12,7 +12,7 @@ base { dependencies { compileOnly project(path: xpackModule('core')) testImplementation(testArtifact(project(xpackModule('core')))) - api project(':libs:elasticsearch-grok') + api project(':libs:grok') api "com.ibm.icu:icu4j:${versions.icu4j}" api "net.sf.supercsv:super-csv:${versions.supercsv}" } diff --git a/x-pack/plugin/transform/qa/common/build.gradle b/x-pack/plugin/transform/qa/common/build.gradle index 9e7abfa2f977e..28e4068d31c6b 100644 --- a/x-pack/plugin/transform/qa/common/build.gradle +++ b/x-pack/plugin/transform/qa/common/build.gradle @@ -1,7 +1,7 @@ apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { - api project(':libs:elasticsearch-x-content') + api project(':libs:x-content') api project(':test:framework') api project(xpackModule('core')) } From b6d2d4bc10233649cc9af7e7d7598c1b8b7355f7 Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Tue, 29 Oct 2024 16:18:42 -0400 Subject: [PATCH 193/324] ES|QL CCS uses skip_unavailable setting for handling disconnected remote clusters (#115266) As part of ES|QL planning of a cross-cluster search, a field-caps call is done to each cluster and, if an ENRICH command is present, the enrich policy-resolve API is called on each remote. If a remote cluster cannot be connected to in these calls, the outcome depends on the skip_unavailable setting. For skip_unavailable=false clusters, the error is fatal and the error will immediately be propagated back to the client with a top level error message with a 500 HTTP status response code. For skip_unavailable=true clusters, the error is not fatal. The error will be trapped, recorded in the EsqlExecutionInfo object for the query, marking the cluster as SKIPPED. If the user requested CCS metadata to be included, the cluster status and connection failure will be present in the _clusters/details section of the response. If no clusters can be contacted, if they are all marked as skip_unavailable=true, no error will be returned. Instead a 200 HTTP status will be returned with no column and no values. If the include_ccs_metadata: true setting was included on the query, the errors will listed in the _clusters metadata section. (Note: this is also how the _search endpoint works for CCS.) Partially addresses https://github.com/elastic/elasticsearch/issues/114531 --- docs/changelog/115266.yaml | 6 + .../org/elasticsearch/TransportVersions.java | 1 + ...ossClusterEnrichUnavailableClustersIT.java | 690 ++++++++++++++++++ ...CrossClusterQueryUnavailableRemotesIT.java | 525 +++++++++++++ .../esql/action/CrossClustersQueryIT.java | 14 +- .../xpack/esql/action/EsqlExecutionInfo.java | 49 +- .../xpack/esql/analysis/EnrichResolution.java | 9 + .../esql/enrich/EnrichPolicyResolver.java | 74 +- .../xpack/esql/index/IndexResolution.java | 19 +- .../xpack/esql/session/EsqlSession.java | 216 +++++- .../xpack/esql/session/IndexResolver.java | 11 +- .../session/NoClustersToSearchException.java | 15 + .../esql/action/EsqlQueryResponseTests.java | 3 + .../esql/plugin/ComputeListenerTests.java | 1 + .../xpack/esql/session/EsqlSessionTests.java | 227 ++++-- .../esql/session/IndexResolverTests.java | 21 +- .../RemoteClusterSecurityEsqlIT.java | 82 ++- 17 files changed, 1819 insertions(+), 144 deletions(-) create mode 100644 docs/changelog/115266.yaml create mode 100644 x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java create mode 100644 x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/NoClustersToSearchException.java diff --git a/docs/changelog/115266.yaml b/docs/changelog/115266.yaml new file mode 100644 index 0000000000000..1d7fb1368c0e8 --- /dev/null +++ b/docs/changelog/115266.yaml @@ -0,0 +1,6 @@ +pr: 115266 +summary: ES|QL CCS uses `skip_unavailable` setting for handling disconnected remote + clusters +area: ES|QL +type: enhancement +issues: [ 114531 ] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 7bf3204b7e1a6..ea3e649de9ef8 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -185,6 +185,7 @@ static TransportVersion def(int id) { public static final TransportVersion INDEX_REQUEST_REMOVE_METERING = def(8_780_00_0); public static final TransportVersion CPU_STAT_STRING_PARSING = def(8_781_00_0); public static final TransportVersion QUERY_RULES_RETRIEVER = def(8_782_00_0); + public static final TransportVersion ESQL_CCS_EXEC_INFO_WITH_FAILURES = def(8_783_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java new file mode 100644 index 0000000000000..d142752d0c408 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java @@ -0,0 +1,690 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.ingest.common.IngestCommonPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; +import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.elasticsearch.xpack.esql.action.CrossClustersEnrichIT.enrichHosts; +import static org.elasticsearch.xpack.esql.action.CrossClustersEnrichIT.enrichVendors; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +/** + * This IT test is the dual of CrossClustersEnrichIT, which tests "happy path" + * and this one tests unavailable cluster scenarios using (most of) the same tests. + */ +public class CrossClusterEnrichUnavailableClustersIT extends AbstractMultiClustersTestCase { + + public static String REMOTE_CLUSTER_1 = "c1"; + public static String REMOTE_CLUSTER_2 = "c2"; + + @Override + protected Collection remoteClusterAlias() { + return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); + } + + @Override + protected boolean reuseClusters() { + return false; + } + + private Collection allClusters() { + return CollectionUtils.appendToCopy(remoteClusterAlias(), LOCAL_CLUSTER); + } + + @Override + protected Collection> nodePlugins(String clusterAlias) { + List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPlugin.class); + plugins.add(CrossClustersEnrichIT.LocalStateEnrich.class); + plugins.add(IngestCommonPlugin.class); + plugins.add(ReindexPlugin.class); + return plugins; + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(super.nodeSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); + } + + @Before + public void setupHostsEnrich() { + // the hosts policy are identical on every node + Map allHosts = Map.of( + "192.168.1.2", + "Windows", + "192.168.1.3", + "MacOS", + "192.168.1.4", + "Linux", + "192.168.1.5", + "Android", + "192.168.1.6", + "iOS", + "192.168.1.7", + "Windows", + "192.168.1.8", + "MacOS", + "192.168.1.9", + "Linux", + "192.168.1.10", + "Linux", + "192.168.1.11", + "Windows" + ); + for (String cluster : allClusters()) { + Client client = client(cluster); + client.admin().indices().prepareCreate("hosts").setMapping("ip", "type=ip", "os", "type=keyword").get(); + for (Map.Entry h : allHosts.entrySet()) { + client.prepareIndex("hosts").setSource("ip", h.getKey(), "os", h.getValue()).get(); + } + client.admin().indices().prepareRefresh("hosts").get(); + client.execute( + PutEnrichPolicyAction.INSTANCE, + new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts", CrossClustersEnrichIT.hostPolicy) + ).actionGet(); + client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts")) + .actionGet(); + assertAcked(client.admin().indices().prepareDelete("hosts")); + } + } + + @Before + public void setupVendorPolicy() { + var localVendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Samsung", "Linux", "Redhat"); + var c1Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Google", "Linux", "Suse"); + var c2Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Sony", "Linux", "Ubuntu"); + var vendors = Map.of(LOCAL_CLUSTER, localVendors, "c1", c1Vendors, "c2", c2Vendors); + for (Map.Entry> e : vendors.entrySet()) { + Client client = client(e.getKey()); + client.admin().indices().prepareCreate("vendors").setMapping("os", "type=keyword", "vendor", "type=keyword").get(); + for (Map.Entry v : e.getValue().entrySet()) { + client.prepareIndex("vendors").setSource("os", v.getKey(), "vendor", v.getValue()).get(); + } + client.admin().indices().prepareRefresh("vendors").get(); + client.execute( + PutEnrichPolicyAction.INSTANCE, + new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors", CrossClustersEnrichIT.vendorPolicy) + ).actionGet(); + client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors")) + .actionGet(); + assertAcked(client.admin().indices().prepareDelete("vendors")); + } + } + + @Before + public void setupEventsIndices() { + record Event(long timestamp, String user, String host) {} + + List e0 = List.of( + new Event(1, "matthew", "192.168.1.3"), + new Event(2, "simon", "192.168.1.5"), + new Event(3, "park", "192.168.1.2"), + new Event(4, "andrew", "192.168.1.7"), + new Event(5, "simon", "192.168.1.20"), + new Event(6, "kevin", "192.168.1.2"), + new Event(7, "akio", "192.168.1.5"), + new Event(8, "luke", "192.168.1.2"), + new Event(9, "jack", "192.168.1.4") + ); + List e1 = List.of( + new Event(1, "andres", "192.168.1.2"), + new Event(2, "sergio", "192.168.1.6"), + new Event(3, "kylian", "192.168.1.8"), + new Event(4, "andrew", "192.168.1.9"), + new Event(5, "jack", "192.168.1.3"), + new Event(6, "kevin", "192.168.1.4"), + new Event(7, "akio", "192.168.1.7"), + new Event(8, "kevin", "192.168.1.21"), + new Event(9, "andres", "192.168.1.8") + ); + List e2 = List.of( + new Event(1, "park", "192.168.1.25"), + new Event(2, "akio", "192.168.1.5"), + new Event(3, "park", "192.168.1.2"), + new Event(4, "kevin", "192.168.1.3") + ); + for (var c : Map.of(LOCAL_CLUSTER, e0, "c1", e1, "c2", e2).entrySet()) { + Client client = client(c.getKey()); + client.admin() + .indices() + .prepareCreate("events") + .setMapping("timestamp", "type=long", "user", "type=keyword", "host", "type=ip") + .get(); + for (var e : c.getValue()) { + client.prepareIndex("events").setSource("timestamp", e.timestamp, "user", e.user, "host", e.host).get(); + } + client.admin().indices().prepareRefresh("events").get(); + } + } + + public void testEnrichWithHostsPolicyAndDisconnectedRemotesWithSkipUnavailableTrue() throws IOException { + setSkipUnavailable(REMOTE_CLUSTER_1, true); + setSkipUnavailable(REMOTE_CLUSTER_2, true); + + try { + // close remote-cluster-1 so that it is unavailable + cluster(REMOTE_CLUSTER_1).close(); + + Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + { + Enrich.Mode mode = randomFrom(Enrich.Mode.values()); + String query = "FROM *:events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + List> rows = getValuesList(resp); + assertThat(rows.size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertCCSExecutionInfoDetails(executionInfo); + + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2))); + + EsqlExecutionInfo.Cluster cluster1 = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(cluster1.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster1.getTotalShards(), equalTo(0)); + assertThat(cluster1.getSuccessfulShards(), equalTo(0)); + assertThat(cluster1.getSkippedShards(), equalTo(0)); + assertThat(cluster1.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster cluster2 = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(cluster2.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(cluster2.getTotalShards(), greaterThanOrEqualTo(0)); + assertThat(cluster2.getSuccessfulShards(), equalTo(cluster2.getSuccessfulShards())); + assertThat(cluster2.getSkippedShards(), equalTo(0)); + assertThat(cluster2.getFailedShards(), equalTo(0)); + } + } + + // close remote-cluster-2 so that it is also unavailable + cluster(REMOTE_CLUSTER_2).close(); + + { + Enrich.Mode mode = randomFrom(Enrich.Mode.values()); + String query = "FROM *:events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + List columns = resp.columns(); + assertThat(columns.size(), equalTo(1)); + // column from an empty result should be {"name":"","type":"null"} + assertThat(columns.get(0).name(), equalTo("")); + assertThat(columns.get(0).type(), equalTo(DataType.NULL)); + + List> rows = getValuesList(resp); + assertThat(rows.size(), equalTo(0)); + + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertCCSExecutionInfoDetails(executionInfo); + + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2))); + + EsqlExecutionInfo.Cluster cluster1 = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(cluster1.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster1.getTotalShards(), equalTo(0)); + assertThat(cluster1.getSuccessfulShards(), equalTo(0)); + assertThat(cluster1.getSkippedShards(), equalTo(0)); + assertThat(cluster1.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster cluster2 = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(cluster2.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster2.getTotalShards(), equalTo(0)); + assertThat(cluster2.getSuccessfulShards(), equalTo(0)); + assertThat(cluster2.getSkippedShards(), equalTo(0)); + assertThat(cluster2.getFailedShards(), equalTo(0)); + } + } + } finally { + clearSkipUnavailable(); + } + } + + public void testEnrichWithHostsPolicyAndDisconnectedRemotesWithSkipUnavailableFalse() throws IOException { + setSkipUnavailable(REMOTE_CLUSTER_1, true); + setSkipUnavailable(REMOTE_CLUSTER_2, false); + + try { + // close remote-cluster-1 so that it is unavailable + cluster(REMOTE_CLUSTER_1).close(); + + Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + { + Enrich.Mode mode = randomFrom(Enrich.Mode.values()); + String query = "FROM *:events | EVAL ip= TO_STR(host) | " + enrichHosts(mode) + " | STATS c = COUNT(*) by os | SORT os"; + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + List> rows = getValuesList(resp); + assertThat(rows.size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertCCSExecutionInfoDetails(executionInfo); + + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2))); + + EsqlExecutionInfo.Cluster cluster1 = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(cluster1.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster1.getTotalShards(), equalTo(0)); + assertThat(cluster1.getSuccessfulShards(), equalTo(0)); + assertThat(cluster1.getSkippedShards(), equalTo(0)); + assertThat(cluster1.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster cluster2 = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(cluster2.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(cluster2.getTotalShards(), greaterThanOrEqualTo(0)); + assertThat(cluster2.getSuccessfulShards(), equalTo(cluster2.getSuccessfulShards())); + assertThat(cluster2.getSkippedShards(), equalTo(0)); + assertThat(cluster2.getFailedShards(), equalTo(0)); + } + } + + // close remote-cluster-2 so that it is also unavailable + cluster(REMOTE_CLUSTER_2).close(); + { + Enrich.Mode mode = randomFrom(Enrich.Mode.values()); + String query = "FROM *:events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; + Exception exception = expectThrows(Exception.class, () -> runQuery(query, requestIncludeMeta)); + assertTrue(ExceptionsHelper.isRemoteUnavailableException(exception)); + } + } finally { + clearSkipUnavailable(); + } + } + + public void testEnrichTwiceThenAggsWithUnavailableRemotes() throws IOException { + Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + boolean skipUnavailableRemote1 = randomBoolean(); + setSkipUnavailable(REMOTE_CLUSTER_1, skipUnavailableRemote1); + setSkipUnavailable(REMOTE_CLUSTER_2, true); + + try { + // close remote-cluster-2 so that it is unavailable + cluster(REMOTE_CLUSTER_2).close(); + + for (var hostMode : Enrich.Mode.values()) { + String query = String.format(Locale.ROOT, """ + FROM *:events,events + | eval ip= TO_STR(host) + | %s + | %s + | stats c = COUNT(*) by vendor + | sort vendor + """, enrichHosts(hostMode), enrichVendors(Enrich.Mode.COORDINATOR)); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", REMOTE_CLUSTER_1, REMOTE_CLUSTER_2))); + assertCCSExecutionInfoDetails(executionInfo); + + EsqlExecutionInfo.Cluster cluster1 = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(cluster1.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(cluster1.getTotalShards(), greaterThanOrEqualTo(0)); + assertThat(cluster1.getSuccessfulShards(), equalTo(cluster1.getSuccessfulShards())); + assertThat(cluster1.getSkippedShards(), equalTo(0)); + assertThat(cluster1.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster cluster2 = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(cluster2.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster2.getTotalShards(), equalTo(0)); + assertThat(cluster2.getSuccessfulShards(), equalTo(0)); + assertThat(cluster2.getSkippedShards(), equalTo(0)); + assertThat(cluster2.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(localCluster.getTotalShards(), greaterThan(0)); + assertThat(localCluster.getSuccessfulShards(), equalTo(localCluster.getTotalShards())); + assertThat(localCluster.getSkippedShards(), equalTo(0)); + assertThat(localCluster.getFailedShards(), equalTo(0)); + } + } + + // close remote-cluster-1 so that it is also unavailable + cluster(REMOTE_CLUSTER_1).close(); + + for (var hostMode : Enrich.Mode.values()) { + String query = String.format(Locale.ROOT, """ + FROM *:events,events + | eval ip= TO_STR(host) + | %s + | %s + | stats c = COUNT(*) by vendor + | sort vendor + """, enrichHosts(hostMode), enrichVendors(Enrich.Mode.COORDINATOR)); + if (skipUnavailableRemote1 == false) { + Exception exception = expectThrows(Exception.class, () -> runQuery(query, requestIncludeMeta)); + assertTrue(ExceptionsHelper.isRemoteUnavailableException(exception)); + } else { + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", REMOTE_CLUSTER_1, REMOTE_CLUSTER_2))); + assertCCSExecutionInfoDetails(executionInfo); + + EsqlExecutionInfo.Cluster cluster1 = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(cluster1.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster1.getTotalShards(), equalTo(0)); + assertThat(cluster1.getSuccessfulShards(), equalTo(0)); + assertThat(cluster1.getSkippedShards(), equalTo(0)); + assertThat(cluster1.getFailedShards(), equalTo(0)); + assertThat(cluster1.getTook().millis(), greaterThanOrEqualTo(0L)); + + EsqlExecutionInfo.Cluster cluster2 = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(cluster2.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster2.getTotalShards(), equalTo(0)); + assertThat(cluster2.getSuccessfulShards(), equalTo(0)); + assertThat(cluster2.getSkippedShards(), equalTo(0)); + assertThat(cluster2.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(localCluster.getTotalShards(), greaterThan(0)); + assertThat(localCluster.getSuccessfulShards(), equalTo(localCluster.getTotalShards())); + assertThat(localCluster.getSkippedShards(), equalTo(0)); + assertThat(localCluster.getFailedShards(), equalTo(0)); + } + } + } + } finally { + clearSkipUnavailable(); + } + } + + public void testEnrichCoordinatorThenAnyWithSingleUnavailableRemoteAndLocal() throws IOException { + Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + boolean skipUnavailableRemote1 = randomBoolean(); + setSkipUnavailable(REMOTE_CLUSTER_1, skipUnavailableRemote1); + + try { + // close remote-cluster-1 so that it is unavailable + cluster(REMOTE_CLUSTER_1).close(); + String query = String.format(Locale.ROOT, """ + FROM %s:events,events + | eval ip= TO_STR(host) + | %s + | %s + | stats c = COUNT(*) by vendor + | sort vendor + """, REMOTE_CLUSTER_1, enrichHosts(Enrich.Mode.COORDINATOR), enrichVendors(Enrich.Mode.ANY)); + if (skipUnavailableRemote1 == false) { + Exception exception = expectThrows(Exception.class, () -> runQuery(query, requestIncludeMeta)); + assertTrue(ExceptionsHelper.isRemoteUnavailableException(exception)); + } else { + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat( + executionInfo.clusterAliases(), + equalTo(Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, REMOTE_CLUSTER_1)) + ); + assertCCSExecutionInfoDetails(executionInfo); + + EsqlExecutionInfo.Cluster cluster1 = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(cluster1.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster1.getTotalShards(), equalTo(0)); + assertThat(cluster1.getSuccessfulShards(), equalTo(0)); + assertThat(cluster1.getSkippedShards(), equalTo(0)); + assertThat(cluster1.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(localCluster.getTotalShards(), greaterThan(0)); + assertThat(localCluster.getSuccessfulShards(), equalTo(localCluster.getTotalShards())); + assertThat(localCluster.getSkippedShards(), equalTo(0)); + assertThat(localCluster.getFailedShards(), equalTo(0)); + } + } + } finally { + clearSkipUnavailable(); + } + } + + public void testEnrichCoordinatorThenAnyWithSingleUnavailableRemoteAndNotLocal() throws IOException { + Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + boolean skipUnavailableRemote1 = randomBoolean(); + setSkipUnavailable(REMOTE_CLUSTER_1, skipUnavailableRemote1); + + try { + // close remote-cluster-1 so that it is unavailable + cluster(REMOTE_CLUSTER_1).close(); + String query = String.format(Locale.ROOT, """ + FROM %s:events + | eval ip= TO_STR(host) + | %s + | %s + | stats c = COUNT(*) by vendor + | sort vendor + """, REMOTE_CLUSTER_1, enrichHosts(Enrich.Mode.COORDINATOR), enrichVendors(Enrich.Mode.ANY)); + if (skipUnavailableRemote1 == false) { + Exception exception = expectThrows(Exception.class, () -> runQuery(query, requestIncludeMeta)); + assertTrue(ExceptionsHelper.isRemoteUnavailableException(exception)); + } else { + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + List columns = resp.columns(); + assertThat(columns.size(), equalTo(1)); + // column from an empty result should be {"name":"","type":"null"} + assertThat(columns.get(0).name(), equalTo("")); + assertThat(columns.get(0).type(), equalTo(DataType.NULL)); + + assertThat(getValuesList(resp).size(), equalTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1))); + assertCCSExecutionInfoDetails(executionInfo); + + EsqlExecutionInfo.Cluster cluster1 = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(cluster1.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster1.getTotalShards(), equalTo(0)); + assertThat(cluster1.getSuccessfulShards(), equalTo(0)); + assertThat(cluster1.getSkippedShards(), equalTo(0)); + assertThat(cluster1.getFailedShards(), equalTo(0)); + } + } + } finally { + clearSkipUnavailable(); + } + } + + public void testEnrichRemoteWithVendor() throws IOException { + Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + boolean skipUnavailableRemote2 = randomBoolean(); + setSkipUnavailable(REMOTE_CLUSTER_1, true); + setSkipUnavailable(REMOTE_CLUSTER_2, skipUnavailableRemote2); + + try { + // close remote-cluster-1 so that it is unavailable + cluster(REMOTE_CLUSTER_1).close(); + + for (Enrich.Mode hostMode : List.of(Enrich.Mode.ANY, Enrich.Mode.REMOTE)) { + var query = String.format(Locale.ROOT, """ + FROM *:events,events + | eval ip= TO_STR(host) + | %s + | %s + | stats c = COUNT(*) by vendor + | sort vendor + """, enrichHosts(hostMode), enrichVendors(Enrich.Mode.REMOTE)); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThan(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat( + executionInfo.clusterAliases(), + equalTo(Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, REMOTE_CLUSTER_1, REMOTE_CLUSTER_2)) + ); + assertCCSExecutionInfoDetails(executionInfo); + + EsqlExecutionInfo.Cluster cluster1 = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(cluster1.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster1.getTotalShards(), equalTo(0)); + assertThat(cluster1.getSuccessfulShards(), equalTo(0)); + assertThat(cluster1.getSkippedShards(), equalTo(0)); + assertThat(cluster1.getFailedShards(), equalTo(0)); + assertThat(cluster1.getTook().millis(), greaterThanOrEqualTo(0L)); + + EsqlExecutionInfo.Cluster cluster2 = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(cluster2.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(cluster2.getTotalShards(), greaterThan(0)); + assertThat(cluster2.getSuccessfulShards(), equalTo(cluster2.getSuccessfulShards())); + assertThat(cluster2.getSkippedShards(), equalTo(0)); + assertThat(cluster2.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(localCluster.getTotalShards(), greaterThan(0)); + assertThat(localCluster.getSuccessfulShards(), equalTo(localCluster.getTotalShards())); + assertThat(localCluster.getSkippedShards(), equalTo(0)); + assertThat(localCluster.getFailedShards(), equalTo(0)); + } + } + + // close remote-cluster-2 so that it is also unavailable + cluster(REMOTE_CLUSTER_2).close(); + + for (Enrich.Mode hostMode : List.of(Enrich.Mode.ANY, Enrich.Mode.REMOTE)) { + var query = String.format(Locale.ROOT, """ + FROM *:events,events + | eval ip= TO_STR(host) + | %s + | %s + | stats c = COUNT(*) by vendor + | sort vendor + """, enrichHosts(hostMode), enrichVendors(Enrich.Mode.REMOTE)); + if (skipUnavailableRemote2 == false) { + Exception exception = expectThrows(Exception.class, () -> runQuery(query, requestIncludeMeta)); + assertTrue(ExceptionsHelper.isRemoteUnavailableException(exception)); + } else { + + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThan(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat( + executionInfo.clusterAliases(), + equalTo(Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, REMOTE_CLUSTER_1, REMOTE_CLUSTER_2)) + ); + assertCCSExecutionInfoDetails(executionInfo); + + EsqlExecutionInfo.Cluster cluster1 = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(cluster1.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster1.getTotalShards(), equalTo(0)); + assertThat(cluster1.getSuccessfulShards(), equalTo(0)); + assertThat(cluster1.getSkippedShards(), equalTo(0)); + assertThat(cluster1.getFailedShards(), equalTo(0)); + assertThat(cluster1.getTook().millis(), greaterThanOrEqualTo(0L)); + + EsqlExecutionInfo.Cluster cluster2 = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(cluster2.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(cluster2.getTotalShards(), equalTo(0)); + assertThat(cluster2.getSuccessfulShards(), equalTo(0)); + assertThat(cluster2.getSkippedShards(), equalTo(0)); + assertThat(cluster2.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(localCluster.getTotalShards(), greaterThan(0)); + assertThat(localCluster.getSuccessfulShards(), equalTo(localCluster.getTotalShards())); + assertThat(localCluster.getSkippedShards(), equalTo(0)); + assertThat(localCluster.getFailedShards(), equalTo(0)); + } + } + } + } finally { + clearSkipUnavailable(); + } + } + + protected EsqlQueryResponse runQuery(String query, Boolean ccsMetadataInResponse) { + EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + if (randomBoolean()) { + request.profile(true); + } + if (ccsMetadataInResponse != null) { + request.includeCCSMetadata(ccsMetadataInResponse); + } + return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); + } + + private static void assertCCSExecutionInfoDetails(EsqlExecutionInfo executionInfo) { + assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); + assertTrue(executionInfo.isCrossClusterSearch()); + + for (String clusterAlias : executionInfo.clusterAliases()) { + EsqlExecutionInfo.Cluster cluster = executionInfo.getCluster(clusterAlias); + assertThat(cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(cluster.getTook().millis(), lessThanOrEqualTo(executionInfo.overallTook().millis())); + } + } + + private void setSkipUnavailable(String clusterAlias, boolean skip) { + client(LOCAL_CLUSTER).admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put("cluster.remote." + clusterAlias + ".skip_unavailable", skip).build()) + .get(); + } + + private void clearSkipUnavailable() { + Settings.Builder settingsBuilder = Settings.builder() + .putNull("cluster.remote." + REMOTE_CLUSTER_1 + ".skip_unavailable") + .putNull("cluster.remote." + REMOTE_CLUSTER_2 + ".skip_unavailable"); + client(LOCAL_CLUSTER).admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settingsBuilder.build()) + .get(); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java new file mode 100644 index 0000000000000..0f1aa8541fdd9 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java @@ -0,0 +1,525 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class CrossClusterQueryUnavailableRemotesIT extends AbstractMultiClustersTestCase { + private static final String REMOTE_CLUSTER_1 = "cluster-a"; + private static final String REMOTE_CLUSTER_2 = "cluster-b"; + + @Override + protected Collection remoteClusterAlias() { + return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); + } + + @Override + protected boolean reuseClusters() { + return false; + } + + @Override + protected Collection> nodePlugins(String clusterAlias) { + List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPlugin.class); + plugins.add(org.elasticsearch.xpack.esql.action.CrossClustersQueryIT.InternalExchangePlugin.class); + return plugins; + } + + public static class InternalExchangePlugin extends Plugin { + @Override + public List> getSettings() { + return List.of( + Setting.timeSetting( + ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope + ) + ); + } + } + + public void testCCSAgainstDisconnectedRemoteWithSkipUnavailableTrue() throws Exception { + int numClusters = 3; + Map testClusterInfo = setupClusters(numClusters); + int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); + int remote2NumShards = (Integer) testClusterInfo.get("remote2.num_shards"); + setSkipUnavailable(REMOTE_CLUSTER_1, true); + setSkipUnavailable(REMOTE_CLUSTER_2, true); + + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + try { + // close remote-cluster-1 so that it is unavailable + cluster(REMOTE_CLUSTER_1).close(); + + try (EsqlQueryResponse resp = runQuery("FROM logs-*,*:logs-* | STATS sum (v)", requestIncludeMeta)) { + List> values = getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), equalTo(List.of(330L))); + + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertNotNull(executionInfo); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + long overallTookMillis = executionInfo.overallTook().millis(); + assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2, LOCAL_CLUSTER))); + + EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(remote1Cluster.getIndexExpression(), equalTo("logs-*")); + assertThat(remote1Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remote1Cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(remote1Cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(remote1Cluster.getTotalShards(), equalTo(0)); + assertThat(remote1Cluster.getSuccessfulShards(), equalTo(0)); + assertThat(remote1Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote1Cluster.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(remote2Cluster.getIndexExpression(), equalTo("logs-*")); + assertThat(remote2Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(remote2Cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(remote2Cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(remote2Cluster.getTotalShards(), equalTo(remote2NumShards)); + assertThat(remote2Cluster.getSuccessfulShards(), equalTo(remote2NumShards)); + assertThat(remote2Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote2Cluster.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); + assertThat(localCluster.getIndexExpression(), equalTo("logs-*")); + assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(localCluster.getTotalShards(), equalTo(localNumShards)); + assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localCluster.getSkippedShards(), equalTo(0)); + assertThat(localCluster.getFailedShards(), equalTo(0)); + + // ensure that the _clusters metadata is present only if requested + assertClusterMetadataInResponse(resp, responseExpectMeta); + } + + // scenario where there are no indices to match because + // 1) the local cluster indexExpression and REMOTE_CLUSTER_2 indexExpression match no indices + // 2) the REMOTE_CLUSTER_1 is unavailable + // 3) both remotes are marked as skip_un=true + String query = "FROM nomatch*," + REMOTE_CLUSTER_1 + ":logs-*," + REMOTE_CLUSTER_2 + ":nomatch* | STATS sum (v)"; + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + List> values = getValuesList(resp); + assertThat(values, hasSize(0)); + + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertNotNull(executionInfo); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + long overallTookMillis = executionInfo.overallTook().millis(); + assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2, LOCAL_CLUSTER))); + + EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(remote1Cluster.getIndexExpression(), equalTo("logs-*")); + assertThat(remote1Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remote1Cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(remote1Cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(remote1Cluster.getTotalShards(), equalTo(0)); + assertThat(remote1Cluster.getSuccessfulShards(), equalTo(0)); + assertThat(remote1Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote1Cluster.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(remote2Cluster.getIndexExpression(), equalTo("nomatch*")); + assertThat(remote2Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remote2Cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(remote2Cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(remote2Cluster.getTotalShards(), equalTo(0)); + assertThat(remote2Cluster.getSuccessfulShards(), equalTo(0)); + assertThat(remote2Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote2Cluster.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); + assertThat(localCluster.getIndexExpression(), equalTo("nomatch*")); + // local cluster should never be marked as SKIPPED + assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(localCluster.getTotalShards(), equalTo(0)); + assertThat(localCluster.getSuccessfulShards(), equalTo(0)); + assertThat(localCluster.getSkippedShards(), equalTo(0)); + assertThat(localCluster.getFailedShards(), equalTo(0)); + + // ensure that the _clusters metadata is present only if requested + assertClusterMetadataInResponse(resp, responseExpectMeta); + } + + // close remote-cluster-2 so that it is also unavailable + cluster(REMOTE_CLUSTER_2).close(); + + try (EsqlQueryResponse resp = runQuery("FROM logs-*,*:logs-* | STATS sum (v)", requestIncludeMeta)) { + List> values = getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), equalTo(List.of(45L))); + + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertNotNull(executionInfo); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + long overallTookMillis = executionInfo.overallTook().millis(); + assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2, LOCAL_CLUSTER))); + + EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(remote1Cluster.getIndexExpression(), equalTo("logs-*")); + assertThat(remote1Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remote1Cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(remote1Cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(remote1Cluster.getTotalShards(), equalTo(0)); + assertThat(remote1Cluster.getSuccessfulShards(), equalTo(0)); + assertThat(remote1Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote1Cluster.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(remote2Cluster.getIndexExpression(), equalTo("logs-*")); + assertThat(remote2Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remote2Cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(remote2Cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(remote2Cluster.getTotalShards(), equalTo(0)); + assertThat(remote2Cluster.getSuccessfulShards(), equalTo(0)); + assertThat(remote2Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote2Cluster.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); + assertThat(localCluster.getIndexExpression(), equalTo("logs-*")); + assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(localCluster.getTotalShards(), equalTo(localNumShards)); + assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localCluster.getSkippedShards(), equalTo(0)); + assertThat(localCluster.getFailedShards(), equalTo(0)); + + // ensure that the _clusters metadata is present only if requested + assertClusterMetadataInResponse(resp, responseExpectMeta); + } + } finally { + clearSkipUnavailable(numClusters); + } + } + + public void testRemoteOnlyCCSAgainstDisconnectedRemoteWithSkipUnavailableTrue() throws Exception { + int numClusters = 3; + setupClusters(numClusters); + setSkipUnavailable(REMOTE_CLUSTER_1, true); + setSkipUnavailable(REMOTE_CLUSTER_2, true); + + try { + // close remote cluster 1 so that it is unavailable + cluster(REMOTE_CLUSTER_1).close(); + + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + // query only the REMOTE_CLUSTER_1 + try (EsqlQueryResponse resp = runQuery("FROM " + REMOTE_CLUSTER_1 + ":logs-* | STATS sum (v)", requestIncludeMeta)) { + List columns = resp.columns(); + assertThat(columns.size(), equalTo(1)); + // column from an empty result should be {"name":"","type":"null"} + assertThat(columns.get(0).name(), equalTo("")); + assertThat(columns.get(0).type(), equalTo(DataType.NULL)); + + List> values = getValuesList(resp); + assertThat(values, hasSize(0)); + + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertNotNull(executionInfo); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + long overallTookMillis = executionInfo.overallTook().millis(); + assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1))); + + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(remoteCluster.getIndexExpression(), equalTo("logs-*")); + assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(remoteCluster.getTotalShards(), equalTo(0)); + assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); + assertThat(remoteCluster.getSkippedShards(), equalTo(0)); + assertThat(remoteCluster.getFailedShards(), equalTo(0)); + + // ensure that the _clusters metadata is present only if requested + assertClusterMetadataInResponse(resp, responseExpectMeta); + } + + // close remote cluster 2 so that it is also unavailable + cluster(REMOTE_CLUSTER_2).close(); + + // query only the both remote clusters + try ( + EsqlQueryResponse resp = runQuery( + "FROM " + REMOTE_CLUSTER_1 + ":logs-*," + REMOTE_CLUSTER_2 + ":logs-* | STATS sum (v)", + requestIncludeMeta + ) + ) { + List columns = resp.columns(); + assertThat(columns.size(), equalTo(1)); + // column from an empty result should be {"name":"","type":"null"} + assertThat(columns.get(0).name(), equalTo("")); + assertThat(columns.get(0).type(), equalTo(DataType.NULL)); + + List> values = getValuesList(resp); + assertThat(values, hasSize(0)); + + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertNotNull(executionInfo); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + long overallTookMillis = executionInfo.overallTook().millis(); + assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2))); + + EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(REMOTE_CLUSTER_1); + assertThat(remote1Cluster.getIndexExpression(), equalTo("logs-*")); + assertThat(remote1Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remote1Cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(remote1Cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(remote1Cluster.getTotalShards(), equalTo(0)); + assertThat(remote1Cluster.getSuccessfulShards(), equalTo(0)); + assertThat(remote1Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote1Cluster.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(REMOTE_CLUSTER_2); + assertThat(remote2Cluster.getIndexExpression(), equalTo("logs-*")); + assertThat(remote2Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remote2Cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(remote2Cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(remote2Cluster.getTotalShards(), equalTo(0)); + assertThat(remote2Cluster.getSuccessfulShards(), equalTo(0)); + assertThat(remote2Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote2Cluster.getFailedShards(), equalTo(0)); + + // ensure that the _clusters metadata is present only if requested + assertClusterMetadataInResponse(resp, responseExpectMeta); + } + + } finally { + clearSkipUnavailable(numClusters); + } + } + + public void testCCSAgainstDisconnectedRemoteWithSkipUnavailableFalse() throws Exception { + int numClusters = 2; + setupClusters(numClusters); + setSkipUnavailable(REMOTE_CLUSTER_1, false); + + try { + // close the remote cluster so that it is unavailable + cluster(REMOTE_CLUSTER_1).close(); + + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + + final Exception exception = expectThrows( + Exception.class, + () -> runQuery("FROM logs-*,*:logs-* | STATS sum (v)", requestIncludeMeta) + ); + assertThat(ExceptionsHelper.isRemoteUnavailableException(exception), is(true)); + } finally { + clearSkipUnavailable(numClusters); + } + } + + public void testRemoteOnlyCCSAgainstDisconnectedRemoteWithSkipUnavailableFalse() throws Exception { + int numClusters = 3; + setupClusters(numClusters); + setSkipUnavailable(REMOTE_CLUSTER_1, false); + setSkipUnavailable(REMOTE_CLUSTER_2, randomBoolean()); + + try { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + { + // close the remote cluster so that it is unavailable + cluster(REMOTE_CLUSTER_1).close(); + Exception exception = expectThrows(Exception.class, () -> runQuery("FROM *:logs-* | STATS sum (v)", requestIncludeMeta)); + assertThat(ExceptionsHelper.isRemoteUnavailableException(exception), is(true)); + } + { + // close remote cluster 2 so that it is unavailable + cluster(REMOTE_CLUSTER_2).close(); + Exception exception = expectThrows(Exception.class, () -> runQuery("FROM *:logs-* | STATS sum (v)", requestIncludeMeta)); + assertThat(ExceptionsHelper.isRemoteUnavailableException(exception), is(true)); + } + } finally { + clearSkipUnavailable(numClusters); + } + } + + private void setSkipUnavailable(String clusterAlias, boolean skip) { + client(LOCAL_CLUSTER).admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put("cluster.remote." + clusterAlias + ".skip_unavailable", skip).build()) + .get(); + } + + private void clearSkipUnavailable(int numClusters) { + assert numClusters == 2 || numClusters == 3 : "Only 2 or 3 clusters supported"; + Settings.Builder settingsBuilder = Settings.builder().putNull("cluster.remote." + REMOTE_CLUSTER_1 + ".skip_unavailable"); + if (numClusters == 3) { + settingsBuilder.putNull("cluster.remote." + REMOTE_CLUSTER_2 + ".skip_unavailable"); + } + client(LOCAL_CLUSTER).admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settingsBuilder.build()) + .get(); + } + + private static void assertClusterMetadataInResponse(EsqlQueryResponse resp, boolean responseExpectMeta) { + try { + final Map esqlResponseAsMap = XContentTestUtils.convertToMap(resp); + final Object clusters = esqlResponseAsMap.get("_clusters"); + if (responseExpectMeta) { + assertNotNull(clusters); + // test a few entries to ensure it looks correct (other tests do a full analysis of the metadata in the response) + @SuppressWarnings("unchecked") + Map inner = (Map) clusters; + assertTrue(inner.containsKey("total")); + assertTrue(inner.containsKey("details")); + } else { + assertNull(clusters); + } + } catch (IOException e) { + fail("Could not convert ESQL response to Map: " + e); + } + } + + protected EsqlQueryResponse runQuery(String query, Boolean ccsMetadataInResponse) { + EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + request.profile(randomInt(5) == 2); + request.columnar(randomBoolean()); + if (ccsMetadataInResponse != null) { + request.includeCCSMetadata(ccsMetadataInResponse); + } + return runQuery(request); + } + + protected EsqlQueryResponse runQuery(EsqlQueryRequest request) { + return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); + } + + /** + * v1: value to send to runQuery (can be null; null means use default value) + * v2: whether to expect CCS Metadata in the response (cannot be null) + * @return + */ + public static Tuple randomIncludeCCSMetadata() { + return switch (randomIntBetween(1, 3)) { + case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE); + case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE); + case 3 -> new Tuple<>(null, Boolean.FALSE); + default -> throw new AssertionError("should not get here"); + }; + } + + Map setupClusters(int numClusters) { + assert numClusters == 2 || numClusters == 3 : "2 or 3 clusters supported not: " + numClusters; + String localIndex = "logs-1"; + int numShardsLocal = randomIntBetween(1, 5); + populateLocalIndices(localIndex, numShardsLocal); + + String remoteIndex = "logs-2"; + int numShardsRemote = randomIntBetween(1, 5); + populateRemoteIndices(REMOTE_CLUSTER_1, remoteIndex, numShardsRemote); + + Map clusterInfo = new HashMap<>(); + clusterInfo.put("local.num_shards", numShardsLocal); + clusterInfo.put("local.index", localIndex); + clusterInfo.put("remote.num_shards", numShardsRemote); + clusterInfo.put("remote.index", remoteIndex); + + if (numClusters == 3) { + int numShardsRemote2 = randomIntBetween(1, 5); + populateRemoteIndices(REMOTE_CLUSTER_2, remoteIndex, numShardsRemote2); + clusterInfo.put("remote2.index", remoteIndex); + clusterInfo.put("remote2.num_shards", numShardsRemote2); + } + + return clusterInfo; + } + + void populateLocalIndices(String indexName, int numShards) { + Client localClient = client(LOCAL_CLUSTER); + assertAcked( + localClient.admin() + .indices() + .prepareCreate(indexName) + .setSettings(Settings.builder().put("index.number_of_shards", numShards)) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") + ); + for (int i = 0; i < 10; i++) { + localClient.prepareIndex(indexName).setSource("id", "local-" + i, "tag", "local", "v", i).get(); + } + localClient.admin().indices().prepareRefresh(indexName).get(); + } + + void populateRemoteIndices(String clusterAlias, String indexName, int numShards) { + Client remoteClient = client(clusterAlias); + assertAcked( + remoteClient.admin() + .indices() + .prepareCreate(indexName) + .setSettings(Settings.builder().put("index.number_of_shards", numShards)) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") + ); + for (int i = 0; i < 10; i++) { + remoteClient.prepareIndex(indexName).setSource("id", "remote-" + i, "tag", "remote", "v", i * i).get(); + } + remoteClient.admin().indices().prepareRefresh(indexName).get(); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index ddd5cff014ed2..ba44adb5a85e0 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; @@ -246,7 +247,8 @@ public void testSearchesWhereMissingIndicesAreSpecified() { EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); assertThat(localCluster.getIndexExpression(), equalTo("no_such_index")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + // TODO: a follow on PR will change this to throw an Exception when the local cluster requests a concrete index that is missing + assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); assertThat(localCluster.getTotalShards(), equalTo(0)); @@ -499,7 +501,7 @@ public void testCCSExecutionOnSearchesWithLimit0() { EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); assertThat(localCluster.getIndexExpression(), equalTo("nomatch*")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); assertThat(remoteCluster.getTotalShards(), equalTo(0)); @@ -803,6 +805,14 @@ Map setupTwoClusters() { clusterInfo.put("local.index", localIndex); clusterInfo.put("remote.num_shards", numShardsRemote); clusterInfo.put("remote.index", remoteIndex); + + String skipUnavailableKey = Strings.format("cluster.remote.%s.skip_unavailable", REMOTE_CLUSTER); + Setting skipUnavailableSetting = cluster(REMOTE_CLUSTER).clusterService().getClusterSettings().get(skipUnavailableKey); + boolean skipUnavailable = (boolean) cluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).clusterService() + .getClusterSettings() + .get(skipUnavailableSetting); + clusterInfo.put("remote.skip_unavailable", skipUnavailable); + return clusterInfo; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java index aeac14091f378..f2ab0355304b3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -281,6 +282,7 @@ public static class Cluster implements ToXContentFragment, Writeable { private final Integer successfulShards; private final Integer skippedShards; private final Integer failedShards; + private final List failures; private final TimeValue took; // search latency for this cluster sub-search /** @@ -300,7 +302,7 @@ public String toString() { } public Cluster(String clusterAlias, String indexExpression) { - this(clusterAlias, indexExpression, true, Cluster.Status.RUNNING, null, null, null, null, null); + this(clusterAlias, indexExpression, true, Cluster.Status.RUNNING, null, null, null, null, null, null); } /** @@ -312,7 +314,7 @@ public Cluster(String clusterAlias, String indexExpression) { * @param skipUnavailable whether this Cluster is marked as skip_unavailable in remote cluster settings */ public Cluster(String clusterAlias, String indexExpression, boolean skipUnavailable) { - this(clusterAlias, indexExpression, skipUnavailable, Cluster.Status.RUNNING, null, null, null, null, null); + this(clusterAlias, indexExpression, skipUnavailable, Cluster.Status.RUNNING, null, null, null, null, null, null); } /** @@ -324,7 +326,7 @@ public Cluster(String clusterAlias, String indexExpression, boolean skipUnavaila * @param status current status of the search on this Cluster */ public Cluster(String clusterAlias, String indexExpression, boolean skipUnavailable, Cluster.Status status) { - this(clusterAlias, indexExpression, skipUnavailable, status, null, null, null, null, null); + this(clusterAlias, indexExpression, skipUnavailable, status, null, null, null, null, null, null); } public Cluster( @@ -336,6 +338,7 @@ public Cluster( Integer successfulShards, Integer skippedShards, Integer failedShards, + List failures, TimeValue took ) { assert clusterAlias != null : "clusterAlias cannot be null"; @@ -349,6 +352,11 @@ public Cluster( this.successfulShards = successfulShards; this.skippedShards = skippedShards; this.failedShards = failedShards; + if (failures == null) { + this.failures = List.of(); + } else { + this.failures = failures; + } this.took = took; } @@ -362,6 +370,11 @@ public Cluster(StreamInput in) throws IOException { this.failedShards = in.readOptionalInt(); this.took = in.readOptionalTimeValue(); this.skipUnavailable = in.readBoolean(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXEC_INFO_WITH_FAILURES)) { + this.failures = Collections.unmodifiableList(in.readCollectionAsList(ShardSearchFailure::readShardSearchFailure)); + } else { + this.failures = List.of(); + } } @Override @@ -375,6 +388,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalInt(failedShards); out.writeOptionalTimeValue(took); out.writeBoolean(skipUnavailable); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXEC_INFO_WITH_FAILURES)) { + out.writeCollection(failures); + } } /** @@ -387,12 +403,12 @@ public void writeTo(StreamOutput out) throws IOException { * All other fields can be set and override the value in the "copyFrom" Cluster. */ public static class Builder { - private String indexExpression; private Cluster.Status status; private Integer totalShards; private Integer successfulShards; private Integer skippedShards; private Integer failedShards; + private List failures; private TimeValue took; private final Cluster original; @@ -408,22 +424,18 @@ public Builder(Cluster copyFrom) { public Cluster build() { return new Cluster( original.getClusterAlias(), - indexExpression == null ? original.getIndexExpression() : indexExpression, + original.getIndexExpression(), original.isSkipUnavailable(), status != null ? status : original.getStatus(), totalShards != null ? totalShards : original.getTotalShards(), successfulShards != null ? successfulShards : original.getSuccessfulShards(), skippedShards != null ? skippedShards : original.getSkippedShards(), failedShards != null ? failedShards : original.getFailedShards(), + failures != null ? failures : original.getFailures(), took != null ? took : original.getTook() ); } - public Cluster.Builder setIndexExpression(String indexExpression) { - this.indexExpression = indexExpression; - return this; - } - public Cluster.Builder setStatus(Cluster.Status status) { this.status = status; return this; @@ -449,6 +461,11 @@ public Cluster.Builder setFailedShards(int failedShards) { return this; } + public Cluster.Builder setFailures(List failures) { + this.failures = failures; + return this; + } + public Cluster.Builder setTook(TimeValue took) { this.took = took; return this; @@ -466,7 +483,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(STATUS_FIELD.getPreferredName(), getStatus().toString()); builder.field(INDICES_FIELD.getPreferredName(), indexExpression); if (took != null) { - // TODO: change this to took_nanos and call took.nanos? builder.field(TOOK.getPreferredName(), took.millis()); } if (totalShards != null) { @@ -483,6 +499,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); } + if (failures != null && failures.size() > 0) { + builder.startArray(RestActions.FAILURES_FIELD.getPreferredName()); + for (ShardSearchFailure failure : failures) { + failure.toXContent(builder, params); + } + builder.endArray(); + } } builder.endObject(); return builder; @@ -529,6 +552,10 @@ public Integer getFailedShards() { return failedShards; } + public List getFailures() { + return failures; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java index 7fb279f18b1dc..4f6886edc5fbc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java @@ -23,6 +23,7 @@ public final class EnrichResolution { private final Map resolvedPolicies = ConcurrentCollections.newConcurrentMap(); private final Map errors = ConcurrentCollections.newConcurrentMap(); + private final Map unavailableClusters = ConcurrentCollections.newConcurrentMap(); public ResolvedEnrichPolicy getResolvedPolicy(String policyName, Enrich.Mode mode) { return resolvedPolicies.get(new Key(policyName, mode)); @@ -51,6 +52,14 @@ public void addError(String policyName, Enrich.Mode mode, String reason) { errors.putIfAbsent(new Key(policyName, mode), reason); } + public void addUnavailableCluster(String clusterAlias, Exception e) { + unavailableClusters.put(clusterAlias, e); + } + + public Map getUnavailableClusters() { + return unavailableClusters; + } + private record Key(String policyName, Enrich.Mode mode) { } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index e67c406e26929..77ef5ef597bb5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.enrich; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.search.SearchRequest; @@ -50,6 +51,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -113,12 +115,27 @@ public void resolvePolicies( final boolean includeLocal = remoteClusters.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); lookupPolicies(remoteClusters, includeLocal, unresolvedPolicies, listener.map(lookupResponses -> { final EnrichResolution enrichResolution = new EnrichResolution(); + + Map lookupResponsesToProcess = new HashMap<>(); + + for (Map.Entry entry : lookupResponses.entrySet()) { + String clusterAlias = entry.getKey(); + if (entry.getValue().connectionError != null) { + enrichResolution.addUnavailableCluster(clusterAlias, entry.getValue().connectionError); + // remove unavailable cluster from the list of clusters which is used below to create the ResolvedEnrichPolicy + remoteClusters.remove(clusterAlias); + } else { + lookupResponsesToProcess.put(clusterAlias, entry.getValue()); + } + } + for (UnresolvedPolicy unresolved : unresolvedPolicies) { Tuple resolved = mergeLookupResults( unresolved, calculateTargetClusters(unresolved.mode, includeLocal, remoteClusters), - lookupResponses + lookupResponsesToProcess ); + if (resolved.v1() != null) { enrichResolution.addResolvedPolicy(unresolved.name, unresolved.mode, resolved.v1()); } else { @@ -149,13 +166,16 @@ private Tuple mergeLookupResults( Collection targetClusters, Map lookupResults ) { - assert targetClusters.isEmpty() == false; String policyName = unresolved.name; + if (targetClusters.isEmpty()) { + return Tuple.tuple(null, "enrich policy [" + policyName + "] cannot be resolved since remote clusters are unavailable"); + } final Map policies = new HashMap<>(); final List failures = new ArrayList<>(); for (String cluster : targetClusters) { LookupResponse lookupResult = lookupResults.get(cluster); if (lookupResult != null) { + assert lookupResult.connectionError == null : "Should never have a non-null connectionError here"; ResolvedEnrichPolicy policy = lookupResult.policies.get(policyName); if (policy != null) { policies.put(cluster, policy); @@ -261,22 +281,34 @@ private void lookupPolicies( if (remotePolicies.isEmpty() == false) { for (String cluster : remoteClusters) { ActionListener lookupListener = refs.acquire(resp -> lookupResponses.put(cluster, resp)); - getRemoteConnection( - cluster, - lookupListener.delegateFailureAndWrap( - (delegate, connection) -> transportService.sendRequest( + getRemoteConnection(cluster, new ActionListener() { + @Override + public void onResponse(Transport.Connection connection) { + transportService.sendRequest( connection, RESOLVE_ACTION_NAME, new LookupRequest(cluster, remotePolicies), TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>( - delegate, - LookupResponse::new, - threadPool.executor(ThreadPool.Names.SEARCH) - ) - ) - ) - ); + new ActionListenerResponseHandler<>(lookupListener.delegateResponse((l, e) -> { + if (ExceptionsHelper.isRemoteUnavailableException(e) + && remoteClusterService.isSkipUnavailable(cluster)) { + l.onResponse(new LookupResponse(e)); + } else { + l.onFailure(e); + } + }), LookupResponse::new, threadPool.executor(ThreadPool.Names.SEARCH)) + ); + } + + @Override + public void onFailure(Exception e) { + if (ExceptionsHelper.isRemoteUnavailableException(e) && remoteClusterService.isSkipUnavailable(cluster)) { + lookupListener.onResponse(new LookupResponse(e)); + } else { + lookupListener.onFailure(e); + } + } + }); } } // local cluster @@ -323,16 +355,30 @@ public void writeTo(StreamOutput out) throws IOException { private static class LookupResponse extends TransportResponse { final Map policies; final Map failures; + // does not need to be Writable since this indicates a failure to contact a remote cluster, so only set on querying cluster + final transient Exception connectionError; LookupResponse(Map policies, Map failures) { this.policies = policies; this.failures = failures; + this.connectionError = null; + } + + /** + * Use this constructor when the remote cluster is unavailable to indicate inability to do the enrich policy lookup + * @param connectionError Exception received when trying to connect to a remote cluster + */ + LookupResponse(Exception connectionError) { + this.policies = Collections.emptyMap(); + this.failures = Collections.emptyMap(); + this.connectionError = connectionError; } LookupResponse(StreamInput in) throws IOException { PlanStreamInput planIn = new PlanStreamInput(in, in.namedWriteableRegistry(), null); this.policies = planIn.readMap(StreamInput::readString, ResolvedEnrichPolicy::new); this.failures = planIn.readMap(StreamInput::readString, StreamInput::readString); + this.connectionError = null; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java index 371aa1b632309..b2eaefcf09d65 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java @@ -6,27 +6,28 @@ */ package org.elasticsearch.xpack.esql.index; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.core.Nullable; import java.util.Collections; +import java.util.Map; import java.util.Objects; -import java.util.Set; public final class IndexResolution { - public static IndexResolution valid(EsIndex index, Set unavailableClusters) { + public static IndexResolution valid(EsIndex index, Map unavailableClusters) { Objects.requireNonNull(index, "index must not be null if it was found"); Objects.requireNonNull(unavailableClusters, "unavailableClusters must not be null"); return new IndexResolution(index, null, unavailableClusters); } public static IndexResolution valid(EsIndex index) { - return valid(index, Collections.emptySet()); + return valid(index, Collections.emptyMap()); } public static IndexResolution invalid(String invalid) { Objects.requireNonNull(invalid, "invalid must not be null to signal that the index is invalid"); - return new IndexResolution(null, invalid, Collections.emptySet()); + return new IndexResolution(null, invalid, Collections.emptyMap()); } public static IndexResolution notFound(String name) { @@ -39,9 +40,9 @@ public static IndexResolution notFound(String name) { private final String invalid; // remote clusters included in the user's index expression that could not be connected to - private final Set unavailableClusters; + private final Map unavailableClusters; - private IndexResolution(EsIndex index, @Nullable String invalid, Set unavailableClusters) { + private IndexResolution(EsIndex index, @Nullable String invalid, Map unavailableClusters) { this.index = index; this.invalid = invalid; this.unavailableClusters = unavailableClusters; @@ -70,7 +71,11 @@ public boolean isValid() { return invalid == null; } - public Set getUnavailableClusters() { + /** + * @return Map of unavailable clusters (could not be connected to during field-caps query). Key of map is cluster alias, + * value is the {@link FieldCapabilitiesFailure} describing the issue. + */ + public Map getUnavailableClusters() { return unavailableClusters; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index ccd167942340c..1e78f454b7531 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -7,8 +7,11 @@ package org.elasticsearch.xpack.esql.session; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; @@ -22,7 +25,9 @@ import org.elasticsearch.indices.IndicesExpressionGrouper; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.analysis.Analyzer; @@ -44,6 +49,7 @@ import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; import org.elasticsearch.xpack.esql.expression.UnresolvedNamePattern; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.index.IndexResolution; import org.elasticsearch.xpack.esql.index.MappingException; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; @@ -68,6 +74,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -143,12 +150,105 @@ public void execute( analyzedPlan( parse(request.query(), request.params()), executionInfo, - listener.delegateFailureAndWrap( - (next, analyzedPlan) -> executeOptimizedPlan(request, executionInfo, runPhase, optimizedPlan(analyzedPlan), next) - ) + new LogicalPlanActionListener(request, executionInfo, runPhase, listener) ); } + /** + * ActionListener that receives LogicalPlan or error from logical planning. + * Any Exception sent to onFailure stops processing, but not all are fatal (return a 4xx or 5xx), so + * the onFailure handler determines whether to return an empty successful result or a 4xx/5xx error. + */ + class LogicalPlanActionListener implements ActionListener { + private final EsqlQueryRequest request; + private final EsqlExecutionInfo executionInfo; + private final BiConsumer> runPhase; + private final ActionListener listener; + + LogicalPlanActionListener( + EsqlQueryRequest request, + EsqlExecutionInfo executionInfo, + BiConsumer> runPhase, + ActionListener listener + ) { + this.request = request; + this.executionInfo = executionInfo; + this.runPhase = runPhase; + this.listener = listener; + } + + @Override + public void onResponse(LogicalPlan analyzedPlan) { + executeOptimizedPlan(request, executionInfo, runPhase, optimizedPlan(analyzedPlan), listener); + } + + /** + * Whether to return an empty result (HTTP status 200) for a CCS rather than a top level 4xx/5xx error. + * + * For cases where field-caps had no indices to search and the remotes were unavailable, we + * return an empty successful response (200) if all remotes are marked with skip_unavailable=true. + * + * Note: a follow-on PR will expand this logic to handle cases where no indices could be found to match + * on any of the requested clusters. + */ + private boolean returnSuccessWithEmptyResult(Exception e) { + if (executionInfo.isCrossClusterSearch() == false) { + return false; + } + + if (e instanceof NoClustersToSearchException || ExceptionsHelper.isRemoteUnavailableException(e)) { + for (String clusterAlias : executionInfo.clusterAliases()) { + if (executionInfo.isSkipUnavailable(clusterAlias) == false + && clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) == false) { + return false; + } + } + return true; + } + return false; + } + + @Override + public void onFailure(Exception e) { + if (returnSuccessWithEmptyResult(e)) { + executionInfo.markEndQuery(); + Exception exceptionForResponse; + if (e instanceof ConnectTransportException) { + // when field-caps has no field info (since no clusters could be connected to or had matching indices) + // it just throws the first exception in its list, so this odd special handling is here is to avoid + // having one specific remote alias name in all failure lists in the metadata response + exceptionForResponse = new RemoteTransportException( + "connect_transport_exception - unable to connect to remote cluster", + null + ); + } else { + exceptionForResponse = e; + } + for (String clusterAlias : executionInfo.clusterAliases()) { + executionInfo.swapCluster(clusterAlias, (k, v) -> { + EsqlExecutionInfo.Cluster.Builder builder = new EsqlExecutionInfo.Cluster.Builder(v).setTook( + executionInfo.overallTook() + ).setTotalShards(0).setSuccessfulShards(0).setSkippedShards(0).setFailedShards(0); + if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { + // never mark local cluster as skipped + builder.setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL); + } else { + builder.setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED); + // add this exception to the failures list only if there is no failure already recorded there + if (v.getFailures() == null || v.getFailures().size() == 0) { + builder.setFailures(List.of(new ShardSearchFailure(exceptionForResponse))); + } + } + return builder.build(); + }); + } + listener.onResponse(new Result(Analyzer.NO_FIELDS, Collections.emptyList(), Collections.emptyList(), executionInfo)); + } else { + listener.onFailure(e); + } + } + } + /** * Execute an analyzed plan. Most code should prefer calling {@link #execute} but * this is public for testing. See {@link Phased} for the sequence of operations. @@ -161,8 +261,8 @@ public void executeOptimizedPlan( ActionListener listener ) { LogicalPlan firstPhase = Phased.extractFirstPhase(optimizedPlan); + updateExecutionInfoAtEndOfPlanning(executionInfo); if (firstPhase == null) { - updateExecutionInfoAtEndOfPlanning(executionInfo); runPhase.accept(logicalPlanToPhysicalPlan(optimizedPlan, request), listener); } else { executePhased(new ArrayList<>(), optimizedPlan, request, executionInfo, firstPhase, runPhase, listener); @@ -242,17 +342,30 @@ private void preAnalyze( .stream() .map(ResolvedEnrichPolicy::matchField) .collect(Collectors.toSet()); - preAnalyzeIndices(parsed, executionInfo, l.delegateFailureAndWrap((ll, indexResolution) -> { + Map unavailableClusters = enrichResolution.getUnavailableClusters(); + preAnalyzeIndices(parsed, executionInfo, unavailableClusters, l.delegateFailureAndWrap((ll, indexResolution) -> { + // TODO in follow-PR (for skip_unavailble handling of missing concrete indexes) add some tests for invalid index + // resolution to updateExecutionInfo if (indexResolution.isValid()) { updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.getUnavailableClusters()); + if (executionInfo.isCrossClusterSearch() + && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) == 0) { + // for a CCS, if all clusters have been marked as SKIPPED, nothing to search so send a sentinel + // Exception to let the LogicalPlanActionListener decide how to proceed + ll.onFailure(new NoClustersToSearchException()); + return; + } + Set newClusters = enrichPolicyResolver.groupIndicesPerCluster( indexResolution.get().concreteIndices().toArray(String[]::new) ).keySet(); // If new clusters appear when resolving the main indices, we need to resolve the enrich policies again // or exclude main concrete indices. Since this is rare, it's simpler to resolve the enrich policies again. // TODO: add a test for this - if (targetClusters.containsAll(newClusters) == false) { + if (targetClusters.containsAll(newClusters) == false + // do not bother with a re-resolution if only remotes were requested and all were offline + && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) > 0) { enrichPolicyResolver.resolvePolicies( newClusters, unresolvedPolicies, @@ -269,6 +382,7 @@ private void preAnalyze( private void preAnalyzeIndices( LogicalPlan parsed, EsqlExecutionInfo executionInfo, + Map unavailableClusters, // known to be unavailable from the enrich policy API call ActionListener listener, Set enrichPolicyMatchFields ) { @@ -288,10 +402,34 @@ private void preAnalyzeIndices( String indexExpr = Strings.arrayToCommaDelimitedString(entry.getValue().indices()); executionInfo.swapCluster(clusterAlias, (k, v) -> { assert v == null : "No cluster for " + clusterAlias + " should have been added to ExecutionInfo yet"; - return new EsqlExecutionInfo.Cluster(clusterAlias, indexExpr, executionInfo.isSkipUnavailable(clusterAlias)); + if (unavailableClusters.containsKey(k)) { + return new EsqlExecutionInfo.Cluster( + clusterAlias, + indexExpr, + executionInfo.isSkipUnavailable(clusterAlias), + EsqlExecutionInfo.Cluster.Status.SKIPPED, + 0, + 0, + 0, + 0, + List.of(new ShardSearchFailure(unavailableClusters.get(k))), + new TimeValue(0) + ); + } else { + return new EsqlExecutionInfo.Cluster(clusterAlias, indexExpr, executionInfo.isSkipUnavailable(clusterAlias)); + } }); } - indexResolver.resolveAsMergedMapping(table.index(), fieldNames, listener); + // if the preceding call to the enrich policy API found unavailable clusters, recreate the index expression to search + // based only on available clusters (which could now be an empty list) + String indexExpressionToResolve = createIndexExpressionFromAvailableClusters(executionInfo); + if (indexExpressionToResolve.isEmpty()) { + // if this was a pure remote CCS request (no local indices) and all remotes are offline, return an empty IndexResolution + listener.onResponse(IndexResolution.valid(new EsIndex(table.index(), Map.of(), Map.of()))); + } else { + // call the EsqlResolveFieldsAction (field-caps) to resolve indices and get field types + indexResolver.resolveAsMergedMapping(indexExpressionToResolve, fieldNames, listener); + } } else { try { // occurs when dealing with local relations (row a = 1) @@ -302,6 +440,30 @@ private void preAnalyzeIndices( } } + // visible for testing + static String createIndexExpressionFromAvailableClusters(EsqlExecutionInfo executionInfo) { + StringBuilder sb = new StringBuilder(); + for (String clusterAlias : executionInfo.clusterAliases()) { + EsqlExecutionInfo.Cluster cluster = executionInfo.getCluster(clusterAlias); + if (cluster.getStatus() != EsqlExecutionInfo.Cluster.Status.SKIPPED) { + if (cluster.getClusterAlias().equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + sb.append(executionInfo.getCluster(clusterAlias).getIndexExpression()).append(','); + } else { + String indexExpression = executionInfo.getCluster(clusterAlias).getIndexExpression(); + for (String index : indexExpression.split(",")) { + sb.append(clusterAlias).append(':').append(index).append(','); + } + } + } + } + + if (sb.length() > 0) { + return sb.substring(0, sb.length() - 1); + } else { + return ""; + } + } + static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchFields) { if (false == parsed.anyMatch(plan -> plan instanceof Aggregate || plan instanceof Project)) { // no explicit columns selection, for example "from employees" @@ -446,14 +608,28 @@ public PhysicalPlan optimizedPhysicalPlan(LogicalPlan optimizedPlan) { return plan; } - // visible for testing - static void updateExecutionInfoWithUnavailableClusters(EsqlExecutionInfo executionInfo, Set unavailableClusters) { - for (String clusterAlias : unavailableClusters) { - executionInfo.swapCluster( - clusterAlias, - (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED).build() + static void updateExecutionInfoWithUnavailableClusters(EsqlExecutionInfo execInfo, Map unavailable) { + for (Map.Entry entry : unavailable.entrySet()) { + String clusterAlias = entry.getKey(); + boolean skipUnavailable = execInfo.getCluster(clusterAlias).isSkipUnavailable(); + RemoteTransportException e = new RemoteTransportException( + Strings.format("Remote cluster [%s] (with setting skip_unavailable=%s) is not available", clusterAlias, skipUnavailable), + entry.getValue().getException() ); - // TODO: follow-on PR will set SKIPPED status when skip_unavailable=true and throw an exception when skip_un=false + if (skipUnavailable) { + execInfo.swapCluster( + clusterAlias, + (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0) + .setFailures(List.of(new ShardSearchFailure(e))) + .build() + ); + } else { + throw e; + } } } @@ -466,16 +642,22 @@ static void updateExecutionInfoWithClustersWithNoMatchingIndices(EsqlExecutionIn } Set clustersRequested = executionInfo.clusterAliases(); Set clustersWithNoMatchingIndices = Sets.difference(clustersRequested, clustersWithResolvedIndices); - clustersWithNoMatchingIndices.removeAll(indexResolution.getUnavailableClusters()); + clustersWithNoMatchingIndices.removeAll(indexResolution.getUnavailableClusters().keySet()); /* * These are clusters in the original request that are not present in the field-caps response. They were * specified with an index or indices that do not exist, so the search on that cluster is done. * Mark it as SKIPPED with 0 shards searched and took=0. */ for (String c : clustersWithNoMatchingIndices) { + // TODO: in a follow-on PR, throw a Verification(400 status code) for local and remotes with skip_unavailable=false if + // they were requested with one or more concrete indices + // for now we never mark the local cluster as SKIPPED + final var status = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(c) + ? EsqlExecutionInfo.Cluster.Status.SUCCESSFUL + : EsqlExecutionInfo.Cluster.Status.SKIPPED; executionInfo.swapCluster( c, - (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED) + (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(status) .setTook(new TimeValue(0)) .setTotalShards(0) .setSuccessfulShards(0) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java index c0f94bccc50a4..f76f7798dece8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java @@ -90,6 +90,7 @@ public void resolveAsMergedMapping(String indexWildcard, Set fieldNames, public IndexResolution mergedMappings(String indexPattern, FieldCapabilitiesResponse fieldCapsResponse) { assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH_COORDINATION); // too expensive to run this on a transport worker if (fieldCapsResponse.getIndexResponses().isEmpty()) { + // TODO in follow-on PR, handle the case where remotes were specified with non-existent indices, according to skip_unavailable return IndexResolution.notFound(indexPattern); } @@ -158,18 +159,18 @@ public IndexResolution mergedMappings(String indexPattern, FieldCapabilitiesResp for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { concreteIndices.put(ir.getIndexName(), ir.getIndexMode()); } - Set unavailableRemoteClusters = determineUnavailableRemoteClusters(fieldCapsResponse.getFailures()); - return IndexResolution.valid(new EsIndex(indexPattern, rootFields, concreteIndices), unavailableRemoteClusters); + Map unavailableRemotes = determineUnavailableRemoteClusters(fieldCapsResponse.getFailures()); + return IndexResolution.valid(new EsIndex(indexPattern, rootFields, concreteIndices), unavailableRemotes); } // visible for testing - static Set determineUnavailableRemoteClusters(List failures) { - Set unavailableRemotes = new HashSet<>(); + static Map determineUnavailableRemoteClusters(List failures) { + Map unavailableRemotes = new HashMap<>(); for (FieldCapabilitiesFailure failure : failures) { if (ExceptionsHelper.isRemoteUnavailableException(failure.getException())) { for (String indexExpression : failure.getIndices()) { if (indexExpression.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) > 0) { - unavailableRemotes.add(RemoteClusterAware.parseClusterAlias(indexExpression)); + unavailableRemotes.put(RemoteClusterAware.parseClusterAlias(indexExpression), failure); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/NoClustersToSearchException.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/NoClustersToSearchException.java new file mode 100644 index 0000000000000..f7ae78a521933 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/NoClustersToSearchException.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.session; + +/** + * Sentinel exception indicating that logical planning could not find any clusters to search + * when, for a remote-only cross-cluster search, all clusters have been marked as SKIPPED. + * Intended for use only on the querying coordinating during ES|QL logical planning. + */ +public class NoClustersToSearchException extends RuntimeException {} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 27343bf7ce205..4aaf4f6cccf0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -147,6 +147,7 @@ EsqlExecutionInfo createExecutionInfo() { 10, 3, 0, + null, new TimeValue(4444L) ) ); @@ -161,6 +162,7 @@ EsqlExecutionInfo createExecutionInfo() { 12, 5, 0, + null, new TimeValue(4999L) ) ); @@ -498,6 +500,7 @@ private static EsqlExecutionInfo.Cluster parseCluster(String clusterAlias, XCont successfulShardsFinal, skippedShardsFinal, failedShardsFinal, + null, tookTimeValue ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java index 5fbd5dd28050f..625cb5628d039 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java @@ -313,6 +313,7 @@ public void testAcquireComputeRunningOnRemoteClusterFillsInTookTime() { 10, 3, 0, + null, null // to be filled in the acquireCompute listener ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java index 32b31cf78650b..dddfa67338419 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java @@ -7,117 +7,200 @@ package org.elasticsearch.xpack.esql.session; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; +import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexMode; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.NoSeedNodeLeftException; import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.index.IndexResolution; import org.elasticsearch.xpack.esql.type.EsFieldTests; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class EsqlSessionTests extends ESTestCase { - public void testUpdateExecutionInfoWithUnavailableClusters() { - // skip_unavailable=true clusters are unavailable, both marked as SKIPPED + public void testCreateIndexExpressionFromAvailableClusters() { + final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + final String remote1Alias = "remote1"; + final String remote2Alias = "remote2"; + + // no clusters marked as skipped { - final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; - final String remote1Alias = "remote1"; - final String remote2Alias = "remote2"; EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", true)); - EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, Set.of(remote1Alias, remote2Alias)); + String indexExpr = EsqlSession.createIndexExpressionFromAvailableClusters(executionInfo); + List list = Arrays.stream(Strings.splitStringByCommaToArray(indexExpr)).toList(); + assertThat(list.size(), equalTo(5)); + assertThat( + new HashSet<>(list), + equalTo(Strings.commaDelimitedListToSet("logs*,remote1:*,remote2:mylogs1,remote2:mylogs2,remote2:logs*")) + ); + } - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(localClusterAlias, remote1Alias, remote2Alias))); - assertNull(executionInfo.overallTook()); + // one cluster marked as skipped, so not present in revised index expression + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*,foo", true)); + executionInfo.swapCluster( + remote2Alias, + (k, v) -> new EsqlExecutionInfo.Cluster( + remote2Alias, + "mylogs1,mylogs2,logs*", + true, + EsqlExecutionInfo.Cluster.Status.SKIPPED + ) + ); - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); - assertThat(localCluster.getIndexExpression(), equalTo("logs*")); - assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + String indexExpr = EsqlSession.createIndexExpressionFromAvailableClusters(executionInfo); + List list = Arrays.stream(Strings.splitStringByCommaToArray(indexExpr)).toList(); + assertThat(list.size(), equalTo(3)); + assertThat(new HashSet<>(list), equalTo(Strings.commaDelimitedListToSet("logs*,remote1:*,remote1:foo"))); + } - EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); - assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); - assertClusterStatusAndHasNullCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.SKIPPED); + // two clusters marked as skipped, so only local cluster present in revised index expression + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster( + remote1Alias, + (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*,foo", true, EsqlExecutionInfo.Cluster.Status.SKIPPED) + ); + executionInfo.swapCluster( + remote2Alias, + (k, v) -> new EsqlExecutionInfo.Cluster( + remote2Alias, + "mylogs1,mylogs2,logs*", + true, + EsqlExecutionInfo.Cluster.Status.SKIPPED + ) + ); - EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); - assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); - assertClusterStatusAndHasNullCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.SKIPPED); + assertThat(EsqlSession.createIndexExpressionFromAvailableClusters(executionInfo), equalTo("logs*")); + } + + // only remotes present and all marked as skipped, so in revised index expression should be empty string + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + executionInfo.swapCluster( + remote1Alias, + (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*,foo", true, EsqlExecutionInfo.Cluster.Status.SKIPPED) + ); + executionInfo.swapCluster( + remote2Alias, + (k, v) -> new EsqlExecutionInfo.Cluster( + remote2Alias, + "mylogs1,mylogs2,logs*", + true, + EsqlExecutionInfo.Cluster.Status.SKIPPED + ) + ); + + assertThat(EsqlSession.createIndexExpressionFromAvailableClusters(executionInfo), equalTo("")); } + } + + public void testUpdateExecutionInfoWithUnavailableClusters() { + final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + final String remote1Alias = "remote1"; + final String remote2Alias = "remote2"; - // skip_unavailable=false cluster is unavailable, marked as SKIPPED // TODO: in follow on PR this will change to throwing an - // Exception + // skip_unavailable=true clusters are unavailable, both marked as SKIPPED { - final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; - final String remote1Alias = "remote1"; - final String remote2Alias = "remote2"; EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); - executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", true)); - EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, Set.of(remote2Alias)); + var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); + var unvailableClusters = Map.of(remote1Alias, failure, remote2Alias, failure); + EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, unvailableClusters); assertThat(executionInfo.clusterAliases(), equalTo(Set.of(localClusterAlias, remote1Alias, remote2Alias))); assertNull(executionInfo.overallTook()); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); - assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); - assertClusterStatusAndHasNullCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.SKIPPED); EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); - assertClusterStatusAndHasNullCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.SKIPPED); + assertClusterStatusAndShardCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.SKIPPED); + } + + // skip_unavailable=false cluster is unavailable, throws Exception + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); + + var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); + RemoteTransportException e = expectThrows( + RemoteTransportException.class, + () -> EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, Map.of(remote2Alias, failure)) + ); + assertThat(e.status().getStatus(), equalTo(500)); + assertThat( + e.getDetailedMessage(), + containsString("Remote cluster [remote2] (with setting skip_unavailable=false) is not available") + ); + assertThat(e.getCause().getMessage(), containsString("unable to connect")); } // all clusters available, no Clusters in ExecutionInfo should be modified { - final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; - final String remote1Alias = "remote1"; - final String remote2Alias = "remote2"; EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); - EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, Set.of()); + EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, Map.of()); assertThat(executionInfo.clusterAliases(), equalTo(Set.of(localClusterAlias, remote1Alias, remote2Alias))); assertNull(executionInfo.overallTook()); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); - assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); - assertClusterStatusAndHasNullCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); - assertClusterStatusAndHasNullCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); } } public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { + final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + final String remote1Alias = "remote1"; + final String remote2Alias = "remote2"; // all clusters present in EsIndex, so no updates to EsqlExecutionInfo should happen { - final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; - final String remote1Alias = "remote1"; - final String remote2Alias = "remote2"; EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); @@ -139,28 +222,25 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { IndexMode.STANDARD ) ); - IndexResolution indexResolution = IndexResolution.valid(esIndex, Set.of()); + IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of()); EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); - assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); - assertClusterStatusAndHasNullCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); - assertClusterStatusAndHasNullCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); } // remote1 is missing from EsIndex info, so it should be updated and marked as SKIPPED with 0 total shards, 0 took time, etc. { - final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; - final String remote1Alias = "remote1"; - final String remote2Alias = "remote2"; EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); @@ -180,13 +260,13 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { IndexMode.STANDARD ) ); - IndexResolution indexResolution = IndexResolution.valid(esIndex, Set.of()); + IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of()); EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); - assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); @@ -199,14 +279,11 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); - assertClusterStatusAndHasNullCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); } // all remotes are missing from EsIndex info, so they should be updated and marked as SKIPPED with 0 total shards, 0 took time, etc. { - final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; - final String remote1Alias = "remote1"; - final String remote2Alias = "remote2"; EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); @@ -217,21 +294,21 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { randomMapping(), Map.of("logs-a", IndexMode.STANDARD) ); - // mark remote1 as unavailable - IndexResolution indexResolution = IndexResolution.valid(esIndex, Set.of(remote1Alias)); + // remote1 is unavailable + var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); + IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of(remote1Alias, failure)); EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); - assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + assertClusterStatusAndShardCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); - // remote1 is left as RUNNING, since another method (updateExecutionInfoWithUnavailableClusters) not under test changes status + // since remote1 is in the unavailable Map (passed to IndexResolution.valid), it's status will not be changed + // by updateExecutionInfoWithClustersWithNoMatchingIndices (it is handled in updateExecutionInfoWithUnavailableClusters) assertThat(remote1Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.RUNNING)); - assertNull(remote1Cluster.getTook()); - assertNull(remote1Cluster.getTotalShards()); EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); @@ -242,6 +319,25 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { assertThat(remote2Cluster.getSkippedShards(), equalTo(0)); assertThat(remote2Cluster.getFailedShards(), equalTo(0)); } + + // all remotes are missing from EsIndex info. Since one is configured with skip_unavailable=false, + // an exception should be thrown + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*")); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); + + EsIndex esIndex = new EsIndex( + "logs*,remote2:mylogs1,remote2:mylogs2,remote2:logs*", + randomMapping(), + Map.of("logs-a", IndexMode.STANDARD) + ); + + var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); + IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of(remote1Alias, failure)); + EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + } } public void testUpdateExecutionInfoAtEndOfPlanning() { @@ -288,13 +384,22 @@ public void testUpdateExecutionInfoAtEndOfPlanning() { assertNull(remote2Cluster.getTook()); } - private void assertClusterStatusAndHasNullCounts(EsqlExecutionInfo.Cluster cluster, EsqlExecutionInfo.Cluster.Status status) { + private void assertClusterStatusAndShardCounts(EsqlExecutionInfo.Cluster cluster, EsqlExecutionInfo.Cluster.Status status) { assertThat(cluster.getStatus(), equalTo(status)); assertNull(cluster.getTook()); - assertNull(cluster.getTotalShards()); - assertNull(cluster.getSuccessfulShards()); - assertNull(cluster.getSkippedShards()); - assertNull(cluster.getFailedShards()); + if (status == EsqlExecutionInfo.Cluster.Status.RUNNING) { + assertNull(cluster.getTotalShards()); + assertNull(cluster.getSuccessfulShards()); + assertNull(cluster.getSkippedShards()); + assertNull(cluster.getFailedShards()); + } else if (status == EsqlExecutionInfo.Cluster.Status.SKIPPED) { + assertThat(cluster.getTotalShards(), equalTo(0)); + assertThat(cluster.getSuccessfulShards(), equalTo(0)); + assertThat(cluster.getSkippedShards(), equalTo(0)); + assertThat(cluster.getFailedShards(), equalTo(0)); + } else { + fail("Unexpected status: " + status); + } } private static Map randomMapping() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java index 51497b5ca5093..d6e410305afaa 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java @@ -15,6 +15,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Set; import static org.hamcrest.Matchers.equalTo; @@ -33,8 +34,8 @@ public void testDetermineUnavailableRemoteClusters() { ) ); - Set unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); - assertThat(unavailableClusters, equalTo(Set.of("remote1", "remote2"))); + Map unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); + assertThat(unavailableClusters.keySet(), equalTo(Set.of("remote1", "remote2"))); } // one cluster with "remote unavailable" with two failures @@ -43,8 +44,8 @@ public void testDetermineUnavailableRemoteClusters() { failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSuchRemoteClusterException("remote2"))); failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSeedNodeLeftException("no seed node"))); - Set unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); - assertThat(unavailableClusters, equalTo(Set.of("remote2"))); + Map unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); + assertThat(unavailableClusters.keySet(), equalTo(Set.of("remote2"))); } // two clusters, one "remote unavailable" type exceptions and one with another type @@ -57,23 +58,23 @@ public void testDetermineUnavailableRemoteClusters() { new IllegalStateException("Unable to open any connections") ) ); - Set unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); - assertThat(unavailableClusters, equalTo(Set.of("remote2"))); + Map unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); + assertThat(unavailableClusters.keySet(), equalTo(Set.of("remote2"))); } // one cluster1 with exception not known to indicate "remote unavailable" { List failures = new ArrayList<>(); failures.add(new FieldCapabilitiesFailure(new String[] { "remote1:mylogs1" }, new RuntimeException("foo"))); - Set unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); - assertThat(unavailableClusters, equalTo(Set.of())); + Map unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); + assertThat(unavailableClusters.keySet(), equalTo(Set.of())); } // empty failures list { List failures = new ArrayList<>(); - Set unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); - assertThat(unavailableClusters, equalTo(Set.of())); + Map unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); + assertThat(unavailableClusters.keySet(), equalTo(Set.of())); } } } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index 1a236ccb6aa06..d5b3141b539eb 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -34,9 +34,12 @@ import java.util.Base64; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -491,6 +494,10 @@ public void testCrossClusterQueryWithRemoteDLSAndFLS() throws Exception { assertThat(flatList, containsInAnyOrder("engineering")); } + /** + * Note: invalid_remote is "invalid" because it has a bogus API key and the cluster does not exist (cannot be connected to) + */ + @SuppressWarnings("unchecked") public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { configureRemoteCluster(); populateData(); @@ -512,22 +519,53 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { ); // invalid remote with local index should return local results - var q = "FROM invalid_remote:employees,employees | SORT emp_id DESC | LIMIT 10"; - Response response = performRequestWithRemoteSearchUser(esqlRequest(q)); - assertLocalOnlyResults(response); - - // only calling an invalid remote should error - ResponseException error = expectThrows(ResponseException.class, () -> { - var q2 = "FROM invalid_remote:employees | SORT emp_id DESC | LIMIT 10"; - performRequestWithRemoteSearchUser(esqlRequest(q2)); - }); - - if (skipUnavailable == false) { - assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat(error.getMessage(), containsString("unable to find apikey")); - } else { - assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(500)); - assertThat(error.getMessage(), containsString("Unable to connect to [invalid_remote]")); + { + var q = "FROM invalid_remote:employees,employees | SORT emp_id DESC | LIMIT 10"; + Response response = performRequestWithRemoteSearchUser(esqlRequest(q)); + // TODO: when skip_unavailable=false for invalid_remote, a fatal exception should be thrown + // this does not yet happen because field-caps returns nothing for this cluster, rather + // than an error, so the current code cannot detect that error. Follow on PR will handle this. + assertLocalOnlyResults(response); + } + + { + var q = "FROM invalid_remote:employees | SORT emp_id DESC | LIMIT 10"; + // errors from invalid remote should be ignored if the cluster is marked with skip_unavailable=true + if (skipUnavailable) { + // expected response: + // {"took":1,"columns":[],"values":[],"_clusters":{"total":1,"successful":0,"running":0,"skipped":1,"partial":0, + // "failed":0,"details":{"invalid_remote":{"status":"skipped","indices":"employees","took":1,"_shards": + // {"total":0,"successful":0,"skipped":0,"failed":0},"failures":[{"shard":-1,"index":null,"reason": + // {"type":"remote_transport_exception", + // "reason":"[connect_transport_exception - unable to connect to remote cluster]"}}]}}}} + Response response = performRequestWithRemoteSearchUser(esqlRequest(q)); + assertOK(response); + Map responseAsMap = entityAsMap(response); + List columns = (List) responseAsMap.get("columns"); + List values = (List) responseAsMap.get("values"); + assertThat(columns.size(), equalTo(1)); + Map column1 = (Map) columns.get(0); + assertThat(column1.get("name").toString(), equalTo("")); + assertThat(values.size(), equalTo(0)); + Map clusters = (Map) responseAsMap.get("_clusters"); + Map details = (Map) clusters.get("details"); + Map invalidRemoteEntry = (Map) details.get("invalid_remote"); + assertThat(invalidRemoteEntry.get("status").toString(), equalTo("skipped")); + List failures = (List) invalidRemoteEntry.get("failures"); + assertThat(failures.size(), equalTo(1)); + Map failuresMap = (Map) failures.get(0); + Map reason = (Map) failuresMap.get("reason"); + assertThat(reason.get("type").toString(), equalTo("remote_transport_exception")); + assertThat(reason.get("reason").toString(), containsString("unable to connect to remote cluster")); + + } else { + // errors from invalid remote should throw an exception if the cluster is marked with skip_unavailable=false + ResponseException error = expectThrows(ResponseException.class, () -> { + final Response response1 = performRequestWithRemoteSearchUser(esqlRequest(q)); + }); + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(error.getMessage(), containsString("unable to find apikey")); + } } } @@ -887,7 +925,16 @@ public void testAlias() throws Exception { Request request = esqlRequest("FROM " + index + " | KEEP emp_id | SORT emp_id | LIMIT 100"); ResponseException error = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(request)); assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(400)); - assertThat(error.getMessage(), containsString(" Unknown index [" + index + "]")); + String expectedIndexExpressionInError = index.replace("*", "my_remote_cluster"); + Pattern p = Pattern.compile("Unknown index \\[([^\\]]+)\\]"); + Matcher m = p.matcher(error.getMessage()); + assertTrue("Pattern matcher to parse error message did not find matching string: " + error.getMessage(), m.find()); + String unknownIndexExpressionInErrorMessage = m.group(1); + Set actualUnknownIndexes = org.elasticsearch.common.Strings.commaDelimitedListToSet( + unknownIndexExpressionInErrorMessage + ); + Set expectedUnknownIndexes = org.elasticsearch.common.Strings.commaDelimitedListToSet(expectedIndexExpressionInError); + assertThat(actualUnknownIndexes, equalTo(expectedUnknownIndexes)); } for (var index : List.of( @@ -920,6 +967,7 @@ protected Request esqlRequest(String command) throws IOException { XContentBuilder body = JsonXContent.contentBuilder(); body.startObject(); body.field("query", command); + body.field("include_ccs_metadata", true); if (Build.current().isSnapshot() && randomBoolean()) { Settings.Builder settings = Settings.builder(); if (randomBoolean()) { From d6d11d8788030c618982f19f8d74f5c569c9352b Mon Sep 17 00:00:00 2001 From: "elastic-renovate-prod[bot]" <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 08:03:49 +0100 Subject: [PATCH 194/324] Update docker.elastic.co/wolfi/chainguard-base:latest Docker digest to 9734313 (main) (#115350) * Update docker.elastic.co/wolfi/chainguard-base:latest Docker digest to 9734313 * Only allow renovate PRs once per week --------- Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com> Co-authored-by: Rene Groeschke --- .../java/org/elasticsearch/gradle/internal/DockerBase.java | 2 +- renovate.json | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index 0535f0bdc3cc8..3e0a47a8f453c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -24,7 +24,7 @@ public enum DockerBase { // Chainguard based wolfi image with latest jdk // This is usually updated via renovatebot // spotless:off - WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:bf163e1977002301f7b9fd28fe6837a8cb2dd5c83e4cd45fb67fb28d15d5d40f", + WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:973431347ad45f40e01afbbd010bf9de929c088a63382239b90dd84f39618bc8", "-wolfi", "apk" ), diff --git a/renovate.json b/renovate.json index 293a2bb262375..c1637ae651c1c 100644 --- a/renovate.json +++ b/renovate.json @@ -4,6 +4,9 @@ "github>elastic/renovate-config:only-chainguard", ":disableDependencyDashboard" ], + "schedule": [ + "after 1pm on tuesday" + ], "labels": [">non-issue", ":Delivery/Packaging", "Team:Delivery"], "baseBranches": ["main", "8.x"], "packageRules": [ From 75ab4eb93bb17b89458042be8924a9c34fae626c Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Wed, 30 Oct 2024 08:36:27 +0100 Subject: [PATCH 195/324] fix: _ignored_source is a multi-value field (#115853) Co-authored-by: Elastic Machine --- muted-tests.yml | 6 ------ .../main/java/org/elasticsearch/index/get/GetResult.java | 5 +++-- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 22e57a524f0bc..ddb50c5a829f9 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -251,18 +251,12 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 -- class: org.elasticsearch.index.get.GetResultTests - method: testToAndFromXContent - issue: https://github.com/elastic/elasticsearch/issues/115688 - class: org.elasticsearch.action.update.UpdateResponseTests method: testToAndFromXContent issue: https://github.com/elastic/elasticsearch/issues/115689 - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testStalledShardMigrationProperlyDetected issue: https://github.com/elastic/elasticsearch/issues/115697 -- class: org.elasticsearch.index.get.GetResultTests - method: testToAndFromXContentEmbedded - issue: https://github.com/elastic/elasticsearch/issues/115657 - class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT method: testGeoShapeGeoHash issue: https://github.com/elastic/elasticsearch/issues/115664 diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index 109f645f24caf..3c504d400c7c6 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.search.lookup.Source; @@ -247,7 +248,7 @@ public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params for (DocumentField field : metaFields.values()) { // TODO: can we avoid having an exception here? - if (field.getName().equals(IgnoredFieldMapper.NAME)) { + if (field.getName().equals(IgnoredFieldMapper.NAME) || field.getName().equals(IgnoredSourceFieldMapper.NAME)) { builder.field(field.getName(), field.getValues()); } else { builder.field(field.getName(), field.getValue()); @@ -341,7 +342,7 @@ public static GetResult fromXContentEmbedded(XContentParser parser, String index parser.skipChildren(); // skip potential inner objects for forward compatibility } } else if (token == XContentParser.Token.START_ARRAY) { - if (IgnoredFieldMapper.NAME.equals(currentFieldName)) { + if (IgnoredFieldMapper.NAME.equals(currentFieldName) || IgnoredSourceFieldMapper.NAME.equals(currentFieldName)) { metaFields.put(currentFieldName, new DocumentField(currentFieldName, parser.list())); } else { parser.skipChildren(); // skip potential inner arrays for forward compatibility From feea0a09b87509a201e7dc5a9a79c0e2bd29d620 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Wed, 30 Oct 2024 10:14:22 +0100 Subject: [PATCH 196/324] [DOCS] Update connectors link on landing page (#115904) --- docs/reference/landing-page.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/landing-page.asciidoc b/docs/reference/landing-page.asciidoc index f1b5ce8210996..1f2145a3aae82 100644 --- a/docs/reference/landing-page.asciidoc +++ b/docs/reference/landing-page.asciidoc @@ -128,7 +128,7 @@ Adding data to Elasticsearch
  • - Connectors + Connectors
  • Web crawler From c6f7827105ffa8b986c830e80ab3b230f5860b5a Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:33:31 +0000 Subject: [PATCH 197/324] Fix and unmute org.elasticsearch.script.StatsSummaryTests:testEqualsAndHashCode (#115922) This commit fixes and unmutes org.elasticsearch.script.StatsSummaryTests:testEqualsAndHashCode. Previously, there was no guarantee that the doubles added to stats1 and stats2 will be different. In fact, the count may even be zero - which we seen in one particular failure. The simplest thing here, to avoid this potential situation, is to ensure that there is at least one value, and that the values added to each stats instance are different. --- muted-tests.yml | 3 --- .../elasticsearch/script/StatsSummaryTests.java | 16 ++++++++++++++-- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index ddb50c5a829f9..f60d16d373f32 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -103,9 +103,6 @@ tests: - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT method: test {case-functions.testUcaseInline3} issue: https://github.com/elastic/elasticsearch/issues/112643 -- class: org.elasticsearch.script.StatsSummaryTests - method: testEqualsAndHashCode - issue: https://github.com/elastic/elasticsearch/issues/112439 - class: org.elasticsearch.repositories.blobstore.testkit.analyze.HdfsRepositoryAnalysisRestIT issue: https://github.com/elastic/elasticsearch/issues/112889 - class: org.elasticsearch.xpack.sql.qa.security.JdbcSqlSpecIT diff --git a/server/src/test/java/org/elasticsearch/script/StatsSummaryTests.java b/server/src/test/java/org/elasticsearch/script/StatsSummaryTests.java index 9cfa586de11ab..1e3fdb4e6a9ff 100644 --- a/server/src/test/java/org/elasticsearch/script/StatsSummaryTests.java +++ b/server/src/test/java/org/elasticsearch/script/StatsSummaryTests.java @@ -75,9 +75,21 @@ public void testEqualsAndHashCode() { assertThat(stats1, equalTo(stats2)); assertThat(stats1.hashCode(), equalTo(stats2.hashCode())); + // Accumulators with same sum, but different counts are not equals + stats1.accept(1); + stats1.accept(1); + stats2.accept(2); + assertThat(stats1.getSum(), equalTo(stats2.getSum())); + assertThat(stats1.getCount(), not(equalTo(stats2.getCount()))); + assertThat(stats1, not(equalTo(stats2))); + assertThat(stats1.hashCode(), not(equalTo(stats2.hashCode()))); + // Accumulators with different values are not equals - randomDoubles(randomIntBetween(0, 20)).forEach(stats1); - randomDoubles(randomIntBetween(0, 20)).forEach(stats2); + stats1.reset(); + stats2.reset(); + randomDoubles(randomIntBetween(1, 20)).forEach(stats1.andThen(v -> stats2.accept(v + randomDoubleBetween(0.0, 1.0, false)))); + assertThat(stats1.getCount(), equalTo(stats2.getCount())); + assertThat(stats1.getSum(), not(equalTo(stats2.getSum()))); assertThat(stats1, not(equalTo(stats2))); assertThat(stats1.hashCode(), not(equalTo(stats2.hashCode()))); } From 0416812456af0a763a5f43f9ab6813221ea6e4d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Wed, 30 Oct 2024 15:31:26 +0100 Subject: [PATCH 198/324] Term Stats documentation (#115933) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Term Stats documentation * Update docs/reference/reranking/learning-to-rank-model-training.asciidoc Co-authored-by: István Zoltán Szabó * Fix query example. --------- Co-authored-by: István Zoltán Szabó --- .../query-dsl/script-score-query.asciidoc | 13 +++- .../learning-to-rank-model-training.asciidoc | 37 +++++++--- .../learning-to-rank-search-usage.asciidoc | 7 -- docs/reference/scripting/fields.asciidoc | 73 +++++++++++++++++++ 4 files changed, 108 insertions(+), 22 deletions(-) diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index 9291b8c15f0d1..051c9c6f9c32d 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -62,10 +62,17 @@ multiplied by `boost` to produce final documents' scores. Defaults to `1.0`. ===== Use relevance scores in a script Within a script, you can -{ref}/modules-scripting-fields.html#scripting-score[access] +{ref}/modules-scripting-fields.html#scripting-score[access] the `_score` variable which represents the current relevance score of a document. +[[script-score-access-term-statistics]] +===== Use term statistics in a script + +Within a script, you can +{ref}/modules-scripting-fields.html#scripting-term-statistics[access] +the `_termStats` variable which provides statistical information about the terms used in the child query of the `script_score` query. + [[script-score-predefined-functions]] ===== Predefined functions You can use any of the available {painless}/painless-contexts.html[painless @@ -147,7 +154,7 @@ updated since update operations also update the value of the `_seq_no` field. [[decay-functions-numeric-fields]] ====== Decay functions for numeric fields -You can read more about decay functions +You can read more about decay functions {ref}/query-dsl-function-score-query.html#function-decay[here]. * `double decayNumericLinear(double origin, double scale, double offset, double decay, double docValue)` @@ -233,7 +240,7 @@ The `script_score` query calculates the score for every matching document, or hit. There are faster alternative query types that can efficiently skip non-competitive hits: -* If you want to boost documents on some static fields, use the +* If you want to boost documents on some static fields, use the <> query. * If you want to boost documents closer to a date or geographic point, use the <> query. diff --git a/docs/reference/reranking/learning-to-rank-model-training.asciidoc b/docs/reference/reranking/learning-to-rank-model-training.asciidoc index 0f4640ebdf347..8e0b3f9ae94ce 100644 --- a/docs/reference/reranking/learning-to-rank-model-training.asciidoc +++ b/docs/reference/reranking/learning-to-rank-model-training.asciidoc @@ -38,11 +38,21 @@ Feature extractors are defined using templated queries. https://eland.readthedoc from eland.ml.ltr import QueryFeatureExtractor feature_extractors=[ - # We want to use the score of the match query for the title field as a feature: + # We want to use the BM25 score of the match query for the title field as a feature: QueryFeatureExtractor( feature_name="title_bm25", query={"match": {"title": "{{query}}"}} ), + # We want to use the the number of matched terms in the title field as a feature: + QueryFeatureExtractor( + feature_name="title_matched_term_count", + query={ + "script_score": { + "query": {"match": {"title": "{{query}}"}}, + "script": {"source": "return _termStats.matchedTermsCount();"}, + } + }, + ), # We can use a script_score query to get the value # of the field rating directly as a feature: QueryFeatureExtractor( @@ -54,19 +64,13 @@ feature_extractors=[ } }, ), - # We can execute a script on the value of the query - # and use the return value as a feature: - QueryFeatureExtractor( - feature_name="query_length", + # We extract the number of terms in the query as feature. + QueryFeatureExtractor( + feature_name="query_term_count", query={ "script_score": { - "query": {"match_all": {}}, - "script": { - "source": "return params['query'].splitOnToken(' ').length;", - "params": { - "query": "{{query}}", - } - }, + "query": {"match": {"title": "{{query}}"}}, + "script": {"source": "return _termStats.uniqueTermsCount();"}, } }, ), @@ -74,6 +78,15 @@ feature_extractors=[ ---- // NOTCONSOLE +[NOTE] +.Tern statistics as features +=================================================== + +It is very common for an LTR model to leverage raw term statistics as features. +To extract this information, you can use the {ref}/modules-scripting-fields.html#scripting-term-statistics[term statistics feature] provided as part of the <> query. + +=================================================== + Once the feature extractors have been defined, they are wrapped in an `eland.ml.ltr.LTRModelConfig` object for use in later training steps: [source,python] diff --git a/docs/reference/reranking/learning-to-rank-search-usage.asciidoc b/docs/reference/reranking/learning-to-rank-search-usage.asciidoc index f14219e24bc11..afb623dc2b1c9 100644 --- a/docs/reference/reranking/learning-to-rank-search-usage.asciidoc +++ b/docs/reference/reranking/learning-to-rank-search-usage.asciidoc @@ -61,10 +61,3 @@ When exposing pagination to users, `window_size` should remain constant as each ====== Negative scores Depending on how your model is trained, it’s possible that the model will return negative scores for documents. While negative scores are not allowed from first-stage retrieval and ranking, it is possible to use them in the LTR rescorer. - -[discrete] -[[learning-to-rank-rescorer-limitations-term-statistics]] -====== Term statistics as features - -We do not currently support term statistics as features, however future releases will introduce this capability. - diff --git a/docs/reference/scripting/fields.asciidoc b/docs/reference/scripting/fields.asciidoc index c2a40d4519f9f..8a9bb3c712789 100644 --- a/docs/reference/scripting/fields.asciidoc +++ b/docs/reference/scripting/fields.asciidoc @@ -80,6 +80,79 @@ GET my-index-000001/_search } ------------------------------------- +[discrete] +[[scripting-term-statistics]] +=== Accessing term statistics of a document within a script + +Scripts used in a <> query have access to the `_termStats` variable which provides statistical information about the terms in the child query. + +In the following example, `_termStats` is used within a <> query to retrieve the average term frequency for the terms `quick`, `brown`, and `fox` in the `text` field: + +[source,console] +------------------------------------- +PUT my-index-000001/_doc/1?refresh +{ + "text": "quick brown fox" +} + +PUT my-index-000001/_doc/2?refresh +{ + "text": "quick fox" +} + +GET my-index-000001/_search +{ + "query": { + "script_score": { + "query": { <1> + "match": { + "text": "quick brown fox" + } + }, + "script": { + "source": "_termStats.termFreq().getAverage()" <2> + } + } + } +} +------------------------------------- + +<1> Child query used to infer the field and the terms considered in term statistics. + +<2> The script calculates the average document frequency for the terms in the query using `_termStats`. + +`_termStats` provides access to the following functions for working with term statistics: + +- `uniqueTermsCount`: Returns the total number of unique terms in the query. This value is the same across all documents. +- `matchedTermsCount`: Returns the count of query terms that matched within the current document. +- `docFreq`: Provides document frequency statistics for the terms in the query, indicating how many documents contain each term. This value is consistent across all documents. +- `totalTermFreq`: Provides the total frequency of terms across all documents, representing how often each term appears in the entire corpus. This value is consistent across all documents. +- `termFreq`: Returns the frequency of query terms within the current document, showing how often each term appears in that document. + +[NOTE] +.Functions returning aggregated statistics +=================================================== + +The `docFreq`, `termFreq` and `totalTermFreq` functions return objects that represent statistics across all terms of the child query. + +Statistics provides support for the following methods: + +`getAverage()`: Returns the average value of the metric. +`getMin()`: Returns the minimum value of the metric. +`getMax()`: Returns the maximum value of the metric. +`getSum()`: Returns the sum of the metric values. +`getCount()`: Returns the count of terms included in the metric calculation. + +=================================================== + + +[NOTE] +.Painless language required +=================================================== + +The `_termStats` variable is only available when using the <> scripting language. + +=================================================== [discrete] [[modules-scripting-doc-vals]] From 6a9f8f3db3e4388c0301f9041258c1de6da39b9a Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 30 Oct 2024 07:31:59 -0700 Subject: [PATCH 199/324] Avoid resolving project dependencies in 'resolveAllDependencies' task (#115888) --- .../internal/ResolveAllDependencies.java | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java index c63a902afde37..694282adac051 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java @@ -12,8 +12,11 @@ import org.elasticsearch.gradle.VersionProperties; import org.gradle.api.DefaultTask; import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.FileCollectionDependency; +import org.gradle.api.artifacts.component.ModuleComponentIdentifier; import org.gradle.api.file.FileCollection; import org.gradle.api.model.ObjectFactory; +import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.TaskAction; @@ -26,9 +29,6 @@ import javax.inject.Inject; -import static org.elasticsearch.gradle.DistributionDownloadPlugin.DISTRO_EXTRACTED_CONFIG_PREFIX; -import static org.elasticsearch.gradle.internal.test.rest.compat.compat.LegacyYamlRestCompatTestPlugin.BWC_MINOR_CONFIG_NAME; - public abstract class ResolveAllDependencies extends DefaultTask { private boolean resolveJavaToolChain = false; @@ -37,18 +37,28 @@ public abstract class ResolveAllDependencies extends DefaultTask { protected abstract JavaToolchainService getJavaToolchainService(); private final ObjectFactory objectFactory; + private final ProviderFactory providerFactory; private Collection configs; @Inject - public ResolveAllDependencies(ObjectFactory objectFactory) { + public ResolveAllDependencies(ObjectFactory objectFactory, ProviderFactory providerFactory) { this.objectFactory = objectFactory; + this.providerFactory = providerFactory; } @InputFiles public FileCollection getResolvedArtifacts() { - return objectFactory.fileCollection() - .from(configs.stream().filter(ResolveAllDependencies::canBeResolved).collect(Collectors.toList())); + return objectFactory.fileCollection().from(configs.stream().filter(ResolveAllDependencies::canBeResolved).map(c -> { + // Make a copy of the configuration, omitting file collection dependencies to avoid building project artifacts + Configuration copy = c.copyRecursive(d -> d instanceof FileCollectionDependency == false); + copy.setCanBeConsumed(false); + return copy; + }) + // Include only module dependencies, ignoring things like project dependencies so we don't unnecessarily build stuff + .map(c -> c.getIncoming().artifactView(v -> v.lenient(true).componentFilter(i -> i instanceof ModuleComponentIdentifier))) + .map(artifactView -> providerFactory.provider(artifactView::getFiles)) + .collect(Collectors.toList())); } @TaskAction @@ -95,8 +105,8 @@ private static boolean canBeResolved(Configuration configuration) { return false; } } - return configuration.getName().startsWith(DISTRO_EXTRACTED_CONFIG_PREFIX) == false - && configuration.getName().equals(BWC_MINOR_CONFIG_NAME) == false; + + return true; } } From 099a3dbb2565e0d114492fce0bdc48e61519e05a Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 30 Oct 2024 07:32:17 -0700 Subject: [PATCH 200/324] Fix example plugins tests (#115890) --- plugins/examples/settings.gradle | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/examples/settings.gradle b/plugins/examples/settings.gradle index 1f168525d4b1d..bbeb024957b75 100644 --- a/plugins/examples/settings.gradle +++ b/plugins/examples/settings.gradle @@ -37,6 +37,10 @@ gradle.projectsEvaluated { configurations.all { resolutionStrategy.dependencySubstitution { // When using composite builds we need to tell Gradle to use the project names since we rename the published artifacts + substitute module('org.elasticsearch.plugin:elasticsearch-plugin-api') using module("org.elasticsearch.plugin:plugin-api:${elasticsearchVersion}") + substitute module('org.elasticsearch.plugin:elasticsearch-plugin-analysis-api') using module("org.elasticsearch.plugin:plugin-analysis-api:${elasticsearchVersion}") + substitute module('org.elasticsearch:elasticsearch-plugin-scanner') using module("org.elasticsearch:plugin-scanner:${elasticsearchVersion}") + substitute module('org.elasticsearch:elasticsearch-core') using module("org.elasticsearch:core:${elasticsearchVersion}") substitute module('org.elasticsearch:elasticsearch') using module("org.elasticsearch:server:${elasticsearchVersion}") substitute module('org.elasticsearch.client:elasticsearch-rest-client') using module("org.elasticsearch.client:rest:${elasticsearchVersion}") substitute module('org.elasticsearch.plugin:x-pack-core') using module("org.elasticsearch.plugin:core:${elasticsearchVersion}") From 6b5f6fbc6e34643f4ff1ee5700a600e2be63bfe9 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 30 Oct 2024 14:48:26 +0000 Subject: [PATCH 201/324] [ML] Wait for all shards to be active when creating the ML stats index (#108202) * Wait for all shards to be active when creating the ML stats index * Unmute tests * Wait for the stats index in cleanup * more waiting for the stats index * Add adminclient to ensureHealth Co-authored-by: Pat Whelan * fix errors causing build failures --------- Co-authored-by: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com> Co-authored-by: Pat Whelan Co-authored-by: Max Hniebergall Co-authored-by: Elastic Machine --- .../org/elasticsearch/test/rest/ESRestTestCase.java | 2 +- .../elasticsearch/xpack/core/ml/MlStatsIndex.java | 12 +++++++++++- .../ml/job/persistence/AnomalyDetectorsIndex.java | 9 +++++++++ .../xpack/core/ml/utils/MlIndexAndAlias.java | 9 +++++++-- .../core/ml/integration/MlRestTestStateCleaner.java | 9 +++++++++ .../xpack/core/ml/utils/MlIndexAndAliasTests.java | 4 +++- .../xpack/ml/integration/InferenceProcessorIT.java | 3 ++- .../rest-api-spec/test/ml/inference_crud.yml | 11 ----------- 8 files changed, 42 insertions(+), 17 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 22f93e6bda61f..676fb13d29428 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -1726,7 +1726,7 @@ public static void ensureHealth(RestClient restClient, Consumer request ensureHealth(restClient, "", requestConsumer); } - protected static void ensureHealth(RestClient restClient, String index, Consumer requestConsumer) throws IOException { + public static void ensureHealth(RestClient restClient, String index, Consumer requestConsumer) throws IOException { Request request = new Request("GET", "/_cluster/health" + (index.isBlank() ? "" : "/" + index)); requestConsumer.accept(request); try { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlStatsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlStatsIndex.java index 97dede7cf0c6f..c0d62c7b29170 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlStatsIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlStatsIndex.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -66,6 +67,15 @@ public static void createStatsIndexAndAliasIfNecessary( TimeValue masterNodeTimeout, ActionListener listener ) { - MlIndexAndAlias.createIndexAndAliasIfNecessary(client, state, resolver, TEMPLATE_NAME, writeAlias(), masterNodeTimeout, listener); + MlIndexAndAlias.createIndexAndAliasIfNecessary( + client, + state, + resolver, + TEMPLATE_NAME, + writeAlias(), + masterNodeTimeout, + ActiveShardCount.ALL, + listener + ); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java index 0acc953c24039..7a098d432f35b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -91,6 +92,10 @@ public static void createStateIndexAndAliasIfNecessary( AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX, AnomalyDetectorsIndex.jobStateIndexWriteAlias(), masterNodeTimeout, + // TODO: shard count default preserves the existing behaviour when the + // parameter was added but it may be that ActiveShardCount.ALL is a + // better option + ActiveShardCount.DEFAULT, finalListener ); } @@ -123,6 +128,10 @@ public static void createStateIndexAndAliasIfNecessaryAndWaitForYellow( AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX, AnomalyDetectorsIndex.jobStateIndexWriteAlias(), masterNodeTimeout, + // TODO: shard count default preserves the existing behaviour when the + // parameter was added but it may be that ActiveShardCount.ALL is a + // better option + ActiveShardCount.DEFAULT, stateIndexAndAliasCreated ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java index 1603ad67718c3..b630bafdbc77d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -105,6 +106,7 @@ public static void createIndexAndAliasIfNecessary( String indexPatternPrefix, String alias, TimeValue masterNodeTimeout, + ActiveShardCount waitForShardCount, ActionListener finalListener ) { @@ -133,7 +135,7 @@ public static void createIndexAndAliasIfNecessary( if (concreteIndexNames.length == 0) { if (indexPointedByCurrentWriteAlias.isEmpty()) { - createFirstConcreteIndex(client, firstConcreteIndex, alias, true, indexCreatedListener); + createFirstConcreteIndex(client, firstConcreteIndex, alias, true, waitForShardCount, indexCreatedListener); return; } logger.error( @@ -144,7 +146,7 @@ public static void createIndexAndAliasIfNecessary( ); } else if (concreteIndexNames.length == 1 && concreteIndexNames[0].equals(legacyIndexWithoutSuffix)) { if (indexPointedByCurrentWriteAlias.isEmpty()) { - createFirstConcreteIndex(client, firstConcreteIndex, alias, true, indexCreatedListener); + createFirstConcreteIndex(client, firstConcreteIndex, alias, true, waitForShardCount, indexCreatedListener); return; } if (indexPointedByCurrentWriteAlias.get().equals(legacyIndexWithoutSuffix)) { @@ -153,6 +155,7 @@ public static void createIndexAndAliasIfNecessary( firstConcreteIndex, alias, false, + waitForShardCount, indexCreatedListener.delegateFailureAndWrap( (l, unused) -> updateWriteAlias(client, alias, legacyIndexWithoutSuffix, firstConcreteIndex, l) ) @@ -241,6 +244,7 @@ private static void createFirstConcreteIndex( String index, String alias, boolean addAlias, + ActiveShardCount waitForShardCount, ActionListener listener ) { logger.info("About to create first concrete index [{}] with alias [{}]", index, alias); @@ -248,6 +252,7 @@ private static void createFirstConcreteIndex( if (addAlias) { requestBuilder.addAlias(new Alias(alias).isHidden(true)); } + requestBuilder.setWaitForActiveShards(waitForShardCount); CreateIndexRequest request = requestBuilder.request(); executeAsyncWithOrigin( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java index dc7967f7386fb..6f6224d505327 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java @@ -30,6 +30,7 @@ public MlRestTestStateCleaner(Logger logger, RestClient adminClient) { } public void resetFeatures() throws IOException { + waitForMlStatsIndexToInitialize(); deleteAllTrainedModelIngestPipelines(); // This resets all features, not just ML, but they should have been getting reset between tests anyway so it shouldn't matter adminClient.performRequest(new Request("POST", "/_features/_reset")); @@ -54,4 +55,12 @@ private void deleteAllTrainedModelIngestPipelines() throws IOException { } } } + + private void waitForMlStatsIndexToInitialize() throws IOException { + ESRestTestCase.ensureHealth(adminClient, ".ml-stats-*", (request) -> { + request.addParameter("wait_for_no_initializing_shards", "true"); + request.addParameter("level", "shards"); + request.addParameter("timeout", "30s"); + }); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java index 1d2190a29fa30..8e20ba4bfa9bd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.internal.AdminClient; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ClusterAdminClient; @@ -370,7 +371,8 @@ private void createIndexAndAliasIfNecessary(ClusterState clusterState) { TestIndexNameExpressionResolver.newInstance(), TEST_INDEX_PREFIX, TEST_INDEX_ALIAS, - TEST_REQUEST_TIMEOUT, + TimeValue.timeValueSeconds(30), + ActiveShardCount.DEFAULT, listener ); } diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceProcessorIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceProcessorIT.java index a8b0174628894..fb1b6948d0032 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceProcessorIT.java +++ b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceProcessorIT.java @@ -40,13 +40,13 @@ private void putModelAlias(String modelAlias, String newModel) throws IOExceptio } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107777") public void testCreateAndDeletePipelineWithInferenceProcessor() throws Exception { putRegressionModel(MODEL_ID); String pipelineId = "regression-model-pipeline"; createdPipelines.add(pipelineId); putPipeline(MODEL_ID, pipelineId); + waitForStats(); Map statsAsMap = getStats(MODEL_ID); List pipelineCount = (List) XContentMapValues.extractValue("trained_model_stats.pipeline_count", statsAsMap); assertThat(pipelineCount.get(0), equalTo(1)); @@ -107,6 +107,7 @@ public void testCreateAndDeletePipelineWithInferenceProcessorByName() throws Exc createdPipelines.add("second_pipeline"); putPipeline("regression_second", "second_pipeline"); + waitForStats(); Map statsAsMap = getStats(MODEL_ID); List pipelineCount = (List) XContentMapValues.extractValue("trained_model_stats.pipeline_count", statsAsMap); assertThat(pipelineCount.get(0), equalTo(2)); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml index 4a1b2379888da..a53e5be54e35b 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml @@ -563,9 +563,6 @@ setup: --- "Test delete given model referenced by pipeline": - - skip: - awaits_fix: "https://github.com/elastic/elasticsearch/issues/80703" - - do: ingest.put_pipeline: id: "pipeline-using-a-classification-model" @@ -592,9 +589,6 @@ setup: --- "Test force delete given model referenced by pipeline": - - skip: - awaits_fix: "https://github.com/elastic/elasticsearch/issues/80703" - - do: ingest.put_pipeline: id: "pipeline-using-a-classification-model" @@ -622,9 +616,6 @@ setup: --- "Test delete given model with alias referenced by pipeline": - - skip: - awaits_fix: "https://github.com/elastic/elasticsearch/issues/80703" - - do: ml.put_trained_model_alias: model_alias: "alias-to-a-classification-model" @@ -655,8 +646,6 @@ setup: --- "Test force delete given model with alias referenced by pipeline": - - skip: - awaits_fix: "https://github.com/elastic/elasticsearch/issues/106652" - do: ml.put_trained_model_alias: model_alias: "alias-to-a-classification-model" From 36ed99c6ca5e1381d56d3aecfc9793be0124d318 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Wed, 30 Oct 2024 17:26:06 +0200 Subject: [PATCH 202/324] Wait a bit before .async-search index shard is available (#115905) Co-authored-by: Elastic Machine --- muted-tests.yml | 3 --- .../org/elasticsearch/xpack/esql/EsqlAsyncSecurityIT.java | 8 ++++++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index f60d16d373f32..339790d15557e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -150,9 +150,6 @@ tests: - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT method: test {p0=search/500_date_range/from, to, include_lower, include_upper deprecated} issue: https://github.com/elastic/elasticsearch/pull/113286 -- class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT - method: testLimitedPrivilege - issue: https://github.com/elastic/elasticsearch/issues/113419 - class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT method: test {categorize.Categorize} issue: https://github.com/elastic/elasticsearch/issues/113428 diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlAsyncSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlAsyncSecurityIT.java index f2633dfffb0fe..b45ef45914985 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlAsyncSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlAsyncSecurityIT.java @@ -154,6 +154,14 @@ private Response runAsyncGet(String user, String id, boolean isAsyncIdNotFound_E } catch (InterruptedException ex) { throw new RuntimeException(ex); } + } else if (statusCode == 503 && message.contains("No shard available for [get [.async-search]")) { + // Workaround for https://github.com/elastic/elasticsearch/issues/113419 + logger.warn(".async-search index shards not yet available", e); + try { + Thread.sleep(500); + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } } else { throw e; } From 6b32bced368a0e8221a36ed1396ead551f843b29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Slobodan=20Adamovi=C4=87?= Date: Wed, 30 Oct 2024 16:28:34 +0100 Subject: [PATCH 203/324] Remove optional transitive `tink` and `protobuf-java` dependencies (#115916) This commit removes `com.google.crypto.tink` which is transitive and optional dependency of `oauth2-oidc-sdk` and `nimbus-jose-jwt`. We don't seem to be using any functionality that requires `tink` and thus `protobuf-java`. Removing them feels safer than having to maintain misaligned versions. --- gradle/verification-metadata.xml | 10 - modules/repository-azure/build.gradle | 30 +-- .../licenses/protobuf-java-LICENSE.txt | 32 --- .../licenses/protobuf-java-NOTICE.txt | 0 .../licenses/tink-LICENSE.txt | 202 ------------------ .../repository-azure/licenses/tink-NOTICE.txt | 0 6 files changed, 11 insertions(+), 263 deletions(-) delete mode 100644 modules/repository-azure/licenses/protobuf-java-LICENSE.txt delete mode 100644 modules/repository-azure/licenses/protobuf-java-NOTICE.txt delete mode 100644 modules/repository-azure/licenses/tink-LICENSE.txt delete mode 100644 modules/repository-azure/licenses/tink-NOTICE.txt diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 5cfe7adb5ea49..869cb64de54d0 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -579,11 +579,6 @@ - - - - - @@ -759,11 +754,6 @@ - - - - - diff --git a/modules/repository-azure/build.gradle b/modules/repository-azure/build.gradle index d011de81f4fb3..86776e743685e 100644 --- a/modules/repository-azure/build.gradle +++ b/modules/repository-azure/build.gradle @@ -63,8 +63,12 @@ dependencies { api "com.github.stephenc.jcip:jcip-annotations:1.0-1" api "com.nimbusds:content-type:2.3" api "com.nimbusds:lang-tag:1.7" - api "com.nimbusds:nimbus-jose-jwt:9.37.3" - api "com.nimbusds:oauth2-oidc-sdk:11.9.1" + api("com.nimbusds:nimbus-jose-jwt:9.37.3"){ + exclude group: 'com.google.crypto.tink', module: 'tink' // it's an optional dependency on which we don't rely + } + api("com.nimbusds:oauth2-oidc-sdk:11.9.1"){ + exclude group: 'com.google.crypto.tink', module: 'tink' // it's an optional dependency on which we don't rely + } api "jakarta.activation:jakarta.activation-api:1.2.1" api "jakarta.xml.bind:jakarta.xml.bind-api:2.3.3" api "net.java.dev.jna:jna-platform:${versions.jna}" // Maven says 5.14.0 but this aligns with the Elasticsearch-wide version @@ -74,8 +78,6 @@ dependencies { api "org.codehaus.woodstox:stax2-api:4.2.2" api "org.ow2.asm:asm:9.3" - runtimeOnly "com.google.crypto.tink:tink:1.14.0" - runtimeOnly "com.google.protobuf:protobuf-java:4.27.0" runtimeOnly "com.google.code.gson:gson:2.11.0" runtimeOnly "org.cryptomator:siv-mode:1.5.2" @@ -175,13 +177,11 @@ tasks.named("thirdPartyAudit").configure { // 'org.slf4j.ext.EventData' - bring back when https://github.com/elastic/elasticsearch/issues/93714 is done // Optional dependency of tink - 'com.google.api.client.http.HttpHeaders', - 'com.google.api.client.http.HttpRequest', - 'com.google.api.client.http.HttpRequestFactory', - 'com.google.api.client.http.HttpResponse', - 'com.google.api.client.http.HttpTransport', - 'com.google.api.client.http.javanet.NetHttpTransport', - 'com.google.api.client.http.javanet.NetHttpTransport$Builder', + 'com.google.crypto.tink.subtle.Ed25519Sign', + 'com.google.crypto.tink.subtle.Ed25519Sign$KeyPair', + 'com.google.crypto.tink.subtle.Ed25519Verify', + 'com.google.crypto.tink.subtle.X25519', + 'com.google.crypto.tink.subtle.XChaCha20Poly1305', // Optional dependency of nimbus-jose-jwt and oauth2-oidc-sdk 'org.bouncycastle.asn1.pkcs.PrivateKeyInfo', @@ -253,14 +253,6 @@ tasks.named("thirdPartyAudit").configure { 'javax.activation.MailcapCommandMap', 'javax.activation.MimetypesFileTypeMap', 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException', - - 'com.google.protobuf.MessageSchema', - 'com.google.protobuf.UnsafeUtil', - 'com.google.protobuf.UnsafeUtil$1', - 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', - 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', - 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', - 'com.google.protobuf.UnsafeUtil$MemoryAccessor', ) } diff --git a/modules/repository-azure/licenses/protobuf-java-LICENSE.txt b/modules/repository-azure/licenses/protobuf-java-LICENSE.txt deleted file mode 100644 index 19b305b00060a..0000000000000 --- a/modules/repository-azure/licenses/protobuf-java-LICENSE.txt +++ /dev/null @@ -1,32 +0,0 @@ -Copyright 2008 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. diff --git a/modules/repository-azure/licenses/protobuf-java-NOTICE.txt b/modules/repository-azure/licenses/protobuf-java-NOTICE.txt deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/modules/repository-azure/licenses/tink-LICENSE.txt b/modules/repository-azure/licenses/tink-LICENSE.txt deleted file mode 100644 index d645695673349..0000000000000 --- a/modules/repository-azure/licenses/tink-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/modules/repository-azure/licenses/tink-NOTICE.txt b/modules/repository-azure/licenses/tink-NOTICE.txt deleted file mode 100644 index e69de29bb2d1d..0000000000000 From 5ce74d385a1ed8d83bd56062329a5fc63c010bca Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 30 Oct 2024 15:32:42 +0000 Subject: [PATCH 204/324] Fix NodeStatsTests chunking (#115929) Rewrite the test to make it a bit clearer --- muted-tests.yml | 3 - .../cluster/node/stats/NodeStatsTests.java | 97 ++++++++++--------- 2 files changed, 49 insertions(+), 51 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 339790d15557e..131bbb14aec10 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -117,9 +117,6 @@ tests: - class: org.elasticsearch.xpack.sql.qa.security.JdbcSqlSpecIT method: test {case-functions.testSelectInsertWithLcaseAndLengthWithOrderBy} issue: https://github.com/elastic/elasticsearch/issues/112642 -- class: org.elasticsearch.action.admin.cluster.node.stats.NodeStatsTests - method: testChunking - issue: https://github.com/elastic/elasticsearch/issues/113139 - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test30StartStop issue: https://github.com/elastic/elasticsearch/issues/113160 diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index b5f61d5b798fa..7a31f0dcb4631 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.core.Nullable; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.http.HttpStats; @@ -93,6 +93,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.function.ToIntFunction; import java.util.stream.IntStream; import static java.util.Collections.emptySet; @@ -477,54 +478,56 @@ public void testSerialization() throws IOException { } public void testChunking() { - assertChunkCount( - createNodeStats(), - randomFrom(ToXContent.EMPTY_PARAMS, new ToXContent.MapParams(Map.of("level", "node"))), - nodeStats -> expectedChunks(nodeStats, NodeStatsLevel.NODE) - ); - assertChunkCount( - createNodeStats(), - new ToXContent.MapParams(Map.of("level", "indices")), - nodeStats -> expectedChunks(nodeStats, NodeStatsLevel.INDICES) - ); - assertChunkCount( - createNodeStats(), - new ToXContent.MapParams(Map.of("level", "shards")), - nodeStats -> expectedChunks(nodeStats, NodeStatsLevel.SHARDS) - ); + assertChunkCount(createNodeStats(), ToXContent.EMPTY_PARAMS, nodeStats -> expectedChunks(nodeStats, ToXContent.EMPTY_PARAMS)); + for (NodeStatsLevel l : NodeStatsLevel.values()) { + ToXContent.Params p = new ToXContent.MapParams(Map.of("level", l.getLevel())); + assertChunkCount(createNodeStats(), p, nodeStats -> expectedChunks(nodeStats, p)); + } } - private static int expectedChunks(NodeStats nodeStats, NodeStatsLevel level) { - return 7 // number of static chunks, see NodeStats#toXContentChunked - + expectedChunks(nodeStats.getHttp()) // - + expectedChunks(nodeStats.getIndices(), level) // - + expectedChunks(nodeStats.getTransport()) // - + expectedChunks(nodeStats.getIngestStats()) // - + expectedChunks(nodeStats.getThreadPool()) // - + expectedChunks(nodeStats.getScriptStats()) // - + expectedChunks(nodeStats.getScriptCacheStats()); + private static int expectedChunks(NodeStats nodeStats, ToXContent.Params params) { + return 3 // number of static chunks, see NodeStats#toXContentChunked + + assertExpectedChunks(nodeStats.getIndices(), i -> expectedChunks(i, NodeStatsLevel.of(params, NodeStatsLevel.NODE)), params) + + assertExpectedChunks(nodeStats.getThreadPool(), NodeStatsTests::expectedChunks, params) //
    + + chunkIfPresent(nodeStats.getFs()) //
    + + assertExpectedChunks(nodeStats.getTransport(), NodeStatsTests::expectedChunks, params) //
    + + assertExpectedChunks(nodeStats.getHttp(), NodeStatsTests::expectedChunks, params) //
    + + chunkIfPresent(nodeStats.getBreaker()) //
    + + assertExpectedChunks(nodeStats.getScriptStats(), NodeStatsTests::expectedChunks, params) //
    + + chunkIfPresent(nodeStats.getDiscoveryStats()) //
    + + assertExpectedChunks(nodeStats.getIngestStats(), NodeStatsTests::expectedChunks, params) //
    + + chunkIfPresent(nodeStats.getAdaptiveSelectionStats()) //
    + + assertExpectedChunks(nodeStats.getScriptCacheStats(), NodeStatsTests::expectedChunks, params); } - private static int expectedChunks(ScriptCacheStats scriptCacheStats) { - if (scriptCacheStats == null) return 0; + private static int chunkIfPresent(ToXContent xcontent) { + return xcontent == null ? 0 : 1; + } - var chunks = 4; - if (scriptCacheStats.general() != null) { - chunks += 3; - } else { - chunks += 2; - chunks += scriptCacheStats.context().size() * 6; + private static int assertExpectedChunks(T obj, ToIntFunction getChunks, ToXContent.Params params) { + if (obj == null) return 0; + int chunks = getChunks.applyAsInt(obj); + assertChunkCount(obj, params, t -> chunks); + return chunks; + } + + private static int expectedChunks(ScriptCacheStats scriptCacheStats) { + var chunks = 3; // start, end, SUM + if (scriptCacheStats.general() == null) { + chunks += 2 + scriptCacheStats.context().size() * 4; } return chunks; } private static int expectedChunks(ScriptStats scriptStats) { - return scriptStats == null ? 0 : 8 + scriptStats.contextStats().size(); + return 7 + (scriptStats.compilationsHistory() != null && scriptStats.compilationsHistory().areTimingsEmpty() == false ? 1 : 0) + + (scriptStats.cacheEvictionsHistory() != null && scriptStats.cacheEvictionsHistory().areTimingsEmpty() == false ? 1 : 0) + + scriptStats.contextStats().size(); } private static int expectedChunks(ThreadPoolStats threadPool) { - return threadPool == null ? 0 : 2 + threadPool.stats().stream().mapToInt(s -> { + return 2 + threadPool.stats().stream().mapToInt(s -> { var chunks = 0; chunks += s.threads() == -1 ? 0 : 1; chunks += s.queue() == -1 ? 0 : 1; @@ -536,25 +539,23 @@ private static int expectedChunks(ThreadPoolStats threadPool) { }).sum(); } - private static int expectedChunks(@Nullable IngestStats ingestStats) { - return ingestStats == null - ? 0 - : 2 + ingestStats.pipelineStats() - .stream() - .mapToInt(pipelineStats -> 2 + ingestStats.processorStats().getOrDefault(pipelineStats.pipelineId(), List.of()).size()) - .sum(); + private static int expectedChunks(IngestStats ingestStats) { + return 2 + ingestStats.pipelineStats() + .stream() + .mapToInt(pipelineStats -> 2 + ingestStats.processorStats().getOrDefault(pipelineStats.pipelineId(), List.of()).size()) + .sum(); } - private static int expectedChunks(@Nullable HttpStats httpStats) { - return httpStats == null ? 0 : 3 + httpStats.getClientStats().size() + httpStats.httpRouteStats().size(); + private static int expectedChunks(HttpStats httpStats) { + return 3 + httpStats.getClientStats().size() + httpStats.httpRouteStats().size(); } - private static int expectedChunks(@Nullable TransportStats transportStats) { - return transportStats == null ? 0 : 3; // only one transport action + private static int expectedChunks(TransportStats transportStats) { + return 3; // only one transport action } - private static int expectedChunks(@Nullable NodeIndicesStats nodeIndicesStats, NodeStatsLevel level) { - return nodeIndicesStats == null ? 0 : switch (level) { + private static int expectedChunks(NodeIndicesStats nodeIndicesStats, NodeStatsLevel level) { + return switch (level) { case NODE -> 2; case INDICES -> 5; // only one index case SHARDS -> 9; // only one shard From 30079d1a37c6a3992b1e18477a2219d374aa8204 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 30 Oct 2024 08:37:58 -0700 Subject: [PATCH 205/324] Update reference to libs project in IDE setup (#115942) --- build-tools-internal/src/main/groovy/elasticsearch.ide.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 86b48f744e16e..63a3cb6d86d68 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -137,7 +137,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { } } - // modifies the idea module config to enable preview features on 'elasticsearch-native' module + // modifies the idea module config to enable preview features on ':libs:native' module tasks.register("enablePreviewFeatures") { group = 'ide' description = 'Enables preview features on native library module' @@ -145,7 +145,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { doLast { ['main', 'test'].each { sourceSet -> - modifyXml(".idea/modules/libs/native/elasticsearch.libs.elasticsearch-native.${sourceSet}.iml") { xml -> + modifyXml(".idea/modules/libs/native/elasticsearch.libs.${project.project(':libs:native').name}.${sourceSet}.iml") { xml -> xml.component.find { it.'@name' == 'NewModuleRootManager' }?.'@LANGUAGE_LEVEL' = 'JDK_21_PREVIEW' } } From a3615f067d1534b4cb6d105d2e66160f654dea4f Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Wed, 30 Oct 2024 16:40:18 +0100 Subject: [PATCH 206/324] ES|QL: fix LIMIT pushdown past MV_EXPAND (#115624) --- docs/changelog/115624.yaml | 7 + .../src/main/resources/mv_expand.csv-spec | 80 +++++ .../xpack/esql/action/EsqlCapabilities.java | 7 +- .../xpack/esql/analysis/Analyzer.java | 3 +- .../esql/optimizer/LogicalPlanOptimizer.java | 2 - .../logical/DuplicateLimitAfterMvExpand.java | 108 ------- .../logical/PushDownAndCombineLimits.java | 15 + .../xpack/esql/plan/logical/MvExpand.java | 23 +- .../xpack/esql/planner/Mapper.java | 11 +- .../LocalLogicalPlanOptimizerTests.java | 17 +- .../optimizer/LogicalPlanOptimizerTests.java | 273 +++++++++++++++--- .../esql/parser/StatementParserTests.java | 3 +- 12 files changed, 378 insertions(+), 171 deletions(-) create mode 100644 docs/changelog/115624.yaml delete mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/DuplicateLimitAfterMvExpand.java diff --git a/docs/changelog/115624.yaml b/docs/changelog/115624.yaml new file mode 100644 index 0000000000000..1992ed65679ca --- /dev/null +++ b/docs/changelog/115624.yaml @@ -0,0 +1,7 @@ +pr: 115624 +summary: "ES|QL: fix LIMIT pushdown past MV_EXPAND" +area: ES|QL +type: bug +issues: + - 102084 + - 102061 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec index 3a1ae3985e129..2a7c092798404 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec @@ -324,3 +324,83 @@ from employees | where emp_no == 10001 | keep * | mv_expand first_name; avg_worked_seconds:long | birth_date:date | emp_no:integer | first_name:keyword | gender:keyword | height:double | height.float:double | height.half_float:double | height.scaled_float:double | hire_date:date | is_rehired:boolean | job_positions:keyword | languages:integer | languages.byte:integer | languages.long:long | languages.short:integer | last_name:keyword | salary:integer | salary_change:double | salary_change.int:integer | salary_change.keyword:keyword | salary_change.long:long | still_hired:boolean 268728049 | 1953-09-02T00:00:00.000Z | 10001 | Georgi | M | 2.03 | 2.0299999713897705 | 2.029296875 | 2.0300000000000002 | 1986-06-26T00:00:00.000Z | [false, true] | [Accountant, Senior Python Developer] | 2 | 2 | 2 | 2 | Facello | 57305 | 1.19 | 1 | 1.19 | 1 | true ; + + +// see https://github.com/elastic/elasticsearch/issues/102061 +sortMvExpand +required_capability: add_limit_inside_mv_expand +row a = 1 | sort a | mv_expand a; + +a:integer +1 +; + +// see https://github.com/elastic/elasticsearch/issues/102061 +sortMvExpandFromIndex +required_capability: add_limit_inside_mv_expand +from employees | sort emp_no | mv_expand emp_no | limit 1 | keep emp_no; + +emp_no:integer +10001 +; + + +// see https://github.com/elastic/elasticsearch/issues/102061 +limitSortMvExpand +required_capability: add_limit_inside_mv_expand +row a = 1 | limit 1 | sort a | mv_expand a; + +a:integer +1 +; + + +// see https://github.com/elastic/elasticsearch/issues/102061 +limitSortMultipleMvExpand +required_capability: add_limit_inside_mv_expand +row a = [1, 2, 3, 4, 5], b = 2, c = 3 | sort a | mv_expand a | mv_expand b | mv_expand c | limit 3; + +a:integer | b:integer | c:integer +1 | 2 | 3 +2 | 2 | 3 +3 | 2 | 3 +; + + +multipleLimitSortMultipleMvExpand +required_capability: add_limit_inside_mv_expand +row a = [1, 2, 3, 4, 5], b = 2, c = 3 | sort a | mv_expand a | limit 2 | mv_expand b | mv_expand c | limit 3; + +a:integer | b:integer | c:integer +1 | 2 | 3 +2 | 2 | 3 +; + + +multipleLimitSortMultipleMvExpand2 +required_capability: add_limit_inside_mv_expand +row a = [1, 2, 3, 4, 5], b = 2, c = 3 | sort a | mv_expand a | limit 3 | mv_expand b | mv_expand c | limit 2; + +a:integer | b:integer | c:integer +1 | 2 | 3 +2 | 2 | 3 +; + + +//see https://github.com/elastic/elasticsearch/issues/102084 +whereMvExpand +required_capability: add_limit_inside_mv_expand +row a = 1, b = -15 | where b > 3 | mv_expand b; + +a:integer | b:integer +; + + +//see https://github.com/elastic/elasticsearch/issues/102084 +whereMvExpandOnIndex +required_capability: add_limit_inside_mv_expand +from employees | where emp_no == 10003 | mv_expand first_name | keep first_name; + +first_name:keyword +Parto +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 196a864db2c15..6439df6ee71ee 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -441,7 +441,12 @@ public enum Cap { /** * Support simplified syntax for named parameters for field and function names. */ - NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX(Build.current().isSnapshot()); + NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX(Build.current().isSnapshot()), + + /** + * Fix pushdown of LIMIT past MV_EXPAND + */ + ADD_LIMIT_INSIDE_MV_EXPAND; private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index b18f58b0a43cb..4768af4bc8edb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -507,7 +507,8 @@ private LogicalPlan resolveMvExpand(MvExpand p, List childrenOutput) resolved, resolved.resolved() ? new ReferenceAttribute(resolved.source(), resolved.name(), resolved.dataType(), resolved.nullable(), null, false) - : resolved + : resolved, + p.limit() ); } return p; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index a1da269f896da..fb3a1b5179beb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -19,7 +19,6 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.CombineProjections; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ConstantFolding; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ConvertStringToByteRef; -import org.elasticsearch.xpack.esql.optimizer.rules.logical.DuplicateLimitAfterMvExpand; import org.elasticsearch.xpack.esql.optimizer.rules.logical.FoldNull; import org.elasticsearch.xpack.esql.optimizer.rules.logical.LiteralsOnTheRight; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PartiallyFoldCase; @@ -174,7 +173,6 @@ protected static Batch operators() { new PruneColumns(), new PruneLiteralsInOrderBy(), new PushDownAndCombineLimits(), - new DuplicateLimitAfterMvExpand(), new PushDownAndCombineFilters(), new PushDownEval(), new PushDownRegexExtract(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/DuplicateLimitAfterMvExpand.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/DuplicateLimitAfterMvExpand.java deleted file mode 100644 index 8985f4ab24705..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/DuplicateLimitAfterMvExpand.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.optimizer.rules.logical; - -import org.elasticsearch.xpack.esql.core.expression.AttributeSet; -import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.Eval; -import org.elasticsearch.xpack.esql.plan.logical.Filter; -import org.elasticsearch.xpack.esql.plan.logical.Limit; -import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.MvExpand; -import org.elasticsearch.xpack.esql.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; -import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; - -public final class DuplicateLimitAfterMvExpand extends OptimizerRules.OptimizerRule { - - @Override - protected LogicalPlan rule(Limit limit) { - var child = limit.child(); - var shouldSkip = child instanceof Eval - || child instanceof Project - || child instanceof RegexExtract - || child instanceof Enrich - || child instanceof Limit; - - if (shouldSkip == false && child instanceof UnaryPlan unary) { - MvExpand mvExpand = descendantMvExpand(unary); - if (mvExpand != null) { - Limit limitBeforeMvExpand = limitBeforeMvExpand(mvExpand); - // if there is no "appropriate" limit before mv_expand, then push down a copy of the one after it so that: - // - a possible TopN is properly built as low as possible in the tree (closed to Lucene) - // - the input of mv_expand is as small as possible before it is expanded (less rows to inflate and occupy memory) - if (limitBeforeMvExpand == null) { - var duplicateLimit = new Limit(limit.source(), limit.limit(), mvExpand.child()); - return limit.replaceChild(propagateDuplicateLimitUntilMvExpand(duplicateLimit, mvExpand, unary)); - } - } - } - return limit; - } - - private static MvExpand descendantMvExpand(UnaryPlan unary) { - UnaryPlan plan = unary; - AttributeSet filterReferences = new AttributeSet(); - while (plan instanceof Aggregate == false) { - if (plan instanceof MvExpand mve) { - // don't return the mv_expand that has a filter after it which uses the expanded values - // since this will trigger the use of a potentially incorrect (too restrictive) limit further down in the tree - if (filterReferences.isEmpty() == false) { - if (filterReferences.contains(mve.target()) // the same field or reference attribute is used in mv_expand AND filter - || mve.target() instanceof ReferenceAttribute // or the mv_expand attr hasn't yet been resolved to a field attr - // or not all filter references have been resolved to field attributes - || filterReferences.stream().anyMatch(ref -> ref instanceof ReferenceAttribute)) { - return null; - } - } - return mve; - } else if (plan instanceof Filter filter) { - // gather all the filters' references to be checked later when a mv_expand is found - filterReferences.addAll(filter.references()); - } else if (plan instanceof OrderBy) { - // ordering after mv_expand COULD break the order of the results, so the limit shouldn't be copied past mv_expand - // something like from test | sort emp_no | mv_expand job_positions | sort first_name | limit 5 - // (the sort first_name likely changes the order of the docs after sort emp_no, so "limit 5" shouldn't be copied down - return null; - } - - if (plan.child() instanceof UnaryPlan unaryPlan) { - plan = unaryPlan; - } else { - break; - } - } - return null; - } - - private static Limit limitBeforeMvExpand(MvExpand mvExpand) { - UnaryPlan plan = mvExpand; - while (plan instanceof Aggregate == false) { - if (plan instanceof Limit limit) { - return limit; - } - if (plan.child() instanceof UnaryPlan unaryPlan) { - plan = unaryPlan; - } else { - break; - } - } - return null; - } - - private LogicalPlan propagateDuplicateLimitUntilMvExpand(Limit duplicateLimit, MvExpand mvExpand, UnaryPlan child) { - if (child == mvExpand) { - return mvExpand.replaceChild(duplicateLimit); - } else { - return child.replaceChild(propagateDuplicateLimitUntilMvExpand(duplicateLimit, mvExpand, (UnaryPlan) child.child())); - } - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java index 08f32b094a95a..153efa5b5c233 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java @@ -33,6 +33,21 @@ public LogicalPlan rule(Limit limit) { } else if (limit.child() instanceof UnaryPlan unary) { if (unary instanceof Eval || unary instanceof Project || unary instanceof RegexExtract || unary instanceof Enrich) { return unary.replaceChild(limit.replaceChild(unary.child())); + } else if (unary instanceof MvExpand mvx) { + // MV_EXPAND can increase the number of rows, so we cannot just push the limit down + // (we also have to preserve the LIMIT afterwards) + // + // To avoid infinite loops, ie. + // | MV_EXPAND | LIMIT -> | LIMIT | MV_EXPAND | LIMIT -> ... | MV_EXPAND | LIMIT + // we add an inner limit to MvExpand and just push down the existing limit, ie. + // | MV_EXPAND | LIMIT N -> | LIMIT N | MV_EXPAND (with limit N) + var limitSource = limit.limit(); + var limitVal = (int) limitSource.fold(); + Integer mvxLimit = mvx.limit(); + if (mvxLimit == null || mvxLimit > limitVal) { + mvx = new MvExpand(mvx.source(), mvx.child(), mvx.target(), mvx.expanded(), limitVal); + } + return mvx.replaceChild(limit.replaceChild(mvx.child())); } // check if there's a 'visible' descendant limit lower than the current one // and if so, align the current limit since it adds no value diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java index 46ebc43d698a6..949e4906e5033 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java @@ -27,13 +27,19 @@ public class MvExpand extends UnaryPlan { private final NamedExpression target; private final Attribute expanded; + private final Integer limit; private List output; public MvExpand(Source source, LogicalPlan child, NamedExpression target, Attribute expanded) { + this(source, child, target, expanded, null); + } + + public MvExpand(Source source, LogicalPlan child, NamedExpression target, Attribute expanded, Integer limit) { super(source, child); this.target = target; this.expanded = expanded; + this.limit = limit; } private MvExpand(StreamInput in) throws IOException { @@ -41,7 +47,8 @@ private MvExpand(StreamInput in) throws IOException { Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(LogicalPlan.class), in.readNamedWriteable(NamedExpression.class), - in.readNamedWriteable(Attribute.class) + in.readNamedWriteable(Attribute.class), + null // we only need this on the coordinator ); } @@ -51,6 +58,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(child()); out.writeNamedWriteable(target()); out.writeNamedWriteable(expanded()); + assert limit == null; } @Override @@ -78,6 +86,10 @@ public Attribute expanded() { return expanded; } + public Integer limit() { + return limit; + } + @Override protected AttributeSet computeReferences() { return target.references(); @@ -94,7 +106,7 @@ public boolean expressionsResolved() { @Override public UnaryPlan replaceChild(LogicalPlan newChild) { - return new MvExpand(source(), newChild, target, expanded); + return new MvExpand(source(), newChild, target, expanded, limit); } @Override @@ -107,12 +119,12 @@ public List output() { @Override protected NodeInfo info() { - return NodeInfo.create(this, MvExpand::new, child(), target, expanded); + return NodeInfo.create(this, MvExpand::new, child(), target, expanded, limit); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), target, expanded); + return Objects.hash(super.hashCode(), target, expanded, limit); } @Override @@ -120,6 +132,7 @@ public boolean equals(Object obj) { if (false == super.equals(obj)) { return false; } - return Objects.equals(target, ((MvExpand) obj).target) && Objects.equals(expanded, ((MvExpand) obj).expanded); + MvExpand other = ((MvExpand) obj); + return Objects.equals(target, other.target) && Objects.equals(expanded, other.expanded) && Objects.equals(limit, other.limit); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java index 152c492a34433..a8f820c8ef3fd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java @@ -11,6 +11,9 @@ import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.BinaryPlan; @@ -228,7 +231,13 @@ private PhysicalPlan map(UnaryPlan p, PhysicalPlan child) { } if (p instanceof MvExpand mvExpand) { - return new MvExpandExec(mvExpand.source(), map(mvExpand.child()), mvExpand.target(), mvExpand.expanded()); + MvExpandExec result = new MvExpandExec(mvExpand.source(), map(mvExpand.child()), mvExpand.target(), mvExpand.expanded()); + if (mvExpand.limit() != null) { + // MvExpand could have an inner limit + // see PushDownAndCombineLimits rule + return new LimitExec(result.source(), result, new Literal(Source.EMPTY, mvExpand.limit(), DataType.INTEGER)); + } + return result; } // diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java index e556d43a471c3..baef20081a4f2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java @@ -73,6 +73,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -193,11 +194,10 @@ public void testMissingFieldInSort() { /** * Expects - * EsqlProject[[first_name{f}#6]] - * \_Limit[1000[INTEGER]] - * \_MvExpand[last_name{f}#9,last_name{r}#15] - * \_Limit[1000[INTEGER]] - * \_EsRelation[test][_meta_field{f}#11, emp_no{f}#5, first_name{f}#6, ge..] + * EsqlProject[[first_name{f}#9, last_name{r}#18]] + * \_MvExpand[last_name{f}#12,last_name{r}#18,1000] + * \_Limit[1000[INTEGER]] + * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] */ public void testMissingFieldInMvExpand() { var plan = plan(""" @@ -213,11 +213,8 @@ public void testMissingFieldInMvExpand() { var projections = project.projections(); assertThat(Expressions.names(projections), contains("first_name", "last_name")); - var limit = as(project.child(), Limit.class); - // MvExpand cannot be optimized (yet) because the target NamedExpression cannot be replaced with a NULL literal - // https://github.com/elastic/elasticsearch/issues/109974 - // See LocalLogicalPlanOptimizer.ReplaceMissingFieldWithNull - var mvExpand = as(limit.child(), MvExpand.class); + var mvExpand = as(project.child(), MvExpand.class); + assertThat(mvExpand.limit(), equalTo(1000)); var limit2 = as(mvExpand.child(), Limit.class); as(limit2.child(), EsRelation.class); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index ff7675504d6ff..59ba8352d2aaf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -1239,11 +1239,10 @@ public void testDontCombineOrderByThroughMvExpand() { /** * Expected - * Limit[1000[INTEGER]] - * \_MvExpand[x{r}#159] - * \_EsqlProject[[first_name{f}#162 AS x]] - * \_Limit[1000[INTEGER]] - * \_EsRelation[test][first_name{f}#162] + * MvExpand[x{r}#4,x{r}#18,1000] + * \_EsqlProject[[first_name{f}#9 AS x]] + * \_Limit[1000[INTEGER]] + * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] */ public void testCopyDefaultLimitPastMvExpand() { LogicalPlan plan = optimizedPlan(""" @@ -1253,21 +1252,20 @@ public void testCopyDefaultLimitPastMvExpand() { | mv_expand x """); - var limit = as(plan, Limit.class); - var mvExpand = as(limit.child(), MvExpand.class); + var mvExpand = as(plan, MvExpand.class); + assertThat(mvExpand.limit(), equalTo(1000)); var keep = as(mvExpand.child(), EsqlProject.class); var limitPastMvExpand = as(keep.child(), Limit.class); - assertThat(limitPastMvExpand.limit(), equalTo(limit.limit())); + assertThat(limitPastMvExpand.limit().fold(), equalTo(1000)); as(limitPastMvExpand.child(), EsRelation.class); } /** * Expected - * Limit[10[INTEGER]] - * \_MvExpand[first_name{f}#155] - * \_EsqlProject[[first_name{f}#155, last_name{f}#156]] - * \_Limit[1[INTEGER]] - * \_EsRelation[test][first_name{f}#155, last_name{f}#156] + * MvExpand[first_name{f}#7,first_name{r}#16,10] + * \_EsqlProject[[first_name{f}#7, last_name{f}#10]] + * \_Limit[1[INTEGER]] + * \_EsRelation[test][_meta_field{f}#12, emp_no{f}#6, first_name{f}#7, ge..] */ public void testDontPushDownLimitPastMvExpand() { LogicalPlan plan = optimizedPlan(""" @@ -1277,28 +1275,26 @@ public void testDontPushDownLimitPastMvExpand() { | mv_expand first_name | limit 10"""); - var limit = as(plan, Limit.class); - assertThat(limit.limit().fold(), equalTo(10)); - var mvExpand = as(limit.child(), MvExpand.class); + var mvExpand = as(plan, MvExpand.class); + assertThat(mvExpand.limit(), equalTo(10)); var project = as(mvExpand.child(), EsqlProject.class); - limit = as(project.child(), Limit.class); + var limit = as(project.child(), Limit.class); assertThat(limit.limit().fold(), equalTo(1)); as(limit.child(), EsRelation.class); } /** * Expected - * EsqlProject[[emp_no{f}#141, first_name{f}#142, languages{f}#143, lll{r}#132, salary{f}#147]] - * \_TopN[[Order[salary{f}#147,DESC,FIRST], Order[first_name{f}#142,ASC,LAST]],5[INTEGER]] - * \_Limit[5[INTEGER]] - * \_MvExpand[salary{f}#147] - * \_Eval[[languages{f}#143 + 5[INTEGER] AS lll]] - * \_Filter[languages{f}#143 > 1[INTEGER]] - * \_Limit[10[INTEGER]] - * \_MvExpand[first_name{f}#142] - * \_TopN[[Order[emp_no{f}#141,DESC,FIRST]],10[INTEGER]] - * \_Filter[emp_no{f}#141 < 10006[INTEGER]] - * \_EsRelation[test][emp_no{f}#141, first_name{f}#142, languages{f}#1..] + * EsqlProject[[emp_no{f}#19, first_name{r}#29, languages{f}#22, lll{r}#9, salary{r}#30]] + * \_TopN[[Order[salary{r}#30,DESC,FIRST]],5[INTEGER]] + * \_MvExpand[salary{f}#24,salary{r}#30,5] + * \_Eval[[languages{f}#22 + 5[INTEGER] AS lll]] + * \_Limit[5[INTEGER]] + * \_Filter[languages{f}#22 > 1[INTEGER]] + * \_MvExpand[first_name{f}#20,first_name{r}#29,10] + * \_TopN[[Order[emp_no{f}#19,DESC,FIRST]],10[INTEGER]] + * \_Filter[emp_no{f}#19 ≤ 10006[INTEGER]] + * \_EsRelation[test][_meta_field{f}#25, emp_no{f}#19, first_name{f}#20, ..] */ public void testMultipleMvExpandWithSortAndLimit() { LogicalPlan plan = optimizedPlan(""" @@ -1319,14 +1315,13 @@ public void testMultipleMvExpandWithSortAndLimit() { var topN = as(keep.child(), TopN.class); assertThat(topN.limit().fold(), equalTo(5)); assertThat(orderNames(topN), contains("salary")); - var limit = as(topN.child(), Limit.class); - assertThat(limit.limit().fold(), equalTo(5)); - var mvExp = as(limit.child(), MvExpand.class); + var mvExp = as(topN.child(), MvExpand.class); + assertThat(mvExp.limit(), equalTo(5)); var eval = as(mvExp.child(), Eval.class); - var filter = as(eval.child(), Filter.class); - limit = as(filter.child(), Limit.class); - assertThat(limit.limit().fold(), equalTo(10)); - mvExp = as(limit.child(), MvExpand.class); + var limit5 = as(eval.child(), Limit.class); + var filter = as(limit5.child(), Filter.class); + mvExp = as(filter.child(), MvExpand.class); + assertThat(mvExp.limit(), equalTo(10)); topN = as(mvExp.child(), TopN.class); assertThat(topN.limit().fold(), equalTo(10)); filter = as(topN.child(), Filter.class); @@ -1434,10 +1429,9 @@ public void testDontPushDownLimitPastAggregate_AndMvExpand() { * Limit[5[INTEGER]] * \_Filter[ISNOTNULL(first_name{r}#22)] * \_Aggregate[STANDARD,[first_name{r}#22],[MAX(salary{f}#17,true[BOOLEAN]) AS max_s, first_name{r}#22]] - * \_Limit[50[INTEGER]] - * \_MvExpand[first_name{f}#13,first_name{r}#22] - * \_Limit[50[INTEGER]] - * \_EsRelation[test][_meta_field{f}#18, emp_no{f}#12, first_name{f}#13, ..] + * \_MvExpand[first_name{f}#13,first_name{r}#22,50] + * \_Limit[50[INTEGER]] + * \_EsRelation[test][_meta_field{f}#18, emp_no{f}#12, first_name{f}#13, ..] */ public void testPushDown_TheRightLimit_PastMvExpand() { LogicalPlan plan = optimizedPlan(""" @@ -1453,9 +1447,8 @@ public void testPushDown_TheRightLimit_PastMvExpand() { assertThat(limit.limit().fold(), equalTo(5)); var filter = as(limit.child(), Filter.class); var agg = as(filter.child(), Aggregate.class); - limit = as(agg.child(), Limit.class); - assertThat(limit.limit().fold(), equalTo(50)); - var mvExp = as(limit.child(), MvExpand.class); + var mvExp = as(agg.child(), MvExpand.class); + assertThat(mvExp.limit(), equalTo(50)); limit = as(mvExp.child(), Limit.class); assertThat(limit.limit().fold(), equalTo(50)); as(limit.child(), EsRelation.class); @@ -1492,6 +1485,143 @@ public void testPushDownLimit_PastEvalAndMvExpand() { as(topN.child(), EsRelation.class); } + /** + * Expected + * EsqlProject[[emp_no{f}#12, first_name{r}#22, salary{f}#17]] + * \_TopN[[Order[salary{f}#17,ASC,LAST], Order[first_name{r}#22,ASC,LAST]],1000[INTEGER]] + * \_Filter[gender{f}#14 == [46][KEYWORD] AND WILDCARDLIKE(first_name{r}#22)] + * \_MvExpand[first_name{f}#13,first_name{r}#22,null] + * \_TopN[[Order[emp_no{f}#12,ASC,LAST]],10000[INTEGER]] + * \_EsRelation[test][_meta_field{f}#18, emp_no{f}#12, first_name{f}#13, ..] + */ + public void testAddDefaultLimit_BeforeMvExpand_WithFilterOnExpandedField_ResultTruncationDefaultSize() { + LogicalPlan plan = optimizedPlan(""" + from test + | sort emp_no + | mv_expand first_name + | where gender == "F" + | where first_name LIKE "R*" + | keep emp_no, first_name, salary + | sort salary, first_name"""); + + var keep = as(plan, EsqlProject.class); + var topN = as(keep.child(), TopN.class); + assertThat(topN.limit().fold(), equalTo(1000)); + assertThat(orderNames(topN), contains("salary", "first_name")); + var filter = as(topN.child(), Filter.class); + assertThat(filter.condition(), instanceOf(And.class)); + var mvExp = as(filter.child(), MvExpand.class); + topN = as(mvExp.child(), TopN.class); // TODO is it correct? Double-check AddDefaultTopN rule + assertThat(orderNames(topN), contains("emp_no")); + as(topN.child(), EsRelation.class); + } + + /** + * Expected + * + * MvExpand[first_name{f}#7,first_name{r}#16,10] + * \_TopN[[Order[emp_no{f}#6,DESC,FIRST]],10[INTEGER]] + * \_Filter[emp_no{f}#6 ≤ 10006[INTEGER]] + * \_EsRelation[test][_meta_field{f}#12, emp_no{f}#6, first_name{f}#7, ge..] + */ + public void testFilterWithSortBeforeMvExpand() { + LogicalPlan plan = optimizedPlan(""" + from test + | where emp_no <= 10006 + | sort emp_no desc + | mv_expand first_name + | limit 10"""); + + var mvExp = as(plan, MvExpand.class); + assertThat(mvExp.limit(), equalTo(10)); + var topN = as(mvExp.child(), TopN.class); + assertThat(topN.limit().fold(), equalTo(10)); + assertThat(orderNames(topN), contains("emp_no")); + var filter = as(topN.child(), Filter.class); + as(filter.child(), EsRelation.class); + } + + /** + * Expected + * + * TopN[[Order[first_name{f}#10,ASC,LAST]],500[INTEGER]] + * \_MvExpand[last_name{f}#13,last_name{r}#20,null] + * \_Filter[emp_no{r}#19 > 10050[INTEGER]] + * \_MvExpand[emp_no{f}#9,emp_no{r}#19,null] + * \_EsRelation[test][_meta_field{f}#15, emp_no{f}#9, first_name{f}#10, g..] + */ + public void testMultiMvExpand_SortDownBelow() { + LogicalPlan plan = optimizedPlan(""" + from test + | sort last_name ASC + | mv_expand emp_no + | where emp_no > 10050 + | mv_expand last_name + | sort first_name"""); + + var topN = as(plan, TopN.class); + assertThat(topN.limit().fold(), equalTo(1000)); + assertThat(orderNames(topN), contains("first_name")); + var mvExpand = as(topN.child(), MvExpand.class); + var filter = as(mvExpand.child(), Filter.class); + mvExpand = as(filter.child(), MvExpand.class); + var topN2 = as(mvExpand.child(), TopN.class); // TODO is it correct? Double-check AddDefaultTopN rule + as(topN2.child(), EsRelation.class); + } + + /** + * Expected + * + * MvExpand[c{r}#7,c{r}#16,10000] + * \_EsqlProject[[c{r}#7, a{r}#3]] + * \_TopN[[Order[a{r}#3,ASC,FIRST]],7300[INTEGER]] + * \_MvExpand[b{r}#5,b{r}#15,7300] + * \_Limit[7300[INTEGER]] + * \_Row[[null[NULL] AS a, 123[INTEGER] AS b, 234[INTEGER] AS c]] + */ + public void testLimitThenSortBeforeMvExpand() { + LogicalPlan plan = optimizedPlan(""" + row a = null, b = 123, c = 234 + | mv_expand b + | limit 7300 + | keep c, a + | sort a NULLS FIRST + | mv_expand c"""); + + var mvExpand = as(plan, MvExpand.class); + assertThat(mvExpand.limit(), equalTo(10000)); + var project = as(mvExpand.child(), EsqlProject.class); + var topN = as(project.child(), TopN.class); + assertThat(topN.limit().fold(), equalTo(7300)); + assertThat(orderNames(topN), contains("a")); + mvExpand = as(topN.child(), MvExpand.class); + var limit = as(mvExpand.child(), Limit.class); + assertThat(limit.limit().fold(), equalTo(7300)); + as(limit.child(), Row.class); + } + + /** + * Expected + * TopN[[Order[first_name{r}#16,ASC,LAST]],10000[INTEGER]] + * \_MvExpand[first_name{f}#7,first_name{r}#16] + * \_EsRelation[test][_meta_field{f}#12, emp_no{f}#6, first_name{f}#7, ge..] + */ + public void testRemoveUnusedSortBeforeMvExpand_DefaultLimit10000() { + LogicalPlan plan = optimizedPlan(""" + from test + | sort emp_no + | mv_expand first_name + | sort first_name + | limit 15000"""); + + var topN = as(plan, TopN.class); + assertThat(orderNames(topN), contains("first_name")); + assertThat(topN.limit().fold(), equalTo(10000)); + var mvExpand = as(topN.child(), MvExpand.class); + var topN2 = as(mvExpand.child(), TopN.class); // TODO is it correct? Double-check AddDefaultTopN rule + as(topN2.child(), EsRelation.class); + } + /** * Expected * EsqlProject[[emp_no{f}#104, first_name{f}#105, salary{f}#106]] @@ -1597,6 +1727,65 @@ public void testAddDefaultLimit_BeforeMvExpand_WithFilterOnExpandedFieldAlias() as(topN.child(), EsRelation.class); } + /** + * Expected: + * MvExpand[a{r}#1402,a{r}#1406,1000] + * \_TopN[[Order[a{r}#1402,ASC,LAST]],1000[INTEGER]] + * \_Row[[1[INTEGER] AS a]] + */ + public void testSortMvExpand() { + LogicalPlan plan = optimizedPlan(""" + row a = 1 + | sort a + | mv_expand a"""); + + var expand = as(plan, MvExpand.class); + assertThat(expand.limit(), equalTo(1000)); + var topN = as(expand.child(), TopN.class); + var row = as(topN.child(), Row.class); + } + + /** + * Expected: + * MvExpand[emp_no{f}#5,emp_no{r}#15,20] + * \_TopN[[Order[emp_no{f}#5,ASC,LAST]],20[INTEGER]] + * \_EsRelation[test][_meta_field{f}#11, emp_no{f}#5, first_name{f}#6, ge..] + */ + public void testSortMvExpandLimit() { + LogicalPlan plan = optimizedPlan(""" + from test + | sort emp_no + | mv_expand emp_no + | limit 20"""); + + var expand = as(plan, MvExpand.class); + assertThat(expand.limit(), equalTo(20)); + var topN = as(expand.child(), TopN.class); + assertThat(topN.limit().fold(), is(20)); + var row = as(topN.child(), EsRelation.class); + } + + /** + * Expected: + * MvExpand[b{r}#5,b{r}#9,1000] + * \_Limit[1000[INTEGER]] + * \_Row[[1[INTEGER] AS a, -15[INTEGER] AS b]] + * + * see https://github.com/elastic/elasticsearch/issues/102084 + */ + public void testWhereMvExpand() { + LogicalPlan plan = optimizedPlan(""" + row a = 1, b = -15 + | where b < 3 + | mv_expand b"""); + + var expand = as(plan, MvExpand.class); + assertThat(expand.limit(), equalTo(1000)); + var limit2 = as(expand.child(), Limit.class); + assertThat(limit2.limit().fold(), is(1000)); + var row = as(limit2.child(), Row.class); + } + private static List orderNames(TopN topN) { return topN.order().stream().map(o -> as(o.child(), NamedExpression.class).name()).toList(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 8019dbf77ffbf..97de0caa93b5c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -1677,7 +1677,8 @@ public void testParamForIdentifier() { List.of(new Order(EMPTY, attribute("f.11..f.12.*"), Order.OrderDirection.ASC, Order.NullsPosition.LAST)) ), attribute("f.*.13.f.14*"), - attribute("f.*.13.f.14*") + attribute("f.*.13.f.14*"), + null ), statement( """ From ce3c3540d6eea09420ca703cfd91c43ba55e0b14 Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Wed, 30 Oct 2024 15:43:28 +0000 Subject: [PATCH 207/324] Apply more strict parsing of actions in bulk API (#115923) Previously, the following classes of malformed input were deprecated but not rejected in the action lines of the a bulk request: - Missing closing brace; - Additional keys after the action (which were ignored); - Additional data after the closing brace (which was ignored). They will now be considered errors and rejected. The existing behaviour is preserved in v8 compatibility mode. (N.B. The deprecation warnings were added in 8.1. The normal guidance to deprecate for a whole major version before removing does not apply here, since this was never a supported API feature. There is a risk to the lenient approach since it results in input being ignored, which is likely not the user's intention.) --- docs/changelog/115923.yaml | 16 ++++ .../action/bulk/BulkRequestParser.java | 72 ++++++++++----- .../action/bulk/BulkRequestParserTests.java | 91 +++++++++++++++++++ .../action/bulk/BulkRequestTests.java | 31 ++++--- 4 files changed, 171 insertions(+), 39 deletions(-) create mode 100644 docs/changelog/115923.yaml diff --git a/docs/changelog/115923.yaml b/docs/changelog/115923.yaml new file mode 100644 index 0000000000000..36e6b1e7fb29e --- /dev/null +++ b/docs/changelog/115923.yaml @@ -0,0 +1,16 @@ +pr: 115923 +summary: Apply more strict parsing of actions in bulk API +area: Indices APIs +type: breaking +issues: [ ] +breaking: + title: Apply more strict parsing of actions in bulk API + area: REST API + details: >- + Previously, the following classes of malformed input were deprecated but not rejected in the action lines of the a + bulk request: missing closing brace; additional keys after the action (which were ignored); additional data after + the closing brace (which was ignored). They will now be considered errors and rejected. + impact: >- + Users must provide well-formed input when using the bulk API. (They can request REST API compatibility with v8 to + get the previous behaviour back as an interim measure.) + notable: false diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java index 4c475bee985ab..8712430918fbf 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -44,7 +44,11 @@ * Helper to parse bulk requests. This should be considered an internal class. */ public final class BulkRequestParser { + + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) + // Remove deprecation logger when its usages in checkBulkActionIsProperlyClosed are removed private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(BulkRequestParser.class); + private static final Set SUPPORTED_ACTIONS = Set.of("create", "index", "update", "delete"); private static final String STRICT_ACTION_PARSING_WARNING_KEY = "bulk_request_strict_action_parsing"; @@ -348,7 +352,7 @@ public int incrementalParse( + "]" ); } - checkBulkActionIsProperlyClosed(parser); + checkBulkActionIsProperlyClosed(parser, line); if ("delete".equals(action)) { if (dynamicTemplates.isEmpty() == false) { @@ -446,35 +450,55 @@ public int incrementalParse( return isIncremental ? consumed : from; } - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_INDEXING) - // Warnings will need to be replaced with XContentEOFException from 9.x - private static void warnBulkActionNotProperlyClosed(String message) { - deprecationLogger.compatibleCritical(STRICT_ACTION_PARSING_WARNING_KEY, message); - } - - private static void checkBulkActionIsProperlyClosed(XContentParser parser) throws IOException { + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) // Remove lenient parsing in V8 BWC mode + private void checkBulkActionIsProperlyClosed(XContentParser parser, int line) throws IOException { XContentParser.Token token; try { token = parser.nextToken(); - } catch (XContentEOFException ignore) { - warnBulkActionNotProperlyClosed( - "A bulk action wasn't closed properly with the closing brace. Malformed objects are currently accepted but will be " - + "rejected in a future version." - ); - return; + } catch (XContentEOFException e) { + if (config.restApiVersion() == RestApiVersion.V_8) { + deprecationLogger.compatibleCritical( + STRICT_ACTION_PARSING_WARNING_KEY, + "A bulk action wasn't closed properly with the closing brace. Malformed objects are currently accepted but will be" + + " rejected in a future version." + ); + return; + } else { + throw e; + } } if (token != XContentParser.Token.END_OBJECT) { - warnBulkActionNotProperlyClosed( - "A bulk action object contained multiple keys. Additional keys are currently ignored but will be rejected in a " - + "future version." - ); - return; + if (config.restApiVersion() == RestApiVersion.V_8) { + deprecationLogger.compatibleCritical( + STRICT_ACTION_PARSING_WARNING_KEY, + "A bulk action object contained multiple keys. Additional keys are currently ignored but will be rejected in a future" + + " version." + ); + return; + } else { + throw new IllegalArgumentException( + "Malformed action/metadata line [" + + line + + "], expected " + + XContentParser.Token.END_OBJECT + + " but found [" + + token + + "]" + ); + } } if (parser.nextToken() != null) { - warnBulkActionNotProperlyClosed( - "A bulk action contained trailing data after the closing brace. This is currently ignored but will be rejected in a " - + "future version." - ); + if (config.restApiVersion() == RestApiVersion.V_8) { + deprecationLogger.compatibleCritical( + STRICT_ACTION_PARSING_WARNING_KEY, + "A bulk action contained trailing data after the closing brace. This is currently ignored but will be rejected in a" + + " future version." + ); + } else { + throw new IllegalArgumentException( + "Malformed action/metadata line [" + line + "], unexpected data after the closing brace" + ); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java index ddb0c0cc7acfd..b7f7a02e3b07e 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentType; import org.hamcrest.Matchers; @@ -20,9 +21,15 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Stream; public class BulkRequestParserTests extends ESTestCase { + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) // Replace with just RestApiVersion.values() when V8 no longer exists + public static final List REST_API_VERSIONS_POST_V8 = Stream.of(RestApiVersion.values()) + .filter(v -> v.compareTo(RestApiVersion.V_8) > 0) + .toList(); + public void testIndexRequest() throws IOException { BytesArray request = new BytesArray(""" { "index":{ "_id": "bar" } } @@ -260,6 +267,90 @@ public void testFailOnInvalidAction() { ); } + public void testFailMissingCloseBrace() { + BytesArray request = new BytesArray(""" + { "index":{ } + {} + """); + BulkRequestParser parser = new BulkRequestParser(randomBoolean(), randomFrom(REST_API_VERSIONS_POST_V8)); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> parser.parse( + request, + null, + null, + null, + null, + null, + null, + null, + false, + XContentType.JSON, + (req, type) -> fail("expected failure before we got this far"), + req -> fail("expected failure before we got this far"), + req -> fail("expected failure before we got this far") + ) + ); + assertEquals("[1:14] Unexpected end of file", ex.getMessage()); + } + + public void testFailExtraKeys() { + BytesArray request = new BytesArray(""" + { "index":{ }, "something": "unexpected" } + {} + """); + BulkRequestParser parser = new BulkRequestParser(randomBoolean(), randomFrom(REST_API_VERSIONS_POST_V8)); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> parser.parse( + request, + null, + null, + null, + null, + null, + null, + null, + false, + XContentType.JSON, + (req, type) -> fail("expected failure before we got this far"), + req -> fail("expected failure before we got this far"), + req -> fail("expected failure before we got this far") + ) + ); + assertEquals("Malformed action/metadata line [1], expected END_OBJECT but found [FIELD_NAME]", ex.getMessage()); + } + + public void testFailContentAfterClosingBrace() { + BytesArray request = new BytesArray(""" + { "index":{ } } { "something": "unexpected" } + {} + """); + BulkRequestParser parser = new BulkRequestParser(randomBoolean(), randomFrom(REST_API_VERSIONS_POST_V8)); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> parser.parse( + request, + null, + null, + null, + null, + null, + null, + null, + false, + XContentType.JSON, + (req, type) -> fail("expected failure before we got this far"), + req -> fail("expected failure before we got this far"), + req -> fail("expected failure before we got this far") + ) + ); + assertEquals("Malformed action/metadata line [1], unexpected data after the closing brace", ex.getMessage()); + } + public void testListExecutedPipelines() throws IOException { BytesArray request = new BytesArray(""" { "index":{ "_id": "bar" } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index c601401a1c49d..032db4135aab7 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -42,6 +42,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -426,12 +427,12 @@ public void testBulkActionWithoutCurlyBrace() throws Exception { { "field1" : "value1" } """; BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); - - assertWarnings( - "A bulk action wasn't closed properly with the closing brace. Malformed objects are currently accepted" - + " but will be rejected in a future version." + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON) ); + + assertThat(ex.getMessage(), containsString("Unexpected end of file")); } public void testBulkActionWithAdditionalKeys() throws Exception { @@ -440,12 +441,12 @@ public void testBulkActionWithAdditionalKeys() throws Exception { { "field1" : "value1" } """; BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); - - assertWarnings( - "A bulk action object contained multiple keys. Additional keys are currently ignored but will be " - + "rejected in a future version." + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON) ); + + assertThat(ex.getMessage(), is("Malformed action/metadata line [1], expected END_OBJECT but found [FIELD_NAME]")); } public void testBulkActionWithTrailingData() throws Exception { @@ -454,12 +455,12 @@ public void testBulkActionWithTrailingData() throws Exception { { "field1" : "value1" } """; BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); - - assertWarnings( - "A bulk action contained trailing data after the closing brace. This is currently ignored " - + "but will be rejected in a future version." + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON) ); + + assertThat(ex.getMessage(), is("Malformed action/metadata line [1], unexpected data after the closing brace")); } public void testUnsupportedAction() { From f3b34f3e344516811780de5682ad7d624a245f4f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 30 Oct 2024 09:15:16 -0700 Subject: [PATCH 208/324] Remove old synthetic source mapping config (#115889) This change replaces the old synthetic source config in mappings with the newly introduced index setting. Closes #115859 --- .../index/mapper/MatchOnlyTextMapperIT.java | 6 +- .../extras/MatchOnlyTextFieldMapperTests.java | 17 +- .../mapper/DocCountFieldMapperTests.java | 6 +- ...edSourceFieldMapperConfigurationTests.java | 4 +- .../mapper/IgnoredSourceFieldMapperTests.java | 148 ++++++++---------- .../index/mapper/KeywordFieldMapperTests.java | 10 +- .../index/mapper/NestedObjectMapperTests.java | 22 +-- .../index/mapper/ObjectMapperTests.java | 10 +- .../index/mapper/RangeFieldMapperTests.java | 2 +- .../index/mapper/SourceFieldMetricsTests.java | 4 +- .../index/mapper/SourceLoaderTests.java | 27 ++-- .../index/mapper/TextFieldMapperTests.java | 9 +- .../flattened/FlattenedFieldMapperTests.java | 4 +- .../index/mapper/MapperServiceTestCase.java | 20 +-- .../index/mapper/MapperTestCase.java | 45 +++--- .../mapper/HistogramFieldMapperTests.java | 6 +- .../accesscontrol/FieldSubsetReaderTests.java | 3 +- .../xpack/esql/action/SyntheticSourceIT.java | 10 +- ...AggregateDoubleMetricFieldMapperTests.java | 6 +- .../ConstantKeywordFieldMapperTests.java | 6 +- 20 files changed, 163 insertions(+), 202 deletions(-) diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java index 7c160bd00039c..18f8b5ca30bf8 100644 --- a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java +++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -89,13 +90,14 @@ public void testHighlightingWithMatchOnlyTextFieldSyntheticSource() throws IOExc // load the source. String mappings = """ - { "_source" : { "mode" : "synthetic" }, + { "properties" : { "message" : { "type" : "match_only_text" } } } """; - assertAcked(prepareCreate("test").setMapping(mappings)); + Settings.Builder settings = Settings.builder().put(indexSettings()).put("index.mapping.source.mode", "synthetic"); + assertAcked(prepareCreate("test").setSettings(settings).setMapping(mappings)); BulkRequestBuilder bulk = client().prepareBulk("test").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (int i = 0; i < 2000; i++) { bulk.add( diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java index 1eb6083cfe453..4ad4c7fe3bef8 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java @@ -64,19 +64,19 @@ protected Object getSampleValueForDocument() { } public void testExistsStandardSource() throws IOException { - assertExistsQuery(createMapperService(testMapping(false))); + assertExistsQuery(createMapperService(fieldMapping(b -> b.field("type", "match_only_text")))); } public void testExistsSyntheticSource() throws IOException { - assertExistsQuery(createMapperService(testMapping(true))); + assertExistsQuery(createSytheticSourceMapperService(fieldMapping(b -> b.field("type", "match_only_text")))); } public void testPhraseQueryStandardSource() throws IOException { - assertPhraseQuery(createMapperService(testMapping(false))); + assertPhraseQuery(createMapperService(fieldMapping(b -> b.field("type", "match_only_text")))); } public void testPhraseQuerySyntheticSource() throws IOException { - assertPhraseQuery(createMapperService(testMapping(true))); + assertPhraseQuery(createSytheticSourceMapperService(fieldMapping(b -> b.field("type", "match_only_text")))); } private void assertPhraseQuery(MapperService mapperService) throws IOException { @@ -104,13 +104,6 @@ protected void registerParameters(ParameterChecker checker) throws IOException { ); } - private static XContentBuilder testMapping(boolean syntheticSource) throws IOException { - if (syntheticSource) { - return syntheticSourceMapping(b -> b.startObject("field").field("type", "match_only_text").endObject()); - } - return fieldMapping(b -> b.field("type", "match_only_text")); - } - @Override protected void minimalMapping(XContentBuilder b) throws IOException { b.field("type", "match_only_text"); @@ -256,7 +249,7 @@ public void testDocValues() throws IOException { } public void testDocValuesLoadedFromSynthetic() throws IOException { - MapperService mapper = createMapperService(syntheticSourceFieldMapping(b -> b.field("type", "match_only_text"))); + MapperService mapper = createSytheticSourceMapperService(fieldMapping(b -> b.field("type", "match_only_text"))); assertScriptDocValues(mapper, "foo", equalTo(List.of("foo"))); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java index b4bc2f23af087..4101828d4cd24 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java @@ -84,12 +84,12 @@ public void testInvalidDocument_ArrayDocCount() throws Exception { } public void testSyntheticSource() throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> {})); + DocumentMapper mapper = createSytheticSourceMapperService(topMapping(b -> {})).documentMapper(); assertThat(syntheticSource(mapper, b -> b.field(CONTENT_TYPE, 10)), equalTo("{\"_doc_count\":10}")); } public void testSyntheticSourceMany() throws IOException { - MapperService mapper = createMapperService(syntheticSourceMapping(b -> b.startObject("doc").field("type", "integer").endObject())); + MapperService mapper = createSytheticSourceMapperService(mapping(b -> b.startObject("doc").field("type", "integer").endObject())); List counts = randomList(2, 10000, () -> between(1, Integer.MAX_VALUE)); withLuceneIndex(mapper, iw -> { int d = 0; @@ -116,7 +116,7 @@ public void testSyntheticSourceMany() throws IOException { } public void testSyntheticSourceManyDoNotHave() throws IOException { - MapperService mapper = createMapperService(syntheticSourceMapping(b -> b.startObject("doc").field("type", "integer").endObject())); + MapperService mapper = createSytheticSourceMapperService(mapping(b -> b.startObject("doc").field("type", "integer").endObject())); List counts = randomList(2, 10000, () -> randomBoolean() ? null : between(1, Integer.MAX_VALUE)); withLuceneIndex(mapper, iw -> { int d = 0; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperConfigurationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperConfigurationTests.java index e08ace01e88e8..8646e1b66dcb0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperConfigurationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperConfigurationTests.java @@ -130,8 +130,8 @@ private MapperService mapperServiceWithCustomSettings( for (var entry : customSettings.entrySet()) { settings.put(entry.getKey(), entry.getValue()); } - - return createMapperService(settings.build(), syntheticSourceMapping(mapping)); + settings.put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC); + return createMapperService(settings.build(), mapping(mapping)); } protected void validateRoundTripReader(String syntheticSource, DirectoryReader reader, DirectoryReader roundTripReader) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index 884372d249287..7d29db66f4031 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -30,9 +30,10 @@ private DocumentMapper getDocumentMapperWithFieldLimit() throws IOException { return createMapperService( Settings.builder() .put("index.mapping.total_fields.limit", 2) + .put("index.mapping.source.mode", "synthetic") .put("index.mapping.total_fields.ignore_dynamic_beyond_limit", true) .build(), - syntheticSourceMapping(b -> { + mapping(b -> { b.startObject("foo").field("type", "keyword").endObject(); b.startObject("bar").field("type", "object").endObject(); }) @@ -52,6 +53,7 @@ private String getSyntheticSourceWithFieldLimit(CheckedConsumer { - b.startObject("_source").field("mode", "synthetic").endObject(); - b.field("enabled", false); - })).documentMapper(); + DocumentMapper documentMapper = createSytheticSourceMapperService(topMapping(b -> { b.field("enabled", false); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { b.field("name", name); }); assertEquals(String.format(Locale.ROOT, """ {"name":"%s"}""", name), syntheticSource); @@ -250,10 +249,7 @@ public void testDisabledRootObjectManyFields() throws IOException { int intValue = randomInt(); String stringValue = randomAlphaOfLength(20); - DocumentMapper documentMapper = createMapperService(topMapping(b -> { - b.startObject("_source").field("mode", "synthetic").endObject(); - b.field("enabled", false); - })).documentMapper(); + DocumentMapper documentMapper = createSytheticSourceMapperService(topMapping(b -> b.field("enabled", false))).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { b.field("boolean_value", booleanValue); b.startObject("path"); @@ -292,7 +288,7 @@ public void testDisabledRootObjectManyFields() throws IOException { public void testDisabledObjectSingleField() throws IOException { String name = randomAlphaOfLength(20); - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").field("enabled", false).endObject(); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { @@ -308,7 +304,7 @@ public void testDisabledObjectSingleField() throws IOException { public void testDisabledObjectContainsArray() throws IOException { String name = randomAlphaOfLength(20); - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").field("enabled", false).endObject(); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { @@ -328,7 +324,7 @@ public void testDisabledObjectManyFields() throws IOException { int intValue = randomInt(); String stringValue = randomAlphaOfLength(20); - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path").field("type", "object").field("enabled", false).endObject(); })).documentMapper(); @@ -372,7 +368,7 @@ public void testDisabledSubObject() throws IOException { boolean booleanValue = randomBoolean(); int intValue = randomInt(); String name = randomAlphaOfLength(20); - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path"); { @@ -404,7 +400,7 @@ public void testDisabledSubObject() throws IOException { } public void testDisabledSubobjectContainsArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path"); { @@ -447,7 +443,7 @@ public void testMixedDisabledEnabledObjects() throws IOException { int intValue = randomInt(); String foo = randomAlphaOfLength(20); String bar = randomAlphaOfLength(20); - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path"); { @@ -507,7 +503,7 @@ public void testMixedDisabledEnabledObjects() throws IOException { } public void testIndexStoredArraySourceRootValueArray() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("int_value").field("type", "integer").endObject(); b.startObject("bool_value").field("type", "boolean").endObject(); })).documentMapper(); @@ -520,7 +516,7 @@ public void testIndexStoredArraySourceRootValueArray() throws IOException { } public void testIndexStoredArraySourceRootValueArrayDisabled() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("int_value").field("type", "integer").field(Mapper.SYNTHETIC_SOURCE_KEEP_PARAM, "none").endObject(); b.startObject("bool_value").field("type", "boolean").endObject(); })).documentMapper(); @@ -533,7 +529,7 @@ public void testIndexStoredArraySourceRootValueArrayDisabled() throws IOExceptio } public void testIndexStoredArraySourceSingleLeafElement() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("int_value").field("type", "integer").endObject(); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> b.array("int_value", new int[] { 10 })); @@ -543,7 +539,7 @@ public void testIndexStoredArraySourceSingleLeafElement() throws IOException { } public void testIndexStoredArraySourceSingleLeafElementAndNull() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("value").field("type", "keyword").endObject(); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> b.array("value", new String[] { "foo", null })); @@ -551,7 +547,7 @@ public void testIndexStoredArraySourceSingleLeafElementAndNull() throws IOExcept } public void testIndexStoredArraySourceSingleObjectElement() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("path").startObject("properties"); { b.startObject("int_value").field("type", "integer").endObject(); @@ -565,7 +561,7 @@ public void testIndexStoredArraySourceSingleObjectElement() throws IOException { } public void testFieldStoredArraySourceRootValueArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("int_value").field("type", "integer").field(Mapper.SYNTHETIC_SOURCE_KEEP_PARAM, "arrays").endObject(); b.startObject("string_value").field("type", "keyword").field(Mapper.SYNTHETIC_SOURCE_KEEP_PARAM, "all").endObject(); b.startObject("bool_value").field("type", "boolean").endObject(); @@ -580,7 +576,7 @@ public void testFieldStoredArraySourceRootValueArray() throws IOException { } public void testFieldStoredSourceRootValue() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("default").field("type", "float").field(Mapper.SYNTHETIC_SOURCE_KEEP_PARAM, "none").endObject(); b.startObject("source_kept").field("type", "float").field(Mapper.SYNTHETIC_SOURCE_KEEP_PARAM, "all").endObject(); b.startObject("bool_value").field("type", "boolean").endObject(); @@ -595,7 +591,7 @@ public void testFieldStoredSourceRootValue() throws IOException { } public void testIndexStoredArraySourceRootObjectArray() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -620,7 +616,7 @@ public void testIndexStoredArraySourceRootObjectArray() throws IOException { } public void testIndexStoredArraySourceRootObjectArrayWithBypass() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -646,7 +642,7 @@ public void testIndexStoredArraySourceRootObjectArrayWithBypass() throws IOExcep } public void testIndexStoredArraySourceNestedValueArray() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -672,7 +668,7 @@ public void testIndexStoredArraySourceNestedValueArray() throws IOException { } public void testIndexStoredArraySourceNestedValueArrayDisabled() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -710,7 +706,7 @@ public void testIndexStoredArraySourceNestedValueArrayDisabled() throws IOExcept } public void testFieldStoredArraySourceNestedValueArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -738,7 +734,7 @@ public void testFieldStoredArraySourceNestedValueArray() throws IOException { } public void testFieldStoredSourceNestedValue() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -766,7 +762,7 @@ public void testFieldStoredSourceNestedValue() throws IOException { } public void testIndexStoredArraySourceNestedObjectArray() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -804,7 +800,7 @@ public void testIndexStoredArraySourceNestedObjectArray() throws IOException { } public void testRootArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -828,7 +824,7 @@ public void testRootArray() throws IOException { } public void testNestedArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path"); { @@ -902,7 +898,7 @@ public void testNestedArray() throws IOException { } public void testConflictingFieldNameAfterArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").startObject("properties"); { b.startObject("to").startObject("properties"); @@ -933,7 +929,7 @@ public void testConflictingFieldNameAfterArray() throws IOException { } public void testArrayWithNestedObjects() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").startObject("properties"); { b.startObject("to").field("type", "nested").startObject("properties"); @@ -963,7 +959,7 @@ public void testArrayWithNestedObjects() throws IOException { } public void testObjectArrayWithinNestedObjects() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").startObject("properties"); { b.startObject("to").field("type", "nested").startObject("properties"); @@ -1000,7 +996,7 @@ public void testObjectArrayWithinNestedObjects() throws IOException { } public void testObjectArrayWithinNestedObjectsArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").startObject("properties"); { b.startObject("to").field("type", "nested").startObject("properties"); @@ -1051,7 +1047,7 @@ public void testObjectArrayWithinNestedObjectsArray() throws IOException { } public void testArrayWithinArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); { b.field("type", "object").field("synthetic_source_keep", "arrays"); @@ -1104,7 +1100,7 @@ public void testArrayWithinArray() throws IOException { } public void testObjectArrayAndValue() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -1146,7 +1142,7 @@ public void testObjectArrayAndValue() throws IOException { } public void testDeeplyNestedObjectArrayAndValue() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").startObject("properties").startObject("to").startObject("properties"); { b.startObject("stored"); @@ -1183,7 +1179,7 @@ public void testDeeplyNestedObjectArrayAndValue() throws IOException { } public void testObjectArrayAndValueInNestedObject() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").startObject("properties").startObject("to").startObject("properties"); { b.startObject("stored"); @@ -1219,7 +1215,7 @@ public void testObjectArrayAndValueInNestedObject() throws IOException { } public void testObjectArrayAndValueDisabledObject() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").startObject("properties"); { b.startObject("regular"); @@ -1250,7 +1246,7 @@ public void testObjectArrayAndValueDisabledObject() throws IOException { } public void testObjectArrayAndValueNonDynamicObject() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").startObject("properties"); { b.startObject("regular"); @@ -1277,7 +1273,7 @@ public void testObjectArrayAndValueNonDynamicObject() throws IOException { } public void testObjectArrayAndValueDynamicRuntimeObject() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").startObject("properties"); { b.startObject("regular"); @@ -1304,7 +1300,7 @@ public void testObjectArrayAndValueDynamicRuntimeObject() throws IOException { } public void testDisabledObjectWithinHigherLevelArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -1347,7 +1343,7 @@ public void testDisabledObjectWithinHigherLevelArray() throws IOException { } public void testStoredArrayWithinHigherLevelArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -1400,7 +1396,7 @@ public void testStoredArrayWithinHigherLevelArray() throws IOException { } public void testObjectWithKeepAll() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); { b.field("type", "object").field("synthetic_source_keep", "all"); @@ -1436,7 +1432,7 @@ public void testObjectWithKeepAll() throws IOException { } public void testFallbackFieldWithinHigherLevelArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); { b.field("type", "object"); @@ -1466,7 +1462,7 @@ public void testFallbackFieldWithinHigherLevelArray() throws IOException { } public void testFieldOrdering() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("A").field("type", "integer").endObject(); b.startObject("B").field("type", "object").field("synthetic_source_keep", "arrays"); { @@ -1514,7 +1510,7 @@ public void testFieldOrdering() throws IOException { } public void testNestedObjectWithField() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "nested"); { b.field("synthetic_source_keep", "all"); @@ -1536,7 +1532,7 @@ public void testNestedObjectWithField() throws IOException { } public void testNestedObjectWithArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "nested"); { b.field("synthetic_source_keep", "all"); @@ -1562,7 +1558,7 @@ public void testNestedObjectWithArray() throws IOException { } public void testNestedSubobjectWithField() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path"); { @@ -1603,7 +1599,7 @@ public void testNestedSubobjectWithField() throws IOException { } public void testNestedSubobjectWithArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path"); { @@ -1652,7 +1648,7 @@ public void testNestedSubobjectWithArray() throws IOException { } public void testNestedObjectIncludeInRoot() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "nested").field("synthetic_source_keep", "all").field("include_in_root", true); { b.startObject("properties"); @@ -1674,7 +1670,7 @@ public void testNestedObjectIncludeInRoot() throws IOException { public void testNoDynamicObjectSingleField() throws IOException { String name = randomAlphaOfLength(20); - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").field("dynamic", "false").endObject(); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { @@ -1693,7 +1689,7 @@ public void testNoDynamicObjectManyFields() throws IOException { int intValue = randomInt(); String stringValue = randomAlphaOfLength(20); - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path").field("type", "object").field("dynamic", "false"); { @@ -1737,7 +1733,7 @@ public void testNoDynamicObjectManyFields() throws IOException { } public void testNoDynamicObjectSimpleArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").field("dynamic", "false").endObject(); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { @@ -1753,7 +1749,7 @@ public void testNoDynamicObjectSimpleArray() throws IOException { } public void testNoDynamicObjectSimpleValueArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").field("dynamic", "false").endObject(); })).documentMapper(); var syntheticSource = syntheticSource( @@ -1765,7 +1761,7 @@ public void testNoDynamicObjectSimpleValueArray() throws IOException { } public void testNoDynamicObjectNestedArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").field("dynamic", "false").endObject(); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { @@ -1781,9 +1777,7 @@ public void testNoDynamicObjectNestedArray() throws IOException { } public void testNoDynamicRootObject() throws IOException { - DocumentMapper documentMapper = createMapperService(topMapping(b -> { - b.startObject("_source").field("mode", "synthetic").endObject().field("dynamic", "false"); - })).documentMapper(); + DocumentMapper documentMapper = createSytheticSourceMapperService(topMapping(b -> b.field("dynamic", "false"))).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { b.field("foo", "bar"); b.startObject("path").field("X", "Y").endObject(); @@ -1795,7 +1789,7 @@ public void testNoDynamicRootObject() throws IOException { public void testRuntimeDynamicObjectSingleField() throws IOException { String name = randomAlphaOfLength(20); - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").field("dynamic", "runtime").endObject(); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { @@ -1814,7 +1808,7 @@ public void testRuntimeDynamicObjectManyFields() throws IOException { int intValue = randomInt(); String stringValue = randomAlphaOfLength(20); - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path").field("type", "object").field("dynamic", "runtime"); { @@ -1858,7 +1852,7 @@ public void testRuntimeDynamicObjectManyFields() throws IOException { } public void testRuntimeDynamicObjectSimpleArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").field("dynamic", "runtime").endObject(); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { @@ -1874,7 +1868,7 @@ public void testRuntimeDynamicObjectSimpleArray() throws IOException { } public void testRuntimeDynamicObjectSimpleValueArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").field("dynamic", "runtime").endObject(); })).documentMapper(); var syntheticSource = syntheticSource( @@ -1886,7 +1880,7 @@ public void testRuntimeDynamicObjectSimpleValueArray() throws IOException { } public void testRuntimeDynamicObjectNestedArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "object").field("dynamic", "runtime").endObject(); })).documentMapper(); var syntheticSource = syntheticSource(documentMapper, b -> { @@ -1902,7 +1896,7 @@ public void testRuntimeDynamicObjectNestedArray() throws IOException { } public void testDisabledSubObjectWithNameOverlappingParentName() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); b.startObject("properties"); { @@ -1923,7 +1917,7 @@ public void testDisabledSubObjectWithNameOverlappingParentName() throws IOExcept } public void testStoredNestedSubObjectWithNameOverlappingParentName() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); b.startObject("properties"); { @@ -1944,7 +1938,7 @@ public void testStoredNestedSubObjectWithNameOverlappingParentName() throws IOEx } public void testCopyToLogicInsideObject() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path"); b.startObject("properties"); { @@ -1975,10 +1969,7 @@ public void testCopyToLogicInsideObject() throws IOException { } public void testDynamicIgnoredObjectWithFlatFields() throws IOException { - DocumentMapper documentMapper = createMapperService(topMapping(b -> { - b.startObject("_source").field("mode", "synthetic").endObject(); - b.field("dynamic", false); - })).documentMapper(); + DocumentMapper documentMapper = createSytheticSourceMapperService(topMapping(b -> b.field("dynamic", false))).documentMapper(); CheckedConsumer document = b -> { b.startObject("top"); @@ -2009,10 +2000,7 @@ public void testDynamicIgnoredObjectWithFlatFields() throws IOException { } public void testDisabledRootObjectWithFlatFields() throws IOException { - DocumentMapper documentMapper = createMapperService(topMapping(b -> { - b.startObject("_source").field("mode", "synthetic").endObject(); - b.field("enabled", false); - })).documentMapper(); + DocumentMapper documentMapper = createSytheticSourceMapperService(topMapping(b -> b.field("enabled", false))).documentMapper(); CheckedConsumer document = b -> { b.startObject("top"); @@ -2043,7 +2031,7 @@ public void testDisabledRootObjectWithFlatFields() throws IOException { } public void testDisabledObjectWithFlatFields() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("top").field("type", "object").field("enabled", false).endObject(); })).documentMapper(); @@ -2076,7 +2064,7 @@ public void testDisabledObjectWithFlatFields() throws IOException { } public void testRegularObjectWithFlatFields() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("top").field("type", "object").field("synthetic_source_keep", "all").endObject(); })).documentMapper(); @@ -2109,7 +2097,7 @@ public void testRegularObjectWithFlatFields() throws IOException { } public void testRegularObjectWithFlatFieldsInsideAnArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("top"); b.startObject("properties"); { @@ -2187,7 +2175,7 @@ public void testIgnoredDynamicObjectWithFlatFields() throws IOException { } public void testStoredArrayWithFlatFields() throws IOException { - DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(mapping(b -> { b.startObject("outer").startObject("properties"); { b.startObject("inner").field("type", "object").endObject(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 5b218fb077d32..052bf995bdd48 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -663,10 +663,8 @@ public void testKeywordFieldUtf8LongerThan32766SourceOnly() throws Exception { * Test that we track the synthetic source if field is neither indexed nor has doc values nor stored */ public void testSyntheticSourceForDisabledField() throws Exception { - MapperService mapper = createMapperService( - syntheticSourceFieldMapping( - b -> b.field("type", "keyword").field("index", false).field("doc_values", false).field("store", false) - ) + MapperService mapper = createSytheticSourceMapperService( + fieldMapping(b -> b.field("type", "keyword").field("index", false).field("doc_values", false).field("store", false)) ); String value = randomAlphaOfLengthBetween(1, 20); assertEquals("{\"field\":\"" + value + "\"}", syntheticSource(mapper.documentMapper(), b -> b.field("field", value))); @@ -767,8 +765,8 @@ public void testDocValuesLoadedFromSource() throws IOException { } public void testDocValuesLoadedFromStoredSynthetic() throws IOException { - MapperService mapper = createMapperService( - syntheticSourceFieldMapping(b -> b.field("type", "keyword").field("doc_values", false).field("store", true)) + MapperService mapper = createSytheticSourceMapperService( + fieldMapping(b -> b.field("type", "keyword").field("doc_values", false).field("store", true)) ); assertScriptDocValues(mapper, "foo", equalTo(List.of("foo"))); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index be1469e25f24d..2d87e121875b4 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -1570,9 +1570,9 @@ public void testNestedMapperFilters() throws Exception { } public void testStoreArraySourceinSyntheticSourceMode() throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("o").field("type", "nested").field("synthetic_source_keep", "all").endObject(); - })); + })).documentMapper(); assertNotNull(mapper.mapping().getRoot().getMapper("o")); } @@ -1584,7 +1584,7 @@ public void testStoreArraySourceNoopInNonSyntheticSourceMode() throws IOExceptio } public void testSyntheticNestedWithObject() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "nested"); { b.startObject("properties"); @@ -1605,7 +1605,7 @@ public void testSyntheticNestedWithObject() throws IOException { } public void testSyntheticNestedWithArray() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "nested"); { b.startObject("properties"); @@ -1630,7 +1630,7 @@ public void testSyntheticNestedWithArray() throws IOException { } public void testSyntheticNestedWithSubObjects() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path"); { @@ -1670,7 +1670,7 @@ public void testSyntheticNestedWithSubObjects() throws IOException { } public void testSyntheticNestedWithSubArrays() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("boolean_value").field("type", "boolean").endObject(); b.startObject("path"); { @@ -1718,7 +1718,7 @@ public void testSyntheticNestedWithSubArrays() throws IOException { } public void testSyntheticNestedWithIncludeInRoot() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "nested").field("include_in_root", true); { b.startObject("properties"); @@ -1739,7 +1739,7 @@ public void testSyntheticNestedWithIncludeInRoot() throws IOException { } public void testSyntheticNestedWithEmptyObject() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "nested"); { b.startObject("properties"); @@ -1756,7 +1756,7 @@ public void testSyntheticNestedWithEmptyObject() throws IOException { } public void testSyntheticNestedWithEmptySubObject() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "nested"); { b.startObject("properties"); @@ -1783,7 +1783,7 @@ public void testSyntheticNestedWithEmptySubObject() throws IOException { } public void testSyntheticNestedWithArrayContainingEmptyObject() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "nested"); { b.startObject("properties"); @@ -1807,7 +1807,7 @@ public void testSyntheticNestedWithArrayContainingEmptyObject() throws IOExcepti } public void testSyntheticNestedWithArrayContainingOnlyEmptyObject() throws IOException { - DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + DocumentMapper documentMapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("path").field("type", "nested"); { b.startObject("properties"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 3b77015fde415..527d7497a8418 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -136,7 +136,7 @@ public void testMerge() throws IOException { } public void testMergeEnabledForIndexTemplates() throws IOException { - MapperService mapperService = createMapperService(syntheticSourceMapping(b -> {})); + MapperService mapperService = createSytheticSourceMapperService(mapping(b -> {})); merge(mapperService, MergeReason.INDEX_TEMPLATE, mapping(b -> { b.startObject("object"); { @@ -685,9 +685,9 @@ public void testSyntheticSourceDocValuesFieldWithout() throws IOException { } public void testStoreArraySourceinSyntheticSourceMode() throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("o").field("type", "object").field("synthetic_source_keep", "arrays").endObject(); - })); + })).documentMapper(); assertNotNull(mapper.mapping().getRoot().getMapper("o")); } @@ -728,7 +728,7 @@ public void testWithoutMappers() throws IOException { private ObjectMapper createObjectMapperWithAllParametersSet(CheckedConsumer propertiesBuilder) throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("object"); { b.field("type", "object"); @@ -741,7 +741,7 @@ private ObjectMapper createObjectMapperWithAllParametersSet(CheckedConsumer randomRangeForSyntheticSourceTest() { } protected Source getSourceFor(CheckedConsumer mapping, List inputValues) throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(mapping)); + DocumentMapper mapper = createSytheticSourceMapperService(mapping(mapping)).documentMapper(); CheckedConsumer input = b -> { b.field("field"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMetricsTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMetricsTests.java index 81532114a7050..c640cea16487b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMetricsTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMetricsTests.java @@ -36,8 +36,8 @@ public void testFieldHasValue() {} public void testFieldHasValueWithEmptyFieldInfos() {} public void testSyntheticSourceLoadLatency() throws IOException { - var mapping = syntheticSourceMapping(b -> b.startObject("kwd").field("type", "keyword").endObject()); - var mapper = createDocumentMapper(mapping); + var mapping = mapping(b -> b.startObject("kwd").field("type", "keyword").endObject()); + var mapper = createSytheticSourceMapperService(mapping).documentMapper(); try (Directory directory = newDirectory()) { RandomIndexWriter iw = new RandomIndexWriter(random(), directory); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceLoaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceLoaderTests.java index c6a4021d8a542..c2e49759cdfde 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceLoaderTests.java @@ -25,25 +25,25 @@ public void testNonSynthetic() throws IOException { } public void testEmptyObject() throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("o").field("type", "object").endObject(); b.startObject("kwd").field("type", "keyword").endObject(); - })); + })).documentMapper(); assertTrue(mapper.mappers().newSourceLoader(SourceFieldMetrics.NOOP).reordersFieldValues()); assertThat(syntheticSource(mapper, b -> b.field("kwd", "foo")), equalTo(""" {"kwd":"foo"}""")); } public void testDotsInFieldName() throws IOException { - DocumentMapper mapper = createDocumentMapper( - syntheticSourceMapping(b -> b.startObject("foo.bar.baz").field("type", "keyword").endObject()) - ); + DocumentMapper mapper = createSytheticSourceMapperService( + mapping(b -> b.startObject("foo.bar.baz").field("type", "keyword").endObject()) + ).documentMapper(); assertThat(syntheticSource(mapper, b -> b.field("foo.bar.baz", "aaa")), equalTo(""" {"foo":{"bar":{"baz":"aaa"}}}""")); } public void testNoSubobjectsIntermediateObject() throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("foo"); { b.field("type", "object").field("subobjects", false); @@ -54,30 +54,29 @@ public void testNoSubobjectsIntermediateObject() throws IOException { b.endObject(); } b.endObject(); - })); + })).documentMapper(); assertThat(syntheticSource(mapper, b -> b.field("foo.bar.baz", "aaa")), equalTo(""" {"foo":{"bar.baz":"aaa"}}""")); } public void testNoSubobjectsRootObject() throws IOException { XContentBuilder mappings = topMapping(b -> { - b.startObject("_source").field("mode", "synthetic").endObject(); b.field("subobjects", false); b.startObject("properties"); b.startObject("foo.bar.baz").field("type", "keyword").endObject(); b.endObject(); }); - DocumentMapper mapper = createDocumentMapper(mappings); + DocumentMapper mapper = createSytheticSourceMapperService(mappings).documentMapper(); assertThat(syntheticSource(mapper, b -> b.field("foo.bar.baz", "aaa")), equalTo(""" {"foo.bar.baz":"aaa"}""")); } public void testSorted() throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("foo").field("type", "keyword").endObject(); b.startObject("bar").field("type", "keyword").endObject(); b.startObject("baz").field("type", "keyword").endObject(); - })); + })).documentMapper(); assertThat( syntheticSource(mapper, b -> b.field("foo", "over the lazy dog").field("bar", "the quick").field("baz", "brown fox jumped")), equalTo(""" @@ -86,12 +85,12 @@ public void testSorted() throws IOException { } public void testArraysPushedToLeaves() throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("o").startObject("properties"); b.startObject("foo").field("type", "keyword").endObject(); b.startObject("bar").field("type", "keyword").endObject(); b.endObject().endObject(); - })); + })).documentMapper(); assertThat(syntheticSource(mapper, b -> { b.startArray("o"); b.startObject().field("foo", "a").endObject(); @@ -104,7 +103,7 @@ public void testArraysPushedToLeaves() throws IOException { } public void testHideTheCopyTo() { - Exception e = expectThrows(IllegalArgumentException.class, () -> createDocumentMapper(syntheticSourceMapping(b -> { + Exception e = expectThrows(IllegalArgumentException.class, () -> createSytheticSourceMapperService(mapping(b -> { b.startObject("foo"); { b.field("type", "keyword"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index c2375e948fda0..7f9474f5bab83 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -1249,7 +1249,7 @@ public void testDocValues() throws IOException { } public void testDocValuesLoadedFromStoredSynthetic() throws IOException { - MapperService mapper = createMapperService(syntheticSourceFieldMapping(b -> b.field("type", "text").field("store", true))); + MapperService mapper = createSytheticSourceMapperService(fieldMapping(b -> b.field("type", "text").field("store", true))); for (String input : new String[] { "foo", // Won't be tokenized "foo bar", // Will be tokenized. But script doc values still returns the whole field. @@ -1259,7 +1259,7 @@ public void testDocValuesLoadedFromStoredSynthetic() throws IOException { } public void testDocValuesLoadedFromSubKeywordSynthetic() throws IOException { - MapperService mapper = createMapperService(syntheticSourceFieldMapping(b -> { + MapperService mapper = createSytheticSourceMapperService(fieldMapping(b -> { b.field("type", "text"); b.startObject("fields"); { @@ -1276,7 +1276,7 @@ public void testDocValuesLoadedFromSubKeywordSynthetic() throws IOException { } public void testDocValuesLoadedFromSubStoredKeywordSynthetic() throws IOException { - MapperService mapper = createMapperService(syntheticSourceFieldMapping(b -> { + MapperService mapper = createSytheticSourceMapperService(fieldMapping(b -> { b.field("type", "text"); b.startObject("fields"); { @@ -1351,7 +1351,8 @@ private void testBlockLoaderFromParent(boolean columnReader, boolean syntheticSo } b.endObject(); }; - MapperService mapper = createMapperService(syntheticSource ? syntheticSourceMapping(buildFields) : mapping(buildFields)); + XContentBuilder mapping = mapping(buildFields); + MapperService mapper = syntheticSource ? createSytheticSourceMapperService(mapping) : createMapperService(mapping); BlockReaderSupport blockReaderSupport = getSupportedReaders(mapper, "field.sub"); var sourceLoader = mapper.mappingLookup().newSourceLoader(SourceFieldMetrics.NOOP); testBlockLoader(columnReader, example, blockReaderSupport, sourceLoader); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java index 5aca2357092e4..82dc0683fa98e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java @@ -831,9 +831,9 @@ private void mapping(XContentBuilder b) throws IOException { } public void testSyntheticSourceWithOnlyIgnoredValues() throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("field").field("type", "flattened").field("ignore_above", 1).endObject(); - })); + })).documentMapper(); var syntheticSource = syntheticSource(mapper, b -> { b.startObject("field"); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 3960aa5a91cc5..bf47efcad7b53 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -170,7 +170,7 @@ protected final DocumentMapper createDocumentMapper(IndexVersion version, XConte } protected final DocumentMapper createDocumentMapper(String mappings) throws IOException { - MapperService mapperService = createMapperService(mapping(b -> {})); + var mapperService = createMapperService(mapping(b -> {})); merge(mapperService, mappings); return mapperService.documentMapper(); } @@ -892,24 +892,6 @@ protected void validateRoundTripReader(String syntheticSource, DirectoryReader r ); } - protected static XContentBuilder syntheticSourceMapping(CheckedConsumer buildFields) throws IOException { - return topMapping(b -> { - b.startObject("_source").field("mode", "synthetic").endObject(); - b.startObject("properties"); - buildFields.accept(b); - b.endObject(); - }); - } - - protected static XContentBuilder syntheticSourceFieldMapping(CheckedConsumer buildField) - throws IOException { - return syntheticSourceMapping(b -> { - b.startObject("field"); - buildField.accept(b); - b.endObject(); - }); - } - protected static DirectoryReader wrapInMockESDirectoryReader(DirectoryReader directoryReader) throws IOException { return ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(new Index("index", "_na_"), 0)); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index c89c0b2e37dd2..29bb3b15a9f86 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -1147,11 +1147,11 @@ public void testSyntheticSourceIgnoreMalformedExamples() throws IOException { } private void assertSyntheticSource(SyntheticSourceExample example) throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); example.mapping().accept(b); b.endObject(); - })); + })).documentMapper(); assertThat(syntheticSource(mapper, example::buildInput), equalTo(example.expected())); } @@ -1183,11 +1183,11 @@ public final void testSyntheticSourceMany() throws IOException { boolean ignoreMalformed = shouldUseIgnoreMalformed(); int maxValues = randomBoolean() ? 1 : 5; SyntheticSourceSupport support = syntheticSourceSupport(ignoreMalformed); - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); support.example(maxValues).mapping().accept(b); b.endObject(); - })); + })).documentMapper(); int count = between(2, 1000); String[] expected = new String[count]; try (Directory directory = newDirectory()) { @@ -1232,23 +1232,23 @@ public final void testSyntheticSourceMany() throws IOException { public final void testNoSyntheticSourceForScript() throws IOException { // Fetch the ingest script support to eagerly assumeFalse if the mapper doesn't support ingest scripts ingestScriptSupport(); - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); minimalMapping(b); b.field("script", randomBoolean() ? "empty" : "non-empty"); b.endObject(); - })); + })).documentMapper(); assertThat(syntheticSource(mapper, b -> {}), equalTo("{}")); } public final void testSyntheticSourceInObject() throws IOException { boolean ignoreMalformed = shouldUseIgnoreMalformed(); SyntheticSourceExample syntheticSourceExample = syntheticSourceSupport(ignoreMalformed).example(5); - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("obj").startObject("properties").startObject("field"); syntheticSourceExample.mapping().accept(b); b.endObject().endObject().endObject(); - })); + })).documentMapper(); assertThat(syntheticSource(mapper, b -> { b.startObject("obj"); syntheticSourceExample.buildInput(b); @@ -1261,11 +1261,11 @@ public final void testSyntheticEmptyList() throws IOException { boolean ignoreMalformed = shouldUseIgnoreMalformed(); SyntheticSourceSupport support = syntheticSourceSupport(ignoreMalformed); SyntheticSourceExample syntheticSourceExample = support.example(5); - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); syntheticSourceExample.mapping().accept(b); b.endObject(); - })); + })).documentMapper(); var expected = support.preservesExactSource() ? "{\"field\":[]}" : "{}"; assertThat(syntheticSource(mapper, b -> b.startArray("field").endArray()), equalTo(expected)); @@ -1374,8 +1374,7 @@ private void testBlockLoader(boolean syntheticSource, boolean columnReader) thro // TODO: fix this by improving block loader support: https://github.com/elastic/elasticsearch/issues/115257 assumeTrue("inconsistent synthetic source testing support with ignore above", syntheticSourceSupport.ignoreAbove() == false); } - // TODO: only rely index.mapping.source.mode setting - XContentBuilder mapping = syntheticSource ? syntheticSourceFieldMapping(example.mapping) : fieldMapping(example.mapping); + XContentBuilder mapping = fieldMapping(example.mapping); MapperService mapper = syntheticSource ? createSytheticSourceMapperService(mapping) : createMapperService(mapping); BlockReaderSupport blockReaderSupport = getSupportedReaders(mapper, "field"); if (syntheticSource) { @@ -1501,11 +1500,11 @@ protected boolean addsValueWhenNotSupplied() { private void assertNoDocValueLoader(CheckedConsumer doc) throws IOException { boolean ignoreMalformed = supportsIgnoreMalformed() ? rarely() : false; SyntheticSourceExample syntheticSourceExample = syntheticSourceSupport(ignoreMalformed).example(5); - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); syntheticSourceExample.mapping().accept(b); b.endObject(); - })); + })).documentMapper(); try (Directory directory = newDirectory()) { RandomIndexWriter iw = new RandomIndexWriter(random(), directory); iw.addDocument(mapper.parse(source(doc)).rootDoc()); @@ -1530,7 +1529,7 @@ public final void testSyntheticSourceInvalid() throws IOException { Exception e = expectThrows( IllegalArgumentException.class, example.toString(), - () -> createDocumentMapper(syntheticSourceMapping(b -> { + () -> createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); example.mapping.accept(b); b.endObject(); @@ -1543,11 +1542,11 @@ public final void testSyntheticSourceInvalid() throws IOException { public final void testSyntheticSourceInNestedObject() throws IOException { boolean ignoreMalformed = shouldUseIgnoreMalformed(); SyntheticSourceExample syntheticSourceExample = syntheticSourceSupport(ignoreMalformed).example(5); - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("obj").field("type", "nested").startObject("properties").startObject("field"); syntheticSourceExample.mapping().accept(b); b.endObject().endObject().endObject(); - })); + })).documentMapper(); assertThat(syntheticSource(mapper, b -> { b.startObject("obj"); syntheticSourceExample.buildInput(b); @@ -1561,23 +1560,23 @@ protected SyntheticSourceSupport syntheticSourceSupportForKeepTests(boolean igno public void testSyntheticSourceKeepNone() throws IOException { SyntheticSourceExample example = syntheticSourceSupportForKeepTests(shouldUseIgnoreMalformed()).example(1); - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); b.field("synthetic_source_keep", "none"); example.mapping().accept(b); b.endObject(); - })); + })).documentMapper(); assertThat(syntheticSource(mapper, example::buildInput), equalTo(example.expected())); } public void testSyntheticSourceKeepAll() throws IOException { SyntheticSourceExample example = syntheticSourceSupportForKeepTests(shouldUseIgnoreMalformed()).example(1); - DocumentMapper mapperAll = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapperAll = createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); b.field("synthetic_source_keep", "all"); example.mapping().accept(b); b.endObject(); - })); + })).documentMapper(); var builder = XContentFactory.jsonBuilder(); builder.startObject(); @@ -1589,12 +1588,12 @@ public void testSyntheticSourceKeepAll() throws IOException { public void testSyntheticSourceKeepArrays() throws IOException { SyntheticSourceExample example = syntheticSourceSupportForKeepTests(shouldUseIgnoreMalformed()).example(1); - DocumentMapper mapperAll = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapperAll = createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); b.field("synthetic_source_keep", randomFrom("arrays", "all")); // Both options keep array source. example.mapping().accept(b); b.endObject(); - })); + })).documentMapper(); int elementCount = randomIntBetween(2, 5); CheckedConsumer buildInput = (XContentBuilder builder) -> { diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapperTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapperTests.java index d340a55a4173d..fd2888129817f 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapperTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapperTests.java @@ -374,9 +374,9 @@ protected IngestScriptSupport ingestScriptSupport() { } public void testArrayValueSyntheticSource() throws Exception { - DocumentMapper mapper = createDocumentMapper( - syntheticSourceFieldMapping(b -> b.field("type", "histogram").field("ignore_malformed", "true")) - ); + DocumentMapper mapper = createSytheticSourceMapperService( + fieldMapping(b -> b.field("type", "histogram").field("ignore_malformed", "true")) + ).documentMapper(); var randomString = randomAlphaOfLength(10); CheckedConsumer arrayValue = b -> { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java index db250b16eab16..3f7dfc912d76c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java @@ -688,8 +688,9 @@ public void testIgnoredSourceFilteringIntegration() throws Exception { Settings.builder() .put("index.mapping.total_fields.limit", 1) .put("index.mapping.total_fields.ignore_dynamic_beyond_limit", true) + .put("index.mapping.source.mode", "synthetic") .build(), - syntheticSourceMapping(b -> { + mapping(b -> { b.startObject("foo").field("type", "keyword").endObject(); }) ).documentMapper(); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java index b924ad492c0c6..59955c23c16e0 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; import org.elasticsearch.plugins.Plugin; @@ -75,11 +76,6 @@ public void testText() throws Exception { private void createIndex(CheckedFunction fieldMapping) throws IOException { XContentBuilder mapping = JsonXContent.contentBuilder(); mapping.startObject(); - { - mapping.startObject("_source"); - mapping.field("mode", "synthetic"); - mapping.endObject(); - } { mapping.startObject("properties"); mapping.startObject("id").field("type", "keyword").endObject(); @@ -90,6 +86,8 @@ private void createIndex(CheckedFunction b.field("type", CONTENT_TYPE) .array("metrics", "min", "max") .field("default_metric", "min") .field("ignore_malformed", "true") ) - ); + ).documentMapper(); var randomString = randomAlphaOfLength(10); CheckedConsumer arrayValue = b -> { diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java index 92aac7897bcfd..4661fe77e8b11 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java @@ -230,7 +230,7 @@ protected boolean allowsNullValues() { * contain the field. */ public void testNullValueBlockLoader() throws IOException { - MapperService mapper = createMapperService(syntheticSourceMapping(b -> { + MapperService mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); b.field("type", "constant_keyword"); b.endObject(); @@ -325,11 +325,11 @@ protected Function loadBlockExpected() { } public void testNullValueSyntheticSource() throws IOException { - DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { b.startObject("field"); b.field("type", "constant_keyword"); b.endObject(); - })); + })).documentMapper(); assertThat(syntheticSource(mapper, b -> {}), equalTo("{}")); } From e543e824f651581753ed63b7bc19069f8cddafd0 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Wed, 30 Oct 2024 17:20:48 +0100 Subject: [PATCH 209/324] Clearer error on modifying read-only role mappings (#115951) Copy of: https://github.com/elastic/elasticsearch/pull/115509 also due to temporary repo unavailability. That PR is already approved. --- .../rolemapping/PutRoleMappingRequest.java | 18 ++++-- .../rolemapping/RoleMappingRestIT.java | 32 +++++++++- .../PutRoleMappingRequestTests.java | 61 +++++++++++++++++++ 3 files changed, 106 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java index f85ca260c3fff..1ce27c1e7c372 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java @@ -26,6 +26,7 @@ import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG; /** * Request object for adding/updating a role-mapping to the native store @@ -77,10 +78,19 @@ public ActionRequestValidationException validate(boolean validateMetadata) { validationException = addValidationError("role-mapping rules are missing", validationException); } if (validateMetadata && MetadataUtils.containsReservedMetadata(metadata)) { - validationException = addValidationError( - "metadata keys may not start with [" + MetadataUtils.RESERVED_PREFIX + "]", - validationException - ); + if (metadata.containsKey(READ_ONLY_ROLE_MAPPING_METADATA_FLAG)) { + validationException = addValidationError( + "metadata contains [" + + READ_ONLY_ROLE_MAPPING_METADATA_FLAG + + "] flag. You cannot create or update role-mappings with a read-only flag", + validationException + ); + } else { + validationException = addValidationError( + "metadata keys may not start with [" + MetadataUtils.RESERVED_PREFIX + "]", + validationException + ); + } } return validationException; } diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java index 51970af4b88a0..d40c933d94b44 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java @@ -150,6 +150,32 @@ public void testPutAndDeleteRoleMappings() throws IOException { ); } + // simulate attempt to update a CS role mapping (the request will include a _read_only metadata flag + { + var ex = expectThrows( + ResponseException.class, + () -> putMapping(expressionRoleMapping("role-mapping-1-read-only-operator-mapping", Map.of("_read_only", true))) + ); + assertThat( + ex.getMessage(), + containsString("metadata contains [_read_only] flag. You cannot create or update role-mappings with a read-only flag") + ); + } + + { + var ex = expectThrows( + ResponseException.class, + () -> putMapping(expressionRoleMapping("role-mapping-1-read-only-operator-mapping")) + ); + assertThat( + ex.getMessage(), + containsString( + "Invalid mapping name [role-mapping-1-read-only-operator-mapping]. " + + "[-read-only-operator-mapping] is not an allowed suffix" + ) + ); + } + // Also fails even if a CS role mapping with that name does not exist { var ex = expectThrows( @@ -209,12 +235,16 @@ private static Response deleteMapping(String name, @Nullable String warning) thr } private static ExpressionRoleMapping expressionRoleMapping(String name) { + return expressionRoleMapping(name, Map.of()); + } + + private static ExpressionRoleMapping expressionRoleMapping(String name, Map metadata) { return new ExpressionRoleMapping( name, new FieldExpression("username", List.of(new FieldExpression.FieldValue(randomAlphaOfLength(10)))), List.of(randomAlphaOfLength(5)), null, - Map.of(), + metadata, true ); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java index 0fa648305d029..57b50dfd8e6a9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java @@ -11,14 +11,19 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; import org.junit.Before; import org.mockito.Mockito; import java.util.Collections; +import java.util.Map; +import static org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class PutRoleMappingRequestTests extends ESTestCase { @@ -54,6 +59,62 @@ public void testValidateMetadataKeys() throws Exception { assertValidationFailure(request, "metadata key"); } + public void testValidateReadyOnlyMetadataKey() { + assertValidationFailure( + builder.name("test") + .roles("superuser") + .expression(Mockito.mock(RoleMapperExpression.class)) + .metadata(Map.of("_secret", false, ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG, true)) + .request(), + "metadata contains [" + + READ_ONLY_ROLE_MAPPING_METADATA_FLAG + + "] flag. You cannot create or update role-mappings with a read-only flag" + ); + + assertValidationFailure( + builder.name("test") + .roles("superuser") + .expression(Mockito.mock(RoleMapperExpression.class)) + .metadata(Map.of(ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG, true)) + .request(), + "metadata contains [" + + READ_ONLY_ROLE_MAPPING_METADATA_FLAG + + "] flag. You cannot create or update role-mappings with a read-only flag" + ); + } + + public void testValidateMetadataKeySkipped() { + assertThat( + builder.name("test") + .roles("superuser") + .expression(Mockito.mock(RoleMapperExpression.class)) + .metadata(Map.of("_secret", false, ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG, true)) + .request() + .validate(false), + is(nullValue()) + ); + + assertThat( + builder.name("test") + .roles("superuser") + .expression(Mockito.mock(RoleMapperExpression.class)) + .metadata(Map.of(ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG, true)) + .request() + .validate(false), + is(nullValue()) + ); + + assertThat( + builder.name("test") + .roles("superuser") + .expression(Mockito.mock(RoleMapperExpression.class)) + .metadata(Map.of("_secret", false)) + .request() + .validate(false), + is(nullValue()) + ); + } + private void assertValidationFailure(PutRoleMappingRequest request, String expectedMessage) { final ValidationException ve = request.validate(); assertThat(ve, notNullValue()); From 5f4e681788347969b71c8948df849cf12ea3f5d0 Mon Sep 17 00:00:00 2001 From: Stanislav Malyshev Date: Wed, 30 Oct 2024 10:37:24 -0600 Subject: [PATCH 210/324] Fix CCS stats test (#115801) Set index stats to be refreshed immediately - cached 0 size may be the reason why it fails. Fixes #115600 --- muted-tests.yml | 3 --- .../rest-api-spec/test/cluster.stats/30_ccs_stats.yml | 5 +++++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 131bbb14aec10..2c0b1c666d47e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -236,9 +236,6 @@ tests: - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/esql/esql-across-clusters/line_197} issue: https://github.com/elastic/elasticsearch/issues/115575 -- class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT - method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} - issue: https://github.com/elastic/elasticsearch/issues/115600 - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/30_ccs_stats.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/30_ccs_stats.yml index 689c58dad31e6..5f18bd496c6c8 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/30_ccs_stats.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/30_ccs_stats.yml @@ -70,6 +70,7 @@ body: settings: number_of_replicas: 0 + store.stats_refresh_interval: 0ms - do: index: @@ -79,6 +80,10 @@ body: foo: bar + - do: + indices.flush: + index: test + - do: cluster.health: wait_for_status: green From 8b9da15e43ae8f12d5a74d6041c122e7384d7313 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Wed, 30 Oct 2024 12:50:22 -0400 Subject: [PATCH 211/324] [ML] Handle Errors and pre-streaming exceptions (#115868) If we fail to establish a connection to bedrock, the error is returned in the client's CompletableFuture. We will forward it to the listener via the stream processor. Any Errors are thrown on another thread. --- docs/changelog/115868.yaml | 5 +++++ .../amazonbedrock/AmazonBedrockInferenceClient.java | 7 ++++++- .../amazonbedrock/AmazonBedrockStreamingChatProcessor.java | 4 +++- .../inference/rest/ServerSentEventsRestActionListener.java | 2 +- 4 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/115868.yaml diff --git a/docs/changelog/115868.yaml b/docs/changelog/115868.yaml new file mode 100644 index 0000000000000..abe6a63c3a4d8 --- /dev/null +++ b/docs/changelog/115868.yaml @@ -0,0 +1,5 @@ +pr: 115868 +summary: Forward bedrock connection errors to user +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java index 040aa99d81346..bd03909db380c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.services.bedrockruntime.model.InvokeModelResponse; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.SpecialPermission; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.xcontent.ChunkedToXContent; @@ -93,11 +94,15 @@ public Flow.Publisher converseStream(ConverseStream internalClient.converseStream( request, ConverseStreamResponseHandler.builder().subscriber(() -> FlowAdapters.toSubscriber(awsResponseProcessor)).build() - ); + ).exceptionally(e -> { + awsResponseProcessor.onError(e); + return null; // Void + }); return awsResponseProcessor; } private void onFailure(ActionListener listener, Throwable t, String method) { + ExceptionsHelper.maybeDieOnAnotherThread(t); var unwrappedException = t; if (t instanceof CompletionException || t instanceof ExecutionException) { unwrappedException = t.getCause() != null ? t.getCause() : t; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java index 12f394e300e0f..33e756b75c339 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java @@ -12,6 +12,7 @@ import software.amazon.awssdk.services.bedrockruntime.model.ConverseStreamResponseHandler; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Strings; import org.elasticsearch.logging.LogManager; @@ -89,6 +90,7 @@ private void sendDownstreamOnAnotherThread(ContentBlockDeltaEvent event) { @Override public void onError(Throwable amazonBedrockRuntimeException) { + ExceptionsHelper.maybeDieOnAnotherThread(amazonBedrockRuntimeException); error.set( new ElasticsearchException( Strings.format("AmazonBedrock StreamingChatProcessor failure: [%s]", amazonBedrockRuntimeException.getMessage()), @@ -96,7 +98,7 @@ public void onError(Throwable amazonBedrockRuntimeException) { ) ); if (isDone.compareAndSet(false, true) && checkAndResetDemand() && onErrorCalled.compareAndSet(false, true)) { - downstream.onError(error.get()); + runOnUtilityThreadPool(() -> downstream.onError(amazonBedrockRuntimeException)); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java index a397da05b1ce4..3177474ea8ca6 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java @@ -223,7 +223,7 @@ public void onNext(ChunkedToXContent item) { @Override public void onError(Throwable throwable) { if (isLastPart.compareAndSet(false, true)) { - logger.error("A failure occurred in ElasticSearch while streaming the response.", throwable); + logger.warn("A failure occurred in ElasticSearch while streaming the response.", throwable); nextBodyPartListener().onResponse(new ServerSentEventResponseBodyPart(ServerSentEvents.ERROR, errorChunk(throwable))); } } From e5c7fce65e8a745697c18948462860d870a24693 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 31 Oct 2024 04:25:58 +1100 Subject: [PATCH 212/324] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=ml/inference_crud/Test delete given model referenced by pipeline} #115970 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 2c0b1c666d47e..ae1e641f12347 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -272,6 +272,9 @@ tests: - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testProcessFileChanges issue: https://github.com/elastic/elasticsearch/issues/115280 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=ml/inference_crud/Test delete given model referenced by pipeline} + issue: https://github.com/elastic/elasticsearch/issues/115970 # Examples: # From 4ecdfbb214b1a8ecb8e17e9c4ab9819f97fd638d Mon Sep 17 00:00:00 2001 From: Ying Mao Date: Wed, 30 Oct 2024 13:29:58 -0400 Subject: [PATCH 213/324] [Inference API] Add API to get configuration of inference services (#114862) * Adding API to get list of service configurations * Update docs/changelog/114862.yaml * Fixing some configurations * PR feedback -> Stream.of * PR feedback -> singleton * Renaming ServiceConfiguration to SettingsConfiguration. Adding TaskSettingsConfiguration * Adding task type settings configuration to response * PR feedback --- docs/changelog/114862.yaml | 5 + server/src/main/java/module-info.java | 1 + .../inference/EmptySettingsConfiguration.java | 19 + .../inference/InferenceService.java | 9 + .../InferenceServiceConfiguration.java | 183 ++++++ .../inference/SettingsConfiguration.java | 592 ++++++++++++++++++ .../inference/TaskSettingsConfiguration.java | 154 +++++ .../SettingsConfigurationDependency.java | 140 +++++ .../SettingsConfigurationDisplayType.java | 35 ++ .../SettingsConfigurationFieldType.java | 37 ++ .../SettingsConfigurationSelectOption.java | 132 ++++ .../SettingsConfigurationValidation.java | 154 +++++ .../SettingsConfigurationValidationType.java | 34 + ...nferenceServiceConfigurationTestUtils.java | 41 ++ .../InferenceServiceConfigurationTests.java | 190 ++++++ .../SettingsConfigurationTestUtils.java | 74 +++ .../inference/SettingsConfigurationTests.java | 287 +++++++++ .../TaskSettingsConfigurationTestUtils.java | 40 ++ .../TaskSettingsConfigurationTests.java | 94 +++ ...SettingsConfigurationDisplayTypeTests.java | 30 + .../SettingsConfigurationFieldTypeTests.java | 28 + ...tingsConfigurationValidationTypeTests.java | 29 + .../action/GetInferenceServicesAction.java | 113 ++++ .../inference/InferenceBaseRestTest.java | 26 +- .../xpack/inference/InferenceCrudIT.java | 135 ++++ .../TestDenseInferenceServiceExtension.java | 53 ++ .../mock/TestRerankingServiceExtension.java | 53 ++ .../TestSparseInferenceServiceExtension.java | 65 ++ ...stStreamingCompletionServiceExtension.java | 53 ++ .../xpack/inference/InferencePlugin.java | 9 +- .../TransportGetInferenceServicesAction.java | 102 +++ ...abaCloudSearchEmbeddingsRequestEntity.java | 2 +- ...AlibabaCloudSearchSparseRequestEntity.java | 4 +- .../cohere/CohereEmbeddingsRequestEntity.java | 2 +- .../xpack/inference/rest/Paths.java | 3 + .../rest/RestGetInferenceServicesAction.java | 50 ++ .../AlibabaCloudSearchService.java | 137 +++- .../AlibabaCloudSearchEmbeddingsModel.java | 40 ++ .../sparse/AlibabaCloudSearchSparseModel.java | 55 ++ .../AmazonBedrockSecretSettings.java | 39 ++ .../amazonbedrock/AmazonBedrockService.java | 97 +++ .../AmazonBedrockChatCompletionModel.java | 69 ++ .../services/anthropic/AnthropicService.java | 64 ++ .../AnthropicChatCompletionModel.java | 69 ++ .../azureaistudio/AzureAiStudioService.java | 99 +++ .../AzureAiStudioChatCompletionModel.java | 33 + .../AzureAiStudioEmbeddingsModel.java | 71 +++ .../AzureOpenAiSecretSettings.java | 39 ++ .../azureopenai/AzureOpenAiService.java | 92 ++- .../AzureOpenAiCompletionModel.java | 33 + .../AzureOpenAiEmbeddingsModel.java | 33 + .../services/cohere/CohereService.java | 47 ++ .../embeddings/CohereEmbeddingsModel.java | 59 ++ .../cohere/rerank/CohereRerankModel.java | 46 ++ .../elastic/ElasticInferenceService.java | 71 +++ .../BaseElasticsearchInternalService.java | 7 - .../elasticsearch/CustomElandRerankModel.java | 36 ++ .../ElasticsearchInternalService.java | 104 ++- .../googleaistudio/GoogleAiStudioService.java | 60 ++ .../GoogleVertexAiSecretSettings.java | 28 + .../googlevertexai/GoogleVertexAiService.java | 92 +++ .../GoogleVertexAiEmbeddingsModel.java | 33 + .../rerank/GoogleVertexAiRerankModel.java | 33 + .../huggingface/HuggingFaceService.java | 62 ++ .../elser/HuggingFaceElserService.java | 60 ++ .../HuggingFaceElserServiceSettings.java | 2 +- .../ibmwatsonx/IbmWatsonxService.java | 107 ++++ .../services/mistral/MistralService.java | 73 +++ .../services/openai/OpenAiService.java | 99 +++ .../completion/OpenAiChatCompletionModel.java | 34 + .../embeddings/OpenAiEmbeddingsModel.java | 34 + .../settings/DefaultSecretSettings.java | 23 + .../services/settings/RateLimitSettings.java | 24 + .../services/SenderServiceTests.java | 26 + .../AlibabaCloudSearchServiceTests.java | 237 +++++++ .../AmazonBedrockServiceTests.java | 212 +++++++ .../anthropic/AnthropicServiceTests.java | 136 ++++ .../AzureAiStudioServiceTests.java | 222 +++++++ .../azureopenai/AzureOpenAiServiceTests.java | 158 +++++ .../services/cohere/CohereServiceTests.java | 166 +++++ .../elastic/ElasticInferenceServiceTests.java | 79 +++ .../ElasticsearchInternalServiceTests.java | 125 ++++ .../GoogleAiStudioServiceTests.java | 83 +++ .../GoogleVertexAiServiceTests.java | 145 +++++ .../HuggingFaceElserServiceTests.java | 84 +++ .../huggingface/HuggingFaceServiceTests.java | 83 +++ .../ibmwatsonx/IbmWatsonxServiceTests.java | 107 ++++ .../services/mistral/MistralServiceTests.java | 93 +++ .../services/openai/OpenAiServiceTests.java | 144 +++++ .../xpack/security/operator/Constants.java | 1 + 90 files changed, 7156 insertions(+), 27 deletions(-) create mode 100644 docs/changelog/114862.yaml create mode 100644 server/src/main/java/org/elasticsearch/inference/EmptySettingsConfiguration.java create mode 100644 server/src/main/java/org/elasticsearch/inference/InferenceServiceConfiguration.java create mode 100644 server/src/main/java/org/elasticsearch/inference/SettingsConfiguration.java create mode 100644 server/src/main/java/org/elasticsearch/inference/TaskSettingsConfiguration.java create mode 100644 server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationDependency.java create mode 100644 server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationDisplayType.java create mode 100644 server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationFieldType.java create mode 100644 server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationSelectOption.java create mode 100644 server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidation.java create mode 100644 server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidationType.java create mode 100644 server/src/test/java/org/elasticsearch/inference/InferenceServiceConfigurationTestUtils.java create mode 100644 server/src/test/java/org/elasticsearch/inference/InferenceServiceConfigurationTests.java create mode 100644 server/src/test/java/org/elasticsearch/inference/SettingsConfigurationTestUtils.java create mode 100644 server/src/test/java/org/elasticsearch/inference/SettingsConfigurationTests.java create mode 100644 server/src/test/java/org/elasticsearch/inference/TaskSettingsConfigurationTestUtils.java create mode 100644 server/src/test/java/org/elasticsearch/inference/TaskSettingsConfigurationTests.java create mode 100644 server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationDisplayTypeTests.java create mode 100644 server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationFieldTypeTests.java create mode 100644 server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidationTypeTests.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceServicesAction.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceServicesAction.java diff --git a/docs/changelog/114862.yaml b/docs/changelog/114862.yaml new file mode 100644 index 0000000000000..fb5f05fb8e2f9 --- /dev/null +++ b/docs/changelog/114862.yaml @@ -0,0 +1,5 @@ +pr: 114862 +summary: "[Inference API] Add API to get configuration of inference services" +area: Machine Learning +type: enhancement +issues: [] diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 89fc5f676cb1e..17b90f08bf051 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -469,5 +469,6 @@ org.elasticsearch.serverless.shardhealth, org.elasticsearch.serverless.apifiltering; exports org.elasticsearch.lucene.spatial; + exports org.elasticsearch.inference.configuration; } diff --git a/server/src/main/java/org/elasticsearch/inference/EmptySettingsConfiguration.java b/server/src/main/java/org/elasticsearch/inference/EmptySettingsConfiguration.java new file mode 100644 index 0000000000000..8a3f96750f2ea --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/EmptySettingsConfiguration.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import java.util.Collections; +import java.util.Map; + +public class EmptySettingsConfiguration { + public static Map get() { + return Collections.emptyMap(); + } +} diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index 2c99563955746..24b305e382160 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -16,6 +16,7 @@ import org.elasticsearch.core.TimeValue; import java.io.Closeable; +import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -71,6 +72,14 @@ default void init(Client client) {} */ Model parsePersistedConfig(String modelId, TaskType taskType, Map config); + InferenceServiceConfiguration getConfiguration(); + + /** + * The task types supported by the service + * @return Set of supported. + */ + EnumSet supportedTaskTypes(); + /** * Perform inference on the model. * diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceServiceConfiguration.java b/server/src/main/java/org/elasticsearch/inference/InferenceServiceConfiguration.java new file mode 100644 index 0000000000000..c8bd4f2e27e8b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/InferenceServiceConfiguration.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Represents the configuration field settings for an inference provider. + */ +public class InferenceServiceConfiguration implements Writeable, ToXContentObject { + + private final String provider; + private final List taskTypes; + private final Map configuration; + + /** + * Constructs a new {@link InferenceServiceConfiguration} instance with specified properties. + * + * @param provider The name of the service provider. + * @param taskTypes A list of {@link TaskSettingsConfiguration} supported by the service provider. + * @param configuration The configuration of the service provider, defined by {@link SettingsConfiguration}. + */ + private InferenceServiceConfiguration( + String provider, + List taskTypes, + Map configuration + ) { + this.provider = provider; + this.taskTypes = taskTypes; + this.configuration = configuration; + } + + public InferenceServiceConfiguration(StreamInput in) throws IOException { + this.provider = in.readString(); + this.taskTypes = in.readCollectionAsList(TaskSettingsConfiguration::new); + this.configuration = in.readMap(SettingsConfiguration::new); + } + + static final ParseField PROVIDER_FIELD = new ParseField("provider"); + static final ParseField TASK_TYPES_FIELD = new ParseField("task_types"); + static final ParseField CONFIGURATION_FIELD = new ParseField("configuration"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "inference_service_configuration", + true, + args -> { + List taskTypes = (ArrayList) args[1]; + return new InferenceServiceConfiguration.Builder().setProvider((String) args[0]) + .setTaskTypes((List) args[1]) + .setConfiguration((Map) args[2]) + .build(); + } + ); + + static { + PARSER.declareString(constructorArg(), PROVIDER_FIELD); + PARSER.declareObjectArray(constructorArg(), (p, c) -> TaskSettingsConfiguration.fromXContent(p), TASK_TYPES_FIELD); + PARSER.declareObject(constructorArg(), (p, c) -> p.map(), CONFIGURATION_FIELD); + } + + public String getProvider() { + return provider; + } + + public List getTaskTypes() { + return taskTypes; + } + + public Map getConfiguration() { + return configuration; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(PROVIDER_FIELD.getPreferredName(), provider); + builder.field(TASK_TYPES_FIELD.getPreferredName(), taskTypes); + builder.field(CONFIGURATION_FIELD.getPreferredName(), configuration); + } + builder.endObject(); + return builder; + } + + public static InferenceServiceConfiguration fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public static InferenceServiceConfiguration fromXContentBytes(BytesReference source, XContentType xContentType) { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return InferenceServiceConfiguration.fromXContent(parser); + } catch (IOException e) { + throw new ElasticsearchParseException("failed to parse inference service configuration", e); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(provider); + out.writeCollection(taskTypes); + out.writeMapValues(configuration); + } + + public Map toMap() { + Map map = new HashMap<>(); + + map.put(PROVIDER_FIELD.getPreferredName(), provider); + map.put(TASK_TYPES_FIELD.getPreferredName(), taskTypes); + map.put(CONFIGURATION_FIELD.getPreferredName(), configuration); + + return map; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + InferenceServiceConfiguration that = (InferenceServiceConfiguration) o; + return provider.equals(that.provider) + && Objects.equals(taskTypes, that.taskTypes) + && Objects.equals(configuration, that.configuration); + } + + @Override + public int hashCode() { + return Objects.hash(provider, taskTypes, configuration); + } + + public static class Builder { + + private String provider; + private List taskTypes; + private Map configuration; + + public Builder setProvider(String provider) { + this.provider = provider; + return this; + } + + public Builder setTaskTypes(List taskTypes) { + this.taskTypes = taskTypes; + return this; + } + + public Builder setConfiguration(Map configuration) { + this.configuration = configuration; + return this; + } + + public InferenceServiceConfiguration build() { + return new InferenceServiceConfiguration(provider, taskTypes, configuration); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/inference/SettingsConfiguration.java b/server/src/main/java/org/elasticsearch/inference/SettingsConfiguration.java new file mode 100644 index 0000000000000..fb97e62f01b19 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/SettingsConfiguration.java @@ -0,0 +1,592 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.configuration.SettingsConfigurationDependency; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; +import org.elasticsearch.inference.configuration.SettingsConfigurationValidation; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParseException; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Represents the configuration field settings for an inference provider. + */ +public class SettingsConfiguration implements Writeable, ToXContentObject { + + @Nullable + private final String category; + @Nullable + private final Object defaultValue; + @Nullable + private final List dependsOn; + @Nullable + private final SettingsConfigurationDisplayType display; + private final String label; + @Nullable + private final List options; + @Nullable + private final Integer order; + @Nullable + private final String placeholder; + private final boolean required; + private final boolean sensitive; + @Nullable + private final String tooltip; + @Nullable + private final SettingsConfigurationFieldType type; + @Nullable + private final List uiRestrictions; + @Nullable + private final List validations; + @Nullable + private final Object value; + + /** + * Constructs a new {@link SettingsConfiguration} instance with specified properties. + * + * @param category The category of the configuration field. + * @param defaultValue The default value for the configuration. + * @param dependsOn A list of {@link SettingsConfigurationDependency} indicating dependencies on other configurations. + * @param display The display type, defined by {@link SettingsConfigurationDisplayType}. + * @param label The display label associated with the config field. + * @param options A list of {@link SettingsConfigurationSelectOption} for selectable options. + * @param order The order in which this configuration appears. + * @param placeholder A placeholder text for the configuration field. + * @param required A boolean indicating whether the configuration is required. + * @param sensitive A boolean indicating whether the configuration contains sensitive information. + * @param tooltip A tooltip text providing additional information about the configuration. + * @param type The type of the configuration field, defined by {@link SettingsConfigurationFieldType}. + * @param uiRestrictions A list of UI restrictions in string format. + * @param validations A list of {@link SettingsConfigurationValidation} for validating the configuration. + * @param value The current value of the configuration. + */ + private SettingsConfiguration( + String category, + Object defaultValue, + List dependsOn, + SettingsConfigurationDisplayType display, + String label, + List options, + Integer order, + String placeholder, + boolean required, + boolean sensitive, + String tooltip, + SettingsConfigurationFieldType type, + List uiRestrictions, + List validations, + Object value + ) { + this.category = category; + this.defaultValue = defaultValue; + this.dependsOn = dependsOn; + this.display = display; + this.label = label; + this.options = options; + this.order = order; + this.placeholder = placeholder; + this.required = required; + this.sensitive = sensitive; + this.tooltip = tooltip; + this.type = type; + this.uiRestrictions = uiRestrictions; + this.validations = validations; + this.value = value; + } + + public SettingsConfiguration(StreamInput in) throws IOException { + this.category = in.readString(); + this.defaultValue = in.readGenericValue(); + this.dependsOn = in.readOptionalCollectionAsList(SettingsConfigurationDependency::new); + this.display = in.readEnum(SettingsConfigurationDisplayType.class); + this.label = in.readString(); + this.options = in.readOptionalCollectionAsList(SettingsConfigurationSelectOption::new); + this.order = in.readOptionalInt(); + this.placeholder = in.readOptionalString(); + this.required = in.readBoolean(); + this.sensitive = in.readBoolean(); + this.tooltip = in.readOptionalString(); + this.type = in.readEnum(SettingsConfigurationFieldType.class); + this.uiRestrictions = in.readOptionalStringCollectionAsList(); + this.validations = in.readOptionalCollectionAsList(SettingsConfigurationValidation::new); + this.value = in.readGenericValue(); + } + + static final ParseField CATEGORY_FIELD = new ParseField("category"); + static final ParseField DEFAULT_VALUE_FIELD = new ParseField("default_value"); + static final ParseField DEPENDS_ON_FIELD = new ParseField("depends_on"); + static final ParseField DISPLAY_FIELD = new ParseField("display"); + static final ParseField LABEL_FIELD = new ParseField("label"); + static final ParseField OPTIONS_FIELD = new ParseField("options"); + static final ParseField ORDER_FIELD = new ParseField("order"); + static final ParseField PLACEHOLDER_FIELD = new ParseField("placeholder"); + static final ParseField REQUIRED_FIELD = new ParseField("required"); + static final ParseField SENSITIVE_FIELD = new ParseField("sensitive"); + static final ParseField TOOLTIP_FIELD = new ParseField("tooltip"); + static final ParseField TYPE_FIELD = new ParseField("type"); + static final ParseField UI_RESTRICTIONS_FIELD = new ParseField("ui_restrictions"); + static final ParseField VALIDATIONS_FIELD = new ParseField("validations"); + static final ParseField VALUE_FIELD = new ParseField("value"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "service_configuration", + true, + args -> { + int i = 0; + return new SettingsConfiguration.Builder().setCategory((String) args[i++]) + .setDefaultValue(args[i++]) + .setDependsOn((List) args[i++]) + .setDisplay((SettingsConfigurationDisplayType) args[i++]) + .setLabel((String) args[i++]) + .setOptions((List) args[i++]) + .setOrder((Integer) args[i++]) + .setPlaceholder((String) args[i++]) + .setRequired((Boolean) args[i++]) + .setSensitive((Boolean) args[i++]) + .setTooltip((String) args[i++]) + .setType((SettingsConfigurationFieldType) args[i++]) + .setUiRestrictions((List) args[i++]) + .setValidations((List) args[i++]) + .setValue(args[i]) + .build(); + } + ); + + static { + PARSER.declareString(optionalConstructorArg(), CATEGORY_FIELD); + PARSER.declareField(optionalConstructorArg(), (p, c) -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return p.text(); + } else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return p.numberValue(); + } else if (p.currentToken() == XContentParser.Token.VALUE_BOOLEAN) { + return p.booleanValue(); + } else if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return null; + } + throw new XContentParseException("Unsupported token [" + p.currentToken() + "]"); + }, DEFAULT_VALUE_FIELD, ObjectParser.ValueType.VALUE); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> SettingsConfigurationDependency.fromXContent(p), DEPENDS_ON_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> SettingsConfigurationDisplayType.displayType(p.text()), + DISPLAY_FIELD, + ObjectParser.ValueType.STRING_OR_NULL + ); + PARSER.declareString(constructorArg(), LABEL_FIELD); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> SettingsConfigurationSelectOption.fromXContent(p), OPTIONS_FIELD); + PARSER.declareInt(optionalConstructorArg(), ORDER_FIELD); + PARSER.declareStringOrNull(optionalConstructorArg(), PLACEHOLDER_FIELD); + PARSER.declareBoolean(optionalConstructorArg(), REQUIRED_FIELD); + PARSER.declareBoolean(optionalConstructorArg(), SENSITIVE_FIELD); + PARSER.declareStringOrNull(optionalConstructorArg(), TOOLTIP_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> p.currentToken() == XContentParser.Token.VALUE_NULL ? null : SettingsConfigurationFieldType.fieldType(p.text()), + TYPE_FIELD, + ObjectParser.ValueType.STRING_OR_NULL + ); + PARSER.declareStringArray(optionalConstructorArg(), UI_RESTRICTIONS_FIELD); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> SettingsConfigurationValidation.fromXContent(p), VALIDATIONS_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> parseConfigurationValue(p), + VALUE_FIELD, + ObjectParser.ValueType.VALUE_OBJECT_ARRAY + ); + } + + public String getCategory() { + return category; + } + + public Object getDefaultValue() { + return defaultValue; + } + + public List getDependsOn() { + return dependsOn; + } + + public SettingsConfigurationDisplayType getDisplay() { + return display; + } + + public String getLabel() { + return label; + } + + public List getOptions() { + return options; + } + + public Integer getOrder() { + return order; + } + + public String getPlaceholder() { + return placeholder; + } + + public boolean isRequired() { + return required; + } + + public boolean isSensitive() { + return sensitive; + } + + public String getTooltip() { + return tooltip; + } + + public SettingsConfigurationFieldType getType() { + return type; + } + + public List getUiRestrictions() { + return uiRestrictions; + } + + public List getValidations() { + return validations; + } + + public Object getValue() { + return value; + } + + /** + * Parses a configuration value from a parser context. + * This method can parse strings, numbers, booleans, objects, and null values, matching the types commonly + * supported in {@link SettingsConfiguration}. + * + * @param p the {@link org.elasticsearch.xcontent.XContentParser} instance from which to parse the configuration value. + */ + public static Object parseConfigurationValue(XContentParser p) throws IOException { + + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return p.text(); + } else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return p.numberValue(); + } else if (p.currentToken() == XContentParser.Token.VALUE_BOOLEAN) { + return p.booleanValue(); + } else if (p.currentToken() == XContentParser.Token.START_OBJECT) { + // Crawler expects the value to be an object + return p.map(); + } else if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return null; + } + throw new XContentParseException("Unsupported token [" + p.currentToken() + "]"); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + if (category != null) { + builder.field(CATEGORY_FIELD.getPreferredName(), category); + } + builder.field(DEFAULT_VALUE_FIELD.getPreferredName(), defaultValue); + if (dependsOn != null) { + builder.xContentList(DEPENDS_ON_FIELD.getPreferredName(), dependsOn); + } else { + builder.xContentList(DEPENDS_ON_FIELD.getPreferredName(), new ArrayList<>()); + } + if (display != null) { + builder.field(DISPLAY_FIELD.getPreferredName(), display.toString()); + } + builder.field(LABEL_FIELD.getPreferredName(), label); + if (options != null) { + builder.xContentList(OPTIONS_FIELD.getPreferredName(), options); + } + if (order != null) { + builder.field(ORDER_FIELD.getPreferredName(), order); + } + if (placeholder != null) { + builder.field(PLACEHOLDER_FIELD.getPreferredName(), placeholder); + } + builder.field(REQUIRED_FIELD.getPreferredName(), required); + builder.field(SENSITIVE_FIELD.getPreferredName(), sensitive); + if (tooltip != null) { + builder.field(TOOLTIP_FIELD.getPreferredName(), tooltip); + } + if (type != null) { + builder.field(TYPE_FIELD.getPreferredName(), type.toString()); + } + if (uiRestrictions != null) { + builder.stringListField(UI_RESTRICTIONS_FIELD.getPreferredName(), uiRestrictions); + } else { + builder.stringListField(UI_RESTRICTIONS_FIELD.getPreferredName(), new ArrayList<>()); + } + if (validations != null) { + builder.xContentList(VALIDATIONS_FIELD.getPreferredName(), validations); + } else { + builder.xContentList(VALIDATIONS_FIELD.getPreferredName(), new ArrayList<>()); + } + builder.field(VALUE_FIELD.getPreferredName(), value); + } + builder.endObject(); + return builder; + } + + public static SettingsConfiguration fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public static SettingsConfiguration fromXContentBytes(BytesReference source, XContentType xContentType) { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return SettingsConfiguration.fromXContent(parser); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse service configuration.", e); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(category); + out.writeGenericValue(defaultValue); + out.writeOptionalCollection(dependsOn); + out.writeEnum(display); + out.writeString(label); + out.writeOptionalCollection(options); + out.writeOptionalInt(order); + out.writeOptionalString(placeholder); + out.writeBoolean(required); + out.writeBoolean(sensitive); + out.writeOptionalString(tooltip); + out.writeEnum(type); + out.writeOptionalStringCollection(uiRestrictions); + out.writeOptionalCollection(validations); + out.writeGenericValue(value); + } + + public Map toMap() { + Map map = new HashMap<>(); + + Optional.ofNullable(category).ifPresent(c -> map.put(CATEGORY_FIELD.getPreferredName(), c)); + map.put(DEFAULT_VALUE_FIELD.getPreferredName(), defaultValue); + + Optional.ofNullable(dependsOn) + .ifPresent(d -> map.put(DEPENDS_ON_FIELD.getPreferredName(), d.stream().map(SettingsConfigurationDependency::toMap).toList())); + + Optional.ofNullable(display).ifPresent(d -> map.put(DISPLAY_FIELD.getPreferredName(), d.toString())); + + map.put(LABEL_FIELD.getPreferredName(), label); + + Optional.ofNullable(options) + .ifPresent(o -> map.put(OPTIONS_FIELD.getPreferredName(), o.stream().map(SettingsConfigurationSelectOption::toMap).toList())); + + Optional.ofNullable(order).ifPresent(o -> map.put(ORDER_FIELD.getPreferredName(), o)); + + Optional.ofNullable(placeholder).ifPresent(p -> map.put(PLACEHOLDER_FIELD.getPreferredName(), p)); + + map.put(REQUIRED_FIELD.getPreferredName(), required); + map.put(SENSITIVE_FIELD.getPreferredName(), sensitive); + + Optional.ofNullable(tooltip).ifPresent(t -> map.put(TOOLTIP_FIELD.getPreferredName(), t)); + + Optional.ofNullable(type).ifPresent(t -> map.put(TYPE_FIELD.getPreferredName(), t.toString())); + + Optional.ofNullable(uiRestrictions).ifPresent(u -> map.put(UI_RESTRICTIONS_FIELD.getPreferredName(), u)); + + Optional.ofNullable(validations) + .ifPresent(v -> map.put(VALIDATIONS_FIELD.getPreferredName(), v.stream().map(SettingsConfigurationValidation::toMap).toList())); + + map.put(VALUE_FIELD.getPreferredName(), value); + + return map; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SettingsConfiguration that = (SettingsConfiguration) o; + return required == that.required + && sensitive == that.sensitive + && Objects.equals(category, that.category) + && Objects.equals(defaultValue, that.defaultValue) + && Objects.equals(dependsOn, that.dependsOn) + && display == that.display + && Objects.equals(label, that.label) + && Objects.equals(options, that.options) + && Objects.equals(order, that.order) + && Objects.equals(placeholder, that.placeholder) + && Objects.equals(tooltip, that.tooltip) + && type == that.type + && Objects.equals(uiRestrictions, that.uiRestrictions) + && Objects.equals(validations, that.validations) + && Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash( + category, + defaultValue, + dependsOn, + display, + label, + options, + order, + placeholder, + required, + sensitive, + tooltip, + type, + uiRestrictions, + validations, + value + ); + } + + public static class Builder { + + private String category; + private Object defaultValue; + private List dependsOn; + private SettingsConfigurationDisplayType display; + private String label; + private List options; + private Integer order; + private String placeholder; + private boolean required; + private boolean sensitive; + private String tooltip; + private SettingsConfigurationFieldType type; + private List uiRestrictions; + private List validations; + private Object value; + + public Builder setCategory(String category) { + this.category = category; + return this; + } + + public Builder setDefaultValue(Object defaultValue) { + this.defaultValue = defaultValue; + return this; + } + + public Builder setDependsOn(List dependsOn) { + this.dependsOn = dependsOn; + return this; + } + + public Builder setDisplay(SettingsConfigurationDisplayType display) { + this.display = display; + return this; + } + + public Builder setLabel(String label) { + this.label = label; + return this; + } + + public Builder setOptions(List options) { + this.options = options; + return this; + } + + public Builder setOrder(Integer order) { + this.order = order; + return this; + } + + public Builder setPlaceholder(String placeholder) { + this.placeholder = placeholder; + return this; + } + + public Builder setRequired(Boolean required) { + this.required = Objects.requireNonNullElse(required, false); + return this; + } + + public Builder setSensitive(Boolean sensitive) { + this.sensitive = Objects.requireNonNullElse(sensitive, false); + return this; + } + + public Builder setTooltip(String tooltip) { + this.tooltip = tooltip; + return this; + } + + public Builder setType(SettingsConfigurationFieldType type) { + this.type = type; + return this; + } + + public Builder setUiRestrictions(List uiRestrictions) { + this.uiRestrictions = uiRestrictions; + return this; + } + + public Builder setValidations(List validations) { + this.validations = validations; + return this; + } + + public Builder setValue(Object value) { + this.value = value; + return this; + } + + public SettingsConfiguration build() { + return new SettingsConfiguration( + category, + defaultValue, + dependsOn, + display, + label, + options, + order, + placeholder, + required, + sensitive, + tooltip, + type, + uiRestrictions, + validations, + value + ); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/inference/TaskSettingsConfiguration.java b/server/src/main/java/org/elasticsearch/inference/TaskSettingsConfiguration.java new file mode 100644 index 0000000000000..150532f138e8d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/TaskSettingsConfiguration.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Represents the configuration field settings for a specific task type inference provider. + */ +public class TaskSettingsConfiguration implements Writeable, ToXContentObject { + + private final TaskType taskType; + private final Map configuration; + + /** + * Constructs a new {@link TaskSettingsConfiguration} instance with specified properties. + * + * @param taskType The {@link TaskType} this configuration describes. + * @param configuration The configuration of the task, defined by {@link SettingsConfiguration}. + */ + private TaskSettingsConfiguration(TaskType taskType, Map configuration) { + this.taskType = taskType; + this.configuration = configuration; + } + + public TaskSettingsConfiguration(StreamInput in) throws IOException { + this.taskType = in.readEnum(TaskType.class); + this.configuration = in.readMap(SettingsConfiguration::new); + } + + static final ParseField TASK_TYPE_FIELD = new ParseField("task_type"); + static final ParseField CONFIGURATION_FIELD = new ParseField("configuration"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "task_configuration", + true, + args -> { + return new TaskSettingsConfiguration.Builder().setTaskType(TaskType.fromString((String) args[0])) + .setConfiguration((Map) args[1]) + .build(); + } + ); + + static { + PARSER.declareString(constructorArg(), TASK_TYPE_FIELD); + PARSER.declareObject(constructorArg(), (p, c) -> p.map(), CONFIGURATION_FIELD); + } + + public TaskType getTaskType() { + return taskType; + } + + public Map getConfiguration() { + return configuration; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(TASK_TYPE_FIELD.getPreferredName(), taskType); + builder.field(CONFIGURATION_FIELD.getPreferredName(), configuration); + } + builder.endObject(); + return builder; + } + + public static TaskSettingsConfiguration fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public static TaskSettingsConfiguration fromXContentBytes(BytesReference source, XContentType xContentType) { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return TaskSettingsConfiguration.fromXContent(parser); + } catch (IOException e) { + throw new ElasticsearchParseException("failed to parse task configuration", e); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(taskType); + out.writeMapValues(configuration); + } + + public Map toMap() { + Map map = new HashMap<>(); + + map.put(TASK_TYPE_FIELD.getPreferredName(), taskType); + map.put(CONFIGURATION_FIELD.getPreferredName(), configuration); + + return map; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TaskSettingsConfiguration that = (TaskSettingsConfiguration) o; + return Objects.equals(taskType, that.taskType) && Objects.equals(configuration, that.configuration); + } + + @Override + public int hashCode() { + return Objects.hash(taskType, configuration); + } + + public static class Builder { + + private TaskType taskType; + private Map configuration; + + public Builder setTaskType(TaskType taskType) { + this.taskType = taskType; + return this; + } + + public Builder setConfiguration(Map configuration) { + this.configuration = configuration; + return this; + } + + public TaskSettingsConfiguration build() { + return new TaskSettingsConfiguration(taskType, configuration); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationDependency.java b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationDependency.java new file mode 100644 index 0000000000000..d319d1a395f85 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationDependency.java @@ -0,0 +1,140 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference.configuration; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParseException; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Represents a dependency within a connector configuration, defining a specific field and its associated value. + * This class is used to encapsulate configuration dependencies in a structured format. + */ +public class SettingsConfigurationDependency implements Writeable, ToXContentObject { + + private final String field; + private final Object value; + + /** + * Constructs a new instance of SettingsConfigurationDependency. + * + * @param field The name of the field in the service dependency. + * @param value The value associated with the field. + */ + public SettingsConfigurationDependency(String field, Object value) { + this.field = field; + this.value = value; + } + + public SettingsConfigurationDependency(StreamInput in) throws IOException { + this.field = in.readString(); + this.value = in.readGenericValue(); + } + + private static final ParseField FIELD_FIELD = new ParseField("field"); + private static final ParseField VALUE_FIELD = new ParseField("value"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "service_configuration_dependency", + true, + args -> new SettingsConfigurationDependency.Builder().setField((String) args[0]).setValue(args[1]).build() + ); + + static { + PARSER.declareString(constructorArg(), FIELD_FIELD); + PARSER.declareField(constructorArg(), (p, c) -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return p.text(); + } else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return p.numberValue(); + } else if (p.currentToken() == XContentParser.Token.VALUE_BOOLEAN) { + return p.booleanValue(); + } else if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return null; + } + throw new XContentParseException("Unsupported token [" + p.currentToken() + "]"); + }, VALUE_FIELD, ObjectParser.ValueType.VALUE); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(FIELD_FIELD.getPreferredName(), field); + builder.field(VALUE_FIELD.getPreferredName(), value); + } + builder.endObject(); + return builder; + } + + public Map toMap() { + Map map = new HashMap<>(); + map.put(FIELD_FIELD.getPreferredName(), field); + map.put(VALUE_FIELD.getPreferredName(), value); + return map; + } + + public static SettingsConfigurationDependency fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(field); + out.writeGenericValue(value); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SettingsConfigurationDependency that = (SettingsConfigurationDependency) o; + return Objects.equals(field, that.field) && Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(field, value); + } + + public static class Builder { + + private String field; + private Object value; + + public Builder setField(String field) { + this.field = field; + return this; + } + + public Builder setValue(Object value) { + this.value = value; + return this; + } + + public SettingsConfigurationDependency build() { + return new SettingsConfigurationDependency(field, value); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationDisplayType.java b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationDisplayType.java new file mode 100644 index 0000000000000..e072238a52d01 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationDisplayType.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference.configuration; + +import java.util.Locale; + +public enum SettingsConfigurationDisplayType { + TEXT, + TEXTBOX, + TEXTAREA, + NUMERIC, + TOGGLE, + DROPDOWN; + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + + public static SettingsConfigurationDisplayType displayType(String type) { + for (SettingsConfigurationDisplayType displayType : SettingsConfigurationDisplayType.values()) { + if (displayType.name().equalsIgnoreCase(type)) { + return displayType; + } + } + throw new IllegalArgumentException("Unknown " + SettingsConfigurationDisplayType.class.getSimpleName() + " [" + type + "]."); + } +} diff --git a/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationFieldType.java b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationFieldType.java new file mode 100644 index 0000000000000..a1cf0b05617ae --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationFieldType.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference.configuration; + +public enum SettingsConfigurationFieldType { + STRING("str"), + INTEGER("int"), + LIST("list"), + BOOLEAN("bool"); + + private final String value; + + SettingsConfigurationFieldType(String value) { + this.value = value; + } + + @Override + public String toString() { + return this.value; + } + + public static SettingsConfigurationFieldType fieldType(String type) { + for (SettingsConfigurationFieldType fieldType : SettingsConfigurationFieldType.values()) { + if (fieldType.value.equals(type)) { + return fieldType; + } + } + throw new IllegalArgumentException("Unknown " + SettingsConfigurationFieldType.class.getSimpleName() + " [" + type + "]."); + } +} diff --git a/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationSelectOption.java b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationSelectOption.java new file mode 100644 index 0000000000000..8ad8d561da58e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationSelectOption.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference.configuration; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParseException; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +public class SettingsConfigurationSelectOption implements Writeable, ToXContentObject { + private final String label; + private final Object value; + + private SettingsConfigurationSelectOption(String label, Object value) { + this.label = label; + this.value = value; + } + + public SettingsConfigurationSelectOption(StreamInput in) throws IOException { + this.label = in.readString(); + this.value = in.readGenericValue(); + } + + private static final ParseField LABEL_FIELD = new ParseField("label"); + private static final ParseField VALUE_FIELD = new ParseField("value"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "service_configuration_select_option", + true, + args -> new SettingsConfigurationSelectOption.Builder().setLabel((String) args[0]).setValue(args[1]).build() + ); + + static { + PARSER.declareString(constructorArg(), LABEL_FIELD); + PARSER.declareField(constructorArg(), (p, c) -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return p.text(); + } else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return p.numberValue(); + } + throw new XContentParseException("Unsupported token [" + p.currentToken() + "]"); + }, VALUE_FIELD, ObjectParser.ValueType.VALUE); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(LABEL_FIELD.getPreferredName(), label); + builder.field(VALUE_FIELD.getPreferredName(), value); + } + builder.endObject(); + return builder; + } + + public Map toMap() { + Map map = new HashMap<>(); + map.put(LABEL_FIELD.getPreferredName(), label); + map.put(VALUE_FIELD.getPreferredName(), value); + return map; + } + + public static SettingsConfigurationSelectOption fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(label); + out.writeGenericValue(value); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SettingsConfigurationSelectOption that = (SettingsConfigurationSelectOption) o; + return Objects.equals(label, that.label) && Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(label, value); + } + + public static class Builder { + + private String label; + private Object value; + + public Builder setLabel(String label) { + this.label = label; + return this; + } + + public Builder setValue(Object value) { + this.value = value; + return this; + } + + public Builder setLabelAndValue(String labelAndValue) { + this.label = labelAndValue; + this.value = labelAndValue; + return this; + } + + public SettingsConfigurationSelectOption build() { + return new SettingsConfigurationSelectOption(label, value); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidation.java b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidation.java new file mode 100644 index 0000000000000..f106442d6d4ac --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidation.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference.configuration; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParseException; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Represents a configuration validation entity, encapsulating a validation constraint and its corresponding type. + * This class is used to define and handle specific validation rules or requirements within a configuration context. + */ +public class SettingsConfigurationValidation implements Writeable, ToXContentObject { + + private final Object constraint; + private final SettingsConfigurationValidationType type; + + /** + * Constructs a new SettingsConfigurationValidation instance with specified constraint and type. + * This constructor initializes the object with a given validation constraint and its associated validation type. + * + * @param constraint The validation constraint (string, number or list), represented as generic Object type. + * @param type The type of configuration validation, specified as an instance of {@link SettingsConfigurationValidationType}. + */ + private SettingsConfigurationValidation(Object constraint, SettingsConfigurationValidationType type) { + this.constraint = constraint; + this.type = type; + } + + public SettingsConfigurationValidation(StreamInput in) throws IOException { + this.constraint = in.readGenericValue(); + this.type = in.readEnum(SettingsConfigurationValidationType.class); + } + + private static final ParseField CONSTRAINT_FIELD = new ParseField("constraint"); + private static final ParseField TYPE_FIELD = new ParseField("type"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "service_configuration_validation", + true, + args -> new SettingsConfigurationValidation.Builder().setConstraint(args[0]) + .setType((SettingsConfigurationValidationType) args[1]) + .build() + ); + + static { + PARSER.declareField( + constructorArg(), + (p, c) -> parseConstraintValue(p), + CONSTRAINT_FIELD, + ObjectParser.ValueType.VALUE_OBJECT_ARRAY + ); + PARSER.declareField( + constructorArg(), + (p, c) -> SettingsConfigurationValidationType.validationType(p.text()), + TYPE_FIELD, + ObjectParser.ValueType.STRING + ); + } + + /** + * Parses the value of a constraint from the XContentParser stream. + * This method is designed to handle various types of constraint values as per the connector's protocol original specification. + * The constraints can be of type string, number, or list of values. + */ + private static Object parseConstraintValue(XContentParser p) throws IOException { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return p.text(); + } else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return p.numberValue(); + } else if (p.currentToken() == XContentParser.Token.START_ARRAY) { + return p.list(); + } + throw new XContentParseException("Unsupported token [" + p.currentToken() + "]"); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(CONSTRAINT_FIELD.getPreferredName(), constraint); + builder.field(TYPE_FIELD.getPreferredName(), type.toString()); + } + builder.endObject(); + return builder; + } + + public Map toMap() { + return Map.of(CONSTRAINT_FIELD.getPreferredName(), constraint, TYPE_FIELD.getPreferredName(), type.toString()); + } + + public static SettingsConfigurationValidation fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeGenericValue(constraint); + out.writeEnum(type); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SettingsConfigurationValidation that = (SettingsConfigurationValidation) o; + return Objects.equals(constraint, that.constraint) && type == that.type; + } + + @Override + public int hashCode() { + return Objects.hash(constraint, type); + } + + public static class Builder { + + private Object constraint; + private SettingsConfigurationValidationType type; + + public Builder setConstraint(Object constraint) { + this.constraint = constraint; + return this; + } + + public Builder setType(SettingsConfigurationValidationType type) { + this.type = type; + return this; + } + + public SettingsConfigurationValidation build() { + return new SettingsConfigurationValidation(constraint, type); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidationType.java b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidationType.java new file mode 100644 index 0000000000000..6fb07d38d7db5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidationType.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference.configuration; + +import java.util.Locale; + +public enum SettingsConfigurationValidationType { + LESS_THAN, + GREATER_THAN, + LIST_TYPE, + INCLUDED_IN, + REGEX; + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + + public static SettingsConfigurationValidationType validationType(String type) { + for (SettingsConfigurationValidationType displayType : SettingsConfigurationValidationType.values()) { + if (displayType.name().equalsIgnoreCase(type)) { + return displayType; + } + } + throw new IllegalArgumentException("Unknown " + SettingsConfigurationValidationType.class.getSimpleName() + " [" + type + "]."); + } +} diff --git a/server/src/test/java/org/elasticsearch/inference/InferenceServiceConfigurationTestUtils.java b/server/src/test/java/org/elasticsearch/inference/InferenceServiceConfigurationTestUtils.java new file mode 100644 index 0000000000000..8d145202f7165 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/inference/InferenceServiceConfigurationTestUtils.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.test.ESTestCase.randomInt; + +public class InferenceServiceConfigurationTestUtils { + + public static InferenceServiceConfiguration getRandomServiceConfigurationField() { + return new InferenceServiceConfiguration.Builder().setProvider(randomAlphaOfLength(10)) + .setTaskTypes(getRandomTaskTypeConfiguration()) + .setConfiguration(getRandomServiceConfiguration(10)) + .build(); + } + + private static List getRandomTaskTypeConfiguration() { + return List.of(TaskSettingsConfigurationTestUtils.getRandomTaskSettingsConfigurationField()); + } + + private static Map getRandomServiceConfiguration(int numFields) { + var numConfigFields = randomInt(numFields); + Map configuration = new HashMap<>(); + for (int i = 0; i < numConfigFields; i++) { + configuration.put(randomAlphaOfLength(10), SettingsConfigurationTestUtils.getRandomSettingsConfigurationField()); + } + + return configuration; + } +} diff --git a/server/src/test/java/org/elasticsearch/inference/InferenceServiceConfigurationTests.java b/server/src/test/java/org/elasticsearch/inference/InferenceServiceConfigurationTests.java new file mode 100644 index 0000000000000..7d97f85360c57 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/inference/InferenceServiceConfigurationTests.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.hamcrest.CoreMatchers.equalTo; + +public class InferenceServiceConfigurationTests extends ESTestCase { + public void testToXContent() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "provider": "some_provider", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": { + "text_field_configuration": { + "default_value": null, + "depends_on": [ + { + "field": "some_field", + "value": true + } + ], + "display": "textbox", + "label": "Very important field", + "options": [], + "order": 4, + "required": true, + "sensitive": true, + "tooltip": "Wow, this tooltip is useful.", + "type": "str", + "ui_restrictions": [], + "validations": null, + "value": "" + }, + "numeric_field_configuration": { + "default_value": 3, + "depends_on": null, + "display": "numeric", + "label": "Very important numeric field", + "options": [], + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "Wow, this tooltip is useful.", + "type": "int", + "ui_restrictions": [], + "validations": [ + { + "constraint": 0, + "type": "greater_than" + } + ], + "value": "" + } + } + }, + { + "task_type": "completion", + "configuration": { + "text_field_configuration": { + "default_value": null, + "depends_on": [ + { + "field": "some_field", + "value": true + } + ], + "display": "textbox", + "label": "Very important field", + "options": [], + "order": 4, + "required": true, + "sensitive": true, + "tooltip": "Wow, this tooltip is useful.", + "type": "str", + "ui_restrictions": [], + "validations": null, + "value": "" + }, + "numeric_field_configuration": { + "default_value": 3, + "depends_on": null, + "display": "numeric", + "label": "Very important numeric field", + "options": [], + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "Wow, this tooltip is useful.", + "type": "int", + "ui_restrictions": [], + "validations": [ + { + "constraint": 0, + "type": "greater_than" + } + ], + "value": "" + } + } + } + ], + "configuration": { + "text_field_configuration": { + "default_value": null, + "depends_on": [ + { + "field": "some_field", + "value": true + } + ], + "display": "textbox", + "label": "Very important field", + "options": [], + "order": 4, + "required": true, + "sensitive": true, + "tooltip": "Wow, this tooltip is useful.", + "type": "str", + "ui_restrictions": [], + "validations": null, + "value": "" + }, + "numeric_field_configuration": { + "default_value": 3, + "depends_on": null, + "display": "numeric", + "label": "Very important numeric field", + "options": [], + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "Wow, this tooltip is useful.", + "type": "int", + "ui_restrictions": [], + "validations": [ + { + "constraint": 0, + "type": "greater_than" + } + ], + "value": "" + } + } + } + """); + + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = InferenceServiceConfiguration.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + public void testToMap() { + InferenceServiceConfiguration configField = InferenceServiceConfigurationTestUtils.getRandomServiceConfigurationField(); + Map configFieldAsMap = configField.toMap(); + + assertThat(configFieldAsMap.get("provider"), equalTo(configField.getProvider())); + assertThat(configFieldAsMap.get("task_types"), equalTo(configField.getTaskTypes())); + assertThat(configFieldAsMap.get("configuration"), equalTo(configField.getConfiguration())); + } +} diff --git a/server/src/test/java/org/elasticsearch/inference/SettingsConfigurationTestUtils.java b/server/src/test/java/org/elasticsearch/inference/SettingsConfigurationTestUtils.java new file mode 100644 index 0000000000000..728dafc5383c1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/inference/SettingsConfigurationTestUtils.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import org.elasticsearch.inference.configuration.SettingsConfigurationDependency; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; +import org.elasticsearch.inference.configuration.SettingsConfigurationValidation; +import org.elasticsearch.inference.configuration.SettingsConfigurationValidationType; + +import java.util.List; + +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.ESTestCase.randomInt; + +public class SettingsConfigurationTestUtils { + + public static SettingsConfiguration getRandomSettingsConfigurationField() { + return new SettingsConfiguration.Builder().setCategory(randomAlphaOfLength(10)) + .setDefaultValue(randomAlphaOfLength(10)) + .setDependsOn(List.of(getRandomSettingsConfigurationDependency())) + .setDisplay(getRandomSettingsConfigurationDisplayType()) + .setLabel(randomAlphaOfLength(10)) + .setOptions(List.of(getRandomSettingsConfigurationSelectOption(), getRandomSettingsConfigurationSelectOption())) + .setOrder(randomInt()) + .setPlaceholder(randomAlphaOfLength(10)) + .setRequired(randomBoolean()) + .setSensitive(randomBoolean()) + .setTooltip(randomAlphaOfLength(10)) + .setType(getRandomConfigurationFieldType()) + .setUiRestrictions(List.of(randomAlphaOfLength(10), randomAlphaOfLength(10))) + .setValidations(List.of(getRandomSettingsConfigurationValidation())) + .setValue(randomAlphaOfLength(10)) + .build(); + } + + private static SettingsConfigurationDependency getRandomSettingsConfigurationDependency() { + return new SettingsConfigurationDependency.Builder().setField(randomAlphaOfLength(10)).setValue(randomAlphaOfLength(10)).build(); + } + + private static SettingsConfigurationSelectOption getRandomSettingsConfigurationSelectOption() { + return new SettingsConfigurationSelectOption.Builder().setLabel(randomAlphaOfLength(10)).setValue(randomAlphaOfLength(10)).build(); + } + + private static SettingsConfigurationValidation getRandomSettingsConfigurationValidation() { + return new SettingsConfigurationValidation.Builder().setConstraint(randomAlphaOfLength(10)) + .setType(getRandomConfigurationValidationType()) + .build(); + } + + public static SettingsConfigurationDisplayType getRandomSettingsConfigurationDisplayType() { + SettingsConfigurationDisplayType[] values = SettingsConfigurationDisplayType.values(); + return values[randomInt(values.length - 1)]; + } + + public static SettingsConfigurationFieldType getRandomConfigurationFieldType() { + SettingsConfigurationFieldType[] values = SettingsConfigurationFieldType.values(); + return values[randomInt(values.length - 1)]; + } + + public static SettingsConfigurationValidationType getRandomConfigurationValidationType() { + SettingsConfigurationValidationType[] values = SettingsConfigurationValidationType.values(); + return values[randomInt(values.length - 1)]; + } +} diff --git a/server/src/test/java/org/elasticsearch/inference/SettingsConfigurationTests.java b/server/src/test/java/org/elasticsearch/inference/SettingsConfigurationTests.java new file mode 100644 index 0000000000000..e1293366a1152 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/inference/SettingsConfigurationTests.java @@ -0,0 +1,287 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.inference.configuration.SettingsConfigurationDependency; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; +import org.elasticsearch.inference.configuration.SettingsConfigurationValidation; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.hamcrest.CoreMatchers.equalTo; + +public class SettingsConfigurationTests extends ESTestCase { + + public void testToXContent() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "default_value": null, + "depends_on": [ + { + "field": "some_field", + "value": true + } + ], + "display": "textbox", + "label": "Very important field", + "options": [], + "order": 4, + "required": true, + "sensitive": false, + "tooltip": "Wow, this tooltip is useful.", + "type": "str", + "ui_restrictions": [], + "validations": [ + { + "constraint": 0, + "type": "greater_than" + } + ], + "value": "" + } + """); + + SettingsConfiguration configuration = SettingsConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + SettingsConfiguration parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = SettingsConfiguration.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + public void testToXContent_WithNumericSelectOptions() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "default_value": null, + "depends_on": [ + { + "field": "some_field", + "value": true + } + ], + "display": "textbox", + "label": "Very important field", + "options": [ + { + "label": "five", + "value": 5 + }, + { + "label": "ten", + "value": 10 + } + ], + "order": 4, + "required": true, + "sensitive": false, + "tooltip": "Wow, this tooltip is useful.", + "type": "str", + "ui_restrictions": [], + "validations": [ + { + "constraint": 0, + "type": "greater_than" + } + ], + "value": "" + } + """); + + SettingsConfiguration configuration = SettingsConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + SettingsConfiguration parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = SettingsConfiguration.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + public void testToXContentCrawlerConfig_WithNullValue() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "label": "nextSyncConfig", + "value": null + } + """); + + SettingsConfiguration configuration = SettingsConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + SettingsConfiguration parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = SettingsConfiguration.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + public void testToXContentWithMultipleConstraintTypes() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "default_value": null, + "depends_on": [ + { + "field": "some_field", + "value": true + } + ], + "display": "textbox", + "label": "Very important field", + "options": [], + "order": 4, + "required": true, + "sensitive": false, + "tooltip": "Wow, this tooltip is useful.", + "type": "str", + "ui_restrictions": [], + "validations": [ + { + "constraint": 32, + "type": "less_than" + }, + { + "constraint": "^\\\\\\\\d{4}-\\\\\\\\d{2}-\\\\\\\\d{2}$", + "type": "regex" + }, + { + "constraint": "int", + "type": "list_type" + }, + { + "constraint": [ + 1, + 2, + 3 + ], + "type": "included_in" + }, + { + "constraint": [ + "string_1", + "string_2", + "string_3" + ], + "type": "included_in" + } + ], + "value": "" + } + """); + + SettingsConfiguration configuration = SettingsConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + SettingsConfiguration parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = SettingsConfiguration.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + public void testToMap() { + SettingsConfiguration configField = SettingsConfigurationTestUtils.getRandomSettingsConfigurationField(); + Map configFieldAsMap = configField.toMap(); + + if (configField.getCategory() != null) { + assertThat(configFieldAsMap.get("category"), equalTo(configField.getCategory())); + } else { + assertFalse(configFieldAsMap.containsKey("category")); + } + + assertThat(configFieldAsMap.get("default_value"), equalTo(configField.getDefaultValue())); + + if (configField.getDependsOn() != null) { + List> dependsOnAsList = configField.getDependsOn() + .stream() + .map(SettingsConfigurationDependency::toMap) + .toList(); + assertThat(configFieldAsMap.get("depends_on"), equalTo(dependsOnAsList)); + } else { + assertFalse(configFieldAsMap.containsKey("depends_on")); + } + + if (configField.getDisplay() != null) { + assertThat(configFieldAsMap.get("display"), equalTo(configField.getDisplay().toString())); + } else { + assertFalse(configFieldAsMap.containsKey("display")); + } + + assertThat(configFieldAsMap.get("label"), equalTo(configField.getLabel())); + + if (configField.getOptions() != null) { + List> optionsAsList = configField.getOptions() + .stream() + .map(SettingsConfigurationSelectOption::toMap) + .toList(); + assertThat(configFieldAsMap.get("options"), equalTo(optionsAsList)); + } else { + assertFalse(configFieldAsMap.containsKey("options")); + } + + if (configField.getOrder() != null) { + assertThat(configFieldAsMap.get("order"), equalTo(configField.getOrder())); + } else { + assertFalse(configFieldAsMap.containsKey("order")); + } + + if (configField.getPlaceholder() != null) { + assertThat(configFieldAsMap.get("placeholder"), equalTo(configField.getPlaceholder())); + } else { + assertFalse(configFieldAsMap.containsKey("placeholder")); + } + + assertThat(configFieldAsMap.get("required"), equalTo(configField.isRequired())); + assertThat(configFieldAsMap.get("sensitive"), equalTo(configField.isSensitive())); + + if (configField.getTooltip() != null) { + assertThat(configFieldAsMap.get("tooltip"), equalTo(configField.getTooltip())); + } else { + assertFalse(configFieldAsMap.containsKey("tooltip")); + } + + if (configField.getType() != null) { + assertThat(configFieldAsMap.get("type"), equalTo(configField.getType().toString())); + } else { + assertFalse(configFieldAsMap.containsKey("type")); + } + + if (configField.getUiRestrictions() != null) { + assertThat(configFieldAsMap.get("ui_restrictions"), equalTo(configField.getUiRestrictions())); + } else { + assertFalse(configFieldAsMap.containsKey("ui_restrictions")); + } + + if (configField.getValidations() != null) { + List> validationsAsList = configField.getValidations() + .stream() + .map(SettingsConfigurationValidation::toMap) + .toList(); + assertThat(configFieldAsMap.get("validations"), equalTo(validationsAsList)); + } else { + assertFalse(configFieldAsMap.containsKey("validations")); + } + + assertThat(configFieldAsMap.get("value"), equalTo(configField.getValue())); + + } +} diff --git a/server/src/test/java/org/elasticsearch/inference/TaskSettingsConfigurationTestUtils.java b/server/src/test/java/org/elasticsearch/inference/TaskSettingsConfigurationTestUtils.java new file mode 100644 index 0000000000000..81abeaefd9f1a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/inference/TaskSettingsConfigurationTestUtils.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.test.ESTestCase.randomInt; + +public class TaskSettingsConfigurationTestUtils { + + public static TaskSettingsConfiguration getRandomTaskSettingsConfigurationField() { + return new TaskSettingsConfiguration.Builder().setTaskType(getRandomTaskType()) + .setConfiguration(getRandomServiceConfiguration(10)) + .build(); + } + + private static TaskType getRandomTaskType() { + TaskType[] values = TaskType.values(); + return values[randomInt(values.length - 1)]; + } + + private static Map getRandomServiceConfiguration(int numFields) { + var numConfigFields = randomInt(numFields); + Map configuration = new HashMap<>(); + for (int i = 0; i < numConfigFields; i++) { + configuration.put(randomAlphaOfLength(10), SettingsConfigurationTestUtils.getRandomSettingsConfigurationField()); + } + + return configuration; + } +} diff --git a/server/src/test/java/org/elasticsearch/inference/TaskSettingsConfigurationTests.java b/server/src/test/java/org/elasticsearch/inference/TaskSettingsConfigurationTests.java new file mode 100644 index 0000000000000..d37fffc78ebd6 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/inference/TaskSettingsConfigurationTests.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.hamcrest.CoreMatchers.equalTo; + +public class TaskSettingsConfigurationTests extends ESTestCase { + public void testToXContent() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "task_type": "text_embedding", + "configuration": { + "text_field_configuration": { + "default_value": null, + "depends_on": [ + { + "field": "some_field", + "value": true + } + ], + "display": "textbox", + "label": "Very important field", + "options": [], + "order": 4, + "required": true, + "sensitive": true, + "tooltip": "Wow, this tooltip is useful.", + "type": "str", + "ui_restrictions": [], + "validations": null, + "value": "" + }, + "numeric_field_configuration": { + "default_value": 3, + "depends_on": null, + "display": "numeric", + "label": "Very important numeric field", + "options": [], + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "Wow, this tooltip is useful.", + "type": "int", + "ui_restrictions": [], + "validations": [ + { + "constraint": 0, + "type": "greater_than" + } + ], + "value": "" + } + } + } + """); + + TaskSettingsConfiguration configuration = TaskSettingsConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + TaskSettingsConfiguration parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = TaskSettingsConfiguration.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + public void testToMap() { + TaskSettingsConfiguration configField = TaskSettingsConfigurationTestUtils.getRandomTaskSettingsConfigurationField(); + Map configFieldAsMap = configField.toMap(); + + assertThat(configFieldAsMap.get("task_type"), equalTo(configField.getTaskType())); + assertThat(configFieldAsMap.get("configuration"), equalTo(configField.getConfiguration())); + } +} diff --git a/server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationDisplayTypeTests.java b/server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationDisplayTypeTests.java new file mode 100644 index 0000000000000..603ea9480783c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationDisplayTypeTests.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference.configuration; + +import org.elasticsearch.inference.SettingsConfigurationTestUtils; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class SettingsConfigurationDisplayTypeTests extends ESTestCase { + + public void testDisplayType_WithValidConfigurationDisplayTypeString() { + SettingsConfigurationDisplayType displayType = SettingsConfigurationTestUtils.getRandomSettingsConfigurationDisplayType(); + assertThat(SettingsConfigurationDisplayType.displayType(displayType.toString()), equalTo(displayType)); + } + + public void testDisplayType_WithInvalidConfigurationDisplayTypeString_ExpectIllegalArgumentException() { + expectThrows( + IllegalArgumentException.class, + () -> SettingsConfigurationDisplayType.displayType("invalid configuration display type") + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationFieldTypeTests.java b/server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationFieldTypeTests.java new file mode 100644 index 0000000000000..c7b8884696a49 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationFieldTypeTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference.configuration; + +import org.elasticsearch.inference.SettingsConfigurationTestUtils; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class SettingsConfigurationFieldTypeTests extends ESTestCase { + + public void testFieldType_WithValidConfigurationFieldTypeString() { + SettingsConfigurationFieldType fieldType = SettingsConfigurationTestUtils.getRandomConfigurationFieldType(); + assertThat(SettingsConfigurationFieldType.fieldType(fieldType.toString()), equalTo(fieldType)); + } + + public void testFieldType_WithInvalidConfigurationFieldTypeString_ExpectIllegalArgumentException() { + assertThrows(IllegalArgumentException.class, () -> SettingsConfigurationFieldType.fieldType("invalid field type")); + } + +} diff --git a/server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidationTypeTests.java b/server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidationTypeTests.java new file mode 100644 index 0000000000000..d35968004ea0d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationValidationTypeTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference.configuration; + +import org.elasticsearch.inference.SettingsConfigurationTestUtils; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class SettingsConfigurationValidationTypeTests extends ESTestCase { + + public void testValidationType_WithValidConfigurationValidationTypeString() { + SettingsConfigurationValidationType validationType = SettingsConfigurationTestUtils.getRandomConfigurationValidationType(); + + assertThat(SettingsConfigurationValidationType.validationType(validationType.toString()), equalTo(validationType)); + } + + public void testValidationType_WithInvalidConfigurationValidationTypeString_ExpectIllegalArgumentException() { + assertThrows(IllegalArgumentException.class, () -> SettingsConfigurationValidationType.validationType("invalid validation type")); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceServicesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceServicesAction.java new file mode 100644 index 0000000000000..f4865c1010134 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceServicesAction.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.inference.action; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.inference.InferenceServiceConfiguration; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public class GetInferenceServicesAction extends ActionType { + + public static final GetInferenceServicesAction INSTANCE = new GetInferenceServicesAction(); + public static final String NAME = "cluster:monitor/xpack/inference/services/get"; + + public GetInferenceServicesAction() { + super(NAME); + } + + public static class Request extends AcknowledgedRequest { + + private final TaskType taskType; + + public Request(TaskType taskType) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + this.taskType = Objects.requireNonNull(taskType); + } + + public Request(StreamInput in) throws IOException { + super(in); + this.taskType = TaskType.fromStream(in); + } + + public TaskType getTaskType() { + return taskType; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + taskType.writeTo(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return taskType == request.taskType; + } + + @Override + public int hashCode() { + return Objects.hash(taskType); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private final List configurations; + + public Response(List configurations) { + this.configurations = configurations; + } + + public List getConfigurations() { + return configurations; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(configurations); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(); + for (var configuration : configurations) { + if (configuration != null) { + configuration.toXContent(builder, params); + } + } + builder.endArray(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetInferenceServicesAction.Response response = (GetInferenceServicesAction.Response) o; + return Objects.equals(configurations, response.configurations); + } + + @Override + public int hashCode() { + return Objects.hash(configurations); + } + } +} diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java index 74c1e2f0d3356..6790b9bb14c5a 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java @@ -291,28 +291,46 @@ protected Map deployE5TrainedModels() throws IOException { @SuppressWarnings("unchecked") protected Map getModel(String modelId) throws IOException { var endpoint = Strings.format("_inference/%s?error_trace", modelId); - return ((List>) getInternal(endpoint).get("endpoints")).get(0); + return ((List>) getInternalAsMap(endpoint).get("endpoints")).get(0); } @SuppressWarnings("unchecked") protected List> getModels(String modelId, TaskType taskType) throws IOException { var endpoint = Strings.format("_inference/%s/%s", taskType, modelId); - return (List>) getInternal(endpoint).get("endpoints"); + return (List>) getInternalAsMap(endpoint).get("endpoints"); } @SuppressWarnings("unchecked") protected List> getAllModels() throws IOException { var endpoint = Strings.format("_inference/_all"); - return (List>) getInternal("_inference/_all").get("endpoints"); + return (List>) getInternalAsMap("_inference/_all").get("endpoints"); } - private Map getInternal(String endpoint) throws IOException { + protected List getAllServices() throws IOException { + var endpoint = Strings.format("_inference/_services"); + return getInternalAsList(endpoint); + } + + @SuppressWarnings("unchecked") + protected List getServices(TaskType taskType) throws IOException { + var endpoint = Strings.format("_inference/_services/%s", taskType); + return getInternalAsList(endpoint); + } + + private Map getInternalAsMap(String endpoint) throws IOException { var request = new Request("GET", endpoint); var response = client().performRequest(request); assertOkOrCreated(response); return entityAsMap(response); } + private List getInternalAsList(String endpoint) throws IOException { + var request = new Request("GET", endpoint); + var response = client().performRequest(request); + assertOkOrCreated(response); + return entityAsList(response); + } + protected Map infer(String modelId, List input) throws IOException { var endpoint = Strings.format("_inference/%s", modelId); return inferInternal(endpoint, input, Map.of()); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index 53c82219e2f12..fed63477701e3 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.inference.TaskType; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Objects; @@ -128,6 +129,140 @@ public void testApisWithoutTaskType() throws IOException { deleteModel(modelId); } + @SuppressWarnings("unchecked") + public void testGetServicesWithoutTaskType() throws IOException { + List services = getAllServices(); + assertThat(services.size(), equalTo(19)); + + String[] providers = new String[services.size()]; + for (int i = 0; i < services.size(); i++) { + Map serviceConfig = (Map) services.get(i); + providers[i] = (String) serviceConfig.get("provider"); + } + + Arrays.sort(providers); + assertArrayEquals( + providers, + List.of( + "alibabacloud-ai-search", + "amazonbedrock", + "anthropic", + "azureaistudio", + "azureopenai", + "cohere", + "elastic", + "elasticsearch", + "googleaistudio", + "googlevertexai", + "hugging_face", + "hugging_face_elser", + "mistral", + "openai", + "streaming_completion_test_service", + "test_reranking_service", + "test_service", + "text_embedding_test_service", + "watsonxai" + ).toArray() + ); + } + + @SuppressWarnings("unchecked") + public void testGetServicesWithTextEmbeddingTaskType() throws IOException { + List services = getServices(TaskType.TEXT_EMBEDDING); + assertThat(services.size(), equalTo(13)); + + String[] providers = new String[services.size()]; + for (int i = 0; i < services.size(); i++) { + Map serviceConfig = (Map) services.get(i); + providers[i] = (String) serviceConfig.get("provider"); + } + + Arrays.sort(providers); + assertArrayEquals( + providers, + List.of( + "alibabacloud-ai-search", + "amazonbedrock", + "azureaistudio", + "azureopenai", + "cohere", + "elasticsearch", + "googleaistudio", + "googlevertexai", + "hugging_face", + "mistral", + "openai", + "text_embedding_test_service", + "watsonxai" + ).toArray() + ); + } + + @SuppressWarnings("unchecked") + public void testGetServicesWithRerankTaskType() throws IOException { + List services = getServices(TaskType.RERANK); + assertThat(services.size(), equalTo(5)); + + String[] providers = new String[services.size()]; + for (int i = 0; i < services.size(); i++) { + Map serviceConfig = (Map) services.get(i); + providers[i] = (String) serviceConfig.get("provider"); + } + + Arrays.sort(providers); + assertArrayEquals( + providers, + List.of("alibabacloud-ai-search", "cohere", "elasticsearch", "googlevertexai", "test_reranking_service").toArray() + ); + } + + @SuppressWarnings("unchecked") + public void testGetServicesWithCompletionTaskType() throws IOException { + List services = getServices(TaskType.COMPLETION); + assertThat(services.size(), equalTo(9)); + + String[] providers = new String[services.size()]; + for (int i = 0; i < services.size(); i++) { + Map serviceConfig = (Map) services.get(i); + providers[i] = (String) serviceConfig.get("provider"); + } + + Arrays.sort(providers); + assertArrayEquals( + providers, + List.of( + "alibabacloud-ai-search", + "amazonbedrock", + "anthropic", + "azureaistudio", + "azureopenai", + "cohere", + "googleaistudio", + "openai", + "streaming_completion_test_service" + ).toArray() + ); + } + + @SuppressWarnings("unchecked") + public void testGetServicesWithSparseEmbeddingTaskType() throws IOException { + List services = getServices(TaskType.SPARSE_EMBEDDING); + assertThat(services.size(), equalTo(6)); + + String[] providers = new String[services.size()]; + for (int i = 0; i < services.size(); i++) { + Map serviceConfig = (Map) services.get(i); + providers[i] = (String) serviceConfig.get("provider"); + } + + Arrays.sort(providers); + assertArrayEquals( + providers, + List.of("alibabacloud-ai-search", "elastic", "elasticsearch", "hugging_face", "hugging_face_elser", "test_service").toArray() + ); + } + public void testSkipValidationAndStart() throws IOException { String openAiConfigWithBadApiKey = """ { diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java index cd9a773f49f44..2ddc4f6c3e2f6 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java @@ -13,11 +13,14 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; @@ -25,8 +28,12 @@ import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -36,6 +43,8 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -62,6 +71,8 @@ public TestDenseModel(String inferenceEntityId, TestDenseInferenceServiceExtensi public static class TestInferenceService extends AbstractTestInferenceService { public static final String NAME = "text_embedding_test_service"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING); + public TestInferenceService(InferenceServiceFactoryContext context) {} @Override @@ -87,6 +98,16 @@ public void parseRequestConfig( parsedModelListener.onResponse(new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, secretSettings)); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public void infer( Model model, @@ -203,6 +224,38 @@ private static List generateEmbedding(String input, int dimensions) { return embedding; } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + "model", + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model") + .setOrder(1) + .setRequired(true) + .setSensitive(true) + .setTooltip("") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } public record TestServiceSettings( diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java index d8ee70986a57d..2075c1b1924bf 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java @@ -13,10 +13,13 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; @@ -24,7 +27,11 @@ import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -32,6 +39,8 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -53,6 +62,8 @@ public TestRerankingModel(String inferenceEntityId, TestServiceSettings serviceS public static class TestInferenceService extends AbstractTestInferenceService { public static final String NAME = "test_reranking_service"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.RERANK); + public TestInferenceService(InferenceServiceFactoryContext context) {} @Override @@ -78,6 +89,16 @@ public void parseRequestConfig( parsedModelListener.onResponse(new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, secretSettings)); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public void infer( Model model, @@ -132,6 +153,38 @@ private RankedDocsResults makeResults(List input) { protected ServiceSettings getServiceSettingsFromMap(Map serviceSettingsMap) { return TestServiceSettings.fromMap(serviceSettingsMap); } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + "model", + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model") + .setOrder(1) + .setRequired(true) + .setSensitive(true) + .setTooltip("") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } public record TestServiceSettings(String modelId) implements ServiceSettings { diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java index 6eb0caad36261..3d6f0ce6eba05 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java @@ -13,10 +13,13 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; @@ -24,7 +27,11 @@ import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -35,6 +42,8 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -56,6 +65,8 @@ public TestSparseModel(String inferenceEntityId, TestServiceSettings serviceSett public static class TestInferenceService extends AbstractTestInferenceService { public static final String NAME = "test_service"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.SPARSE_EMBEDDING); + public TestInferenceService(InferenceServiceExtension.InferenceServiceFactoryContext context) {} @Override @@ -81,6 +92,16 @@ public void parseRequestConfig( parsedModelListener.onResponse(new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, secretSettings)); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public void infer( Model model, @@ -161,6 +182,50 @@ private static float generateEmbedding(String input, int position) { // Ensure non-negative and non-zero values for features return Math.abs(input.hashCode()) + 1 + position; } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + "model", + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model") + .setOrder(1) + .setRequired(true) + .setSensitive(false) + .setTooltip("") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + "hidden_field", + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Hidden Field") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } public record TestServiceSettings(String model, String hiddenField, boolean shouldReturnHiddenField) implements ServiceSettings { diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java index 206aa1f3e5d28..595b92a6be66b 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java @@ -14,24 +14,33 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.inference.results.StreamingChatCompletionResults; import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -49,6 +58,8 @@ public static class TestInferenceService extends AbstractTestInferenceService { private static final String NAME = "streaming_completion_test_service"; private static final Set supportedStreamingTasks = Set.of(TaskType.COMPLETION); + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.COMPLETION); + public TestInferenceService(InferenceServiceExtension.InferenceServiceFactoryContext context) {} @Override @@ -79,6 +90,16 @@ public void parseRequestConfig( parsedModelListener.onResponse(new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, secretSettings)); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public void infer( Model model, @@ -155,6 +176,38 @@ public void chunkedInfer( public Set supportedStreamingTasks() { return supportedStreamingTasks; } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + "model_id", + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model ID") + .setOrder(1) + .setRequired(true) + .setSensitive(true) + .setTooltip("") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } public record TestServiceSettings(String modelId) implements ServiceSettings { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index ebbf1e59e8b1f..0450400e5ca8b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -45,12 +45,14 @@ import org.elasticsearch.xpack.core.inference.action.DeleteInferenceEndpointAction; import org.elasticsearch.xpack.core.inference.action.GetInferenceDiagnosticsAction; import org.elasticsearch.xpack.core.inference.action.GetInferenceModelAction; +import org.elasticsearch.xpack.core.inference.action.GetInferenceServicesAction; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.action.PutInferenceModelAction; import org.elasticsearch.xpack.core.inference.action.UpdateInferenceModelAction; import org.elasticsearch.xpack.inference.action.TransportDeleteInferenceEndpointAction; import org.elasticsearch.xpack.inference.action.TransportGetInferenceDiagnosticsAction; import org.elasticsearch.xpack.inference.action.TransportGetInferenceModelAction; +import org.elasticsearch.xpack.inference.action.TransportGetInferenceServicesAction; import org.elasticsearch.xpack.inference.action.TransportInferenceAction; import org.elasticsearch.xpack.inference.action.TransportInferenceUsageAction; import org.elasticsearch.xpack.inference.action.TransportPutInferenceModelAction; @@ -75,6 +77,7 @@ import org.elasticsearch.xpack.inference.rest.RestDeleteInferenceEndpointAction; import org.elasticsearch.xpack.inference.rest.RestGetInferenceDiagnosticsAction; import org.elasticsearch.xpack.inference.rest.RestGetInferenceModelAction; +import org.elasticsearch.xpack.inference.rest.RestGetInferenceServicesAction; import org.elasticsearch.xpack.inference.rest.RestInferenceAction; import org.elasticsearch.xpack.inference.rest.RestPutInferenceModelAction; import org.elasticsearch.xpack.inference.rest.RestStreamInferenceAction; @@ -155,7 +158,8 @@ public InferencePlugin(Settings settings) { new ActionHandler<>(UpdateInferenceModelAction.INSTANCE, TransportUpdateInferenceModelAction.class), new ActionHandler<>(DeleteInferenceEndpointAction.INSTANCE, TransportDeleteInferenceEndpointAction.class), new ActionHandler<>(XPackUsageFeatureAction.INFERENCE, TransportInferenceUsageAction.class), - new ActionHandler<>(GetInferenceDiagnosticsAction.INSTANCE, TransportGetInferenceDiagnosticsAction.class) + new ActionHandler<>(GetInferenceDiagnosticsAction.INSTANCE, TransportGetInferenceDiagnosticsAction.class), + new ActionHandler<>(GetInferenceServicesAction.INSTANCE, TransportGetInferenceServicesAction.class) ); } @@ -178,7 +182,8 @@ public List getRestHandlers( new RestPutInferenceModelAction(), new RestUpdateInferenceModelAction(), new RestDeleteInferenceEndpointAction(), - new RestGetInferenceDiagnosticsAction() + new RestGetInferenceDiagnosticsAction(), + new RestGetInferenceServicesAction() ); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java new file mode 100644 index 0000000000000..a6109bfe659d7 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.InferenceServiceConfiguration; +import org.elasticsearch.inference.InferenceServiceRegistry; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.inference.action.GetInferenceServicesAction; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +public class TransportGetInferenceServicesAction extends HandledTransportAction< + GetInferenceServicesAction.Request, + GetInferenceServicesAction.Response> { + + private final InferenceServiceRegistry serviceRegistry; + + @Inject + public TransportGetInferenceServicesAction( + TransportService transportService, + ActionFilters actionFilters, + InferenceServiceRegistry serviceRegistry + ) { + super( + GetInferenceServicesAction.NAME, + transportService, + actionFilters, + GetInferenceServicesAction.Request::new, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + this.serviceRegistry = serviceRegistry; + } + + @Override + protected void doExecute( + Task task, + GetInferenceServicesAction.Request request, + ActionListener listener + ) { + if (request.getTaskType() == TaskType.ANY) { + getAllServiceConfigurations(listener); + } else { + getServiceConfigurationsForTaskType(request.getTaskType(), listener); + } + } + + private void getServiceConfigurationsForTaskType( + TaskType requestedTaskType, + ActionListener listener + ) { + var filteredServices = serviceRegistry.getServices() + .entrySet() + .stream() + .filter(service -> service.getValue().supportedTaskTypes().contains(requestedTaskType)) + .collect(Collectors.toSet()); + + getServiceConfigurationsForServices(filteredServices, listener.delegateFailureAndWrap((delegate, configurations) -> { + delegate.onResponse(new GetInferenceServicesAction.Response(configurations)); + })); + } + + private void getAllServiceConfigurations(ActionListener listener) { + getServiceConfigurationsForServices( + serviceRegistry.getServices().entrySet(), + listener.delegateFailureAndWrap((delegate, configurations) -> { + delegate.onResponse(new GetInferenceServicesAction.Response(configurations)); + }) + ); + } + + private void getServiceConfigurationsForServices( + Set> services, + ActionListener> listener + ) { + try { + var serviceConfigurations = new ArrayList(); + for (var service : services) { + serviceConfigurations.add(service.getValue().getConfiguration()); + } + listener.onResponse(serviceConfigurations.stream().toList()); + } catch (Exception e) { + listener.onFailure(e); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/alibabacloudsearch/AlibabaCloudSearchEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/alibabacloudsearch/AlibabaCloudSearchEmbeddingsRequestEntity.java index c2367aeff3070..1fc61d3331d20 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/alibabacloudsearch/AlibabaCloudSearchEmbeddingsRequestEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/alibabacloudsearch/AlibabaCloudSearchEmbeddingsRequestEntity.java @@ -27,7 +27,7 @@ public record AlibabaCloudSearchEmbeddingsRequestEntity(List input, Alib private static final String TEXTS_FIELD = "input"; - static final String INPUT_TYPE_FIELD = "input_type"; + public static final String INPUT_TYPE_FIELD = "input_type"; public AlibabaCloudSearchEmbeddingsRequestEntity { Objects.requireNonNull(input); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/alibabacloudsearch/AlibabaCloudSearchSparseRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/alibabacloudsearch/AlibabaCloudSearchSparseRequestEntity.java index 3aec226bfc277..8fae9408b860d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/alibabacloudsearch/AlibabaCloudSearchSparseRequestEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/alibabacloudsearch/AlibabaCloudSearchSparseRequestEntity.java @@ -21,9 +21,9 @@ public record AlibabaCloudSearchSparseRequestEntity(List input, AlibabaC private static final String TEXTS_FIELD = "input"; - static final String INPUT_TYPE_FIELD = "input_type"; + public static final String INPUT_TYPE_FIELD = "input_type"; - static final String RETURN_TOKEN_FIELD = "return_token"; + public static final String RETURN_TOKEN_FIELD = "return_token"; public AlibabaCloudSearchSparseRequestEntity { Objects.requireNonNull(input); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereEmbeddingsRequestEntity.java index 6e389e8537d27..63cc5c3cb7261 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereEmbeddingsRequestEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereEmbeddingsRequestEntity.java @@ -34,7 +34,7 @@ public record CohereEmbeddingsRequestEntity( private static final String CLUSTERING = "clustering"; private static final String CLASSIFICATION = "classification"; private static final String TEXTS_FIELD = "texts"; - static final String INPUT_TYPE_FIELD = "input_type"; + public static final String INPUT_TYPE_FIELD = "input_type"; static final String EMBEDDING_TYPES_FIELD = "embedding_types"; public CohereEmbeddingsRequestEntity { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java index 2dec72e6692a6..55d6443b43c03 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java @@ -11,6 +11,7 @@ public final class Paths { static final String INFERENCE_ID = "inference_id"; static final String TASK_TYPE_OR_INFERENCE_ID = "task_type_or_id"; + static final String TASK_TYPE = "task_type"; static final String INFERENCE_ID_PATH = "_inference/{" + TASK_TYPE_OR_INFERENCE_ID + "}"; static final String TASK_TYPE_INFERENCE_ID_PATH = "_inference/{" + TASK_TYPE_OR_INFERENCE_ID + "}/{" + INFERENCE_ID + "}"; static final String INFERENCE_DIAGNOSTICS_PATH = "_inference/.diagnostics"; @@ -20,6 +21,8 @@ public final class Paths { + INFERENCE_ID + "}/_update"; static final String INFERENCE_ID_UPDATE_PATH = "_inference/{" + TASK_TYPE_OR_INFERENCE_ID + "}/_update"; + static final String INFERENCE_SERVICES_PATH = "_inference/_services"; + static final String TASK_TYPE_INFERENCE_SERVICES_PATH = "_inference/_services/{" + TASK_TYPE + "}"; static final String STREAM_INFERENCE_ID_PATH = "_inference/{" + TASK_TYPE_OR_INFERENCE_ID + "}/_stream"; static final String STREAM_TASK_TYPE_INFERENCE_ID_PATH = "_inference/{" diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceServicesAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceServicesAction.java new file mode 100644 index 0000000000000..25f09e7982dff --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceServicesAction.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rest; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.inference.action.GetInferenceServicesAction; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.xpack.inference.rest.Paths.INFERENCE_SERVICES_PATH; +import static org.elasticsearch.xpack.inference.rest.Paths.TASK_TYPE; +import static org.elasticsearch.xpack.inference.rest.Paths.TASK_TYPE_INFERENCE_SERVICES_PATH; + +@ServerlessScope(Scope.INTERNAL) +public class RestGetInferenceServicesAction extends BaseRestHandler { + @Override + public String getName() { + return "get_inference_services_action"; + } + + @Override + public List routes() { + return List.of(new Route(GET, INFERENCE_SERVICES_PATH), new Route(GET, TASK_TYPE_INFERENCE_SERVICES_PATH)); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + TaskType taskType; + if (restRequest.hasParam(TASK_TYPE)) { + taskType = TaskType.fromStringOrStatusException(restRequest.param(TASK_TYPE)); + } else { + taskType = TaskType.ANY; + } + + var request = new GetInferenceServicesAction.Request(taskType); + return channel -> client.execute(GetInferenceServicesAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java index cc26a3b2babe5..f1472dda4f86f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java @@ -11,19 +11,27 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; @@ -42,10 +50,17 @@ import org.elasticsearch.xpack.inference.services.alibabacloudsearch.embeddings.AlibabaCloudSearchEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.alibabacloudsearch.rerank.AlibabaCloudSearchRerankModel; import org.elasticsearch.xpack.inference.services.alibabacloudsearch.sparse.AlibabaCloudSearchSparseModel; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Stream; +import static org.elasticsearch.inference.TaskType.SPARSE_EMBEDDING; +import static org.elasticsearch.inference.TaskType.TEXT_EMBEDDING; import static org.elasticsearch.xpack.core.inference.action.InferenceAction.Request.DEFAULT_TIMEOUT; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; @@ -53,10 +68,21 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; import static org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchServiceFields.EMBEDDING_MAX_BATCH_SIZE; +import static org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchServiceSettings.HOST; +import static org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchServiceSettings.HTTP_SCHEMA_NAME; +import static org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchServiceSettings.SERVICE_ID; +import static org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchServiceSettings.WORKSPACE_NAME; public class AlibabaCloudSearchService extends SenderService { public static final String NAME = AlibabaCloudSearchUtils.SERVICE_NAME; + private static final EnumSet supportedTaskTypes = EnumSet.of( + TaskType.TEXT_EMBEDDING, + TaskType.SPARSE_EMBEDDING, + TaskType.RERANK, + TaskType.COMPLETION + ); + public AlibabaCloudSearchService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -78,7 +104,7 @@ public void parseRequestConfig( Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); ChunkingSettings chunkingSettings = null; - if (ChunkingSettingsFeatureFlag.isEnabled() && List.of(TaskType.TEXT_EMBEDDING, TaskType.SPARSE_EMBEDDING).contains(taskType)) { + if (ChunkingSettingsFeatureFlag.isEnabled() && List.of(TEXT_EMBEDDING, SPARSE_EMBEDDING).contains(taskType)) { chunkingSettings = ChunkingSettingsBuilder.fromMap( removeFromMapOrDefaultEmpty(config, ModelConfigurations.CHUNKING_SETTINGS) ); @@ -105,6 +131,16 @@ public void parseRequestConfig( } } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + private static AlibabaCloudSearchModel createModelWithoutLoggingDeprecations( String inferenceEntityId, TaskType taskType, @@ -191,7 +227,7 @@ public AlibabaCloudSearchModel parsePersistedConfigWithSecrets( Map secretSettingsMap = removeFromMapOrThrowIfNull(secrets, ModelSecrets.SECRET_SETTINGS); ChunkingSettings chunkingSettings = null; - if (ChunkingSettingsFeatureFlag.isEnabled() && List.of(TaskType.TEXT_EMBEDDING, TaskType.SPARSE_EMBEDDING).contains(taskType)) { + if (ChunkingSettingsFeatureFlag.isEnabled() && List.of(TEXT_EMBEDDING, SPARSE_EMBEDDING).contains(taskType)) { chunkingSettings = ChunkingSettingsBuilder.fromMap(removeFromMapOrDefaultEmpty(config, ModelConfigurations.CHUNKING_SETTINGS)); } @@ -212,7 +248,7 @@ public AlibabaCloudSearchModel parsePersistedConfig(String inferenceEntityId, Ta Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); ChunkingSettings chunkingSettings = null; - if (ChunkingSettingsFeatureFlag.isEnabled() && List.of(TaskType.TEXT_EMBEDDING, TaskType.SPARSE_EMBEDDING).contains(taskType)) { + if (ChunkingSettingsFeatureFlag.isEnabled() && List.of(TEXT_EMBEDDING, SPARSE_EMBEDDING).contains(taskType)) { chunkingSettings = ChunkingSettingsBuilder.fromMap(removeFromMapOrDefaultEmpty(config, ModelConfigurations.CHUNKING_SETTINGS)); } @@ -366,4 +402,99 @@ private void checkAlibabaCloudSearchServiceConfig(Model model, InferenceService private static final String ALIBABA_CLOUD_SEARCH_SERVICE_CONFIG_INPUT = "input"; private static final String ALIBABA_CLOUD_SEARCH_SERVICE_CONFIG_QUERY = "query"; + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + SERVICE_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Project ID") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("The name of the model service to use for the {infer} task.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of( + "ops-text-embedding-001", + "ops-text-embedding-zh-001", + "ops-text-embedding-en-001", + "ops-text-embedding-002", + "ops-text-sparse-embedding-001", + "ops-bge-reranker-larger" + ).map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()).toList() + ) + .build() + ); + + configurationMap.put( + HOST, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Host") + .setOrder(3) + .setRequired(true) + .setSensitive(false) + .setTooltip( + "The name of the host address used for the {infer} task. You can find the host address at " + + "https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[ the API keys section] " + + "of the documentation." + ) + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + HTTP_SCHEMA_NAME, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("HTTP Schema") + .setOrder(4) + .setRequired(true) + .setSensitive(false) + .setTooltip("") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of("https", "http") + .map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()) + .toList() + ) + .build() + ); + + configurationMap.put( + WORKSPACE_NAME, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Workspace") + .setOrder(5) + .setRequired(true) + .setSensitive(false) + .setTooltip("The name of the workspace used for the {infer} task.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.putAll( + DefaultSecretSettings.toSettingsConfigurationWithTooltip("A valid API key for the AlibabaCloud AI Search API.") + ); + configurationMap.putAll(RateLimitSettings.toSettingsConfiguration()); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + case TEXT_EMBEDDING -> taskSettingsConfig = AlibabaCloudSearchEmbeddingsModel.Configuration.get(); + case SPARSE_EMBEDDING -> taskSettingsConfig = AlibabaCloudSearchSparseModel.Configuration.get(); + // COMPLETION, RERANK task types have no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsModel.java index 2654ee4d22ce6..1bcc802ab18ea 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsModel.java @@ -7,19 +7,28 @@ package org.elasticsearch.xpack.inference.services.alibabacloudsearch.embeddings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.alibabacloudsearch.AlibabaCloudSearchActionVisitor; +import org.elasticsearch.xpack.inference.external.request.alibabacloudsearch.AlibabaCloudSearchEmbeddingsRequestEntity; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchModel; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import java.util.stream.Stream; public class AlibabaCloudSearchEmbeddingsModel extends AlibabaCloudSearchModel { public static AlibabaCloudSearchEmbeddingsModel of( @@ -105,4 +114,35 @@ public DefaultSecretSettings getSecretSettings() { public ExecutableAction accept(AlibabaCloudSearchActionVisitor visitor, Map taskSettings, InputType inputType) { return visitor.create(this, taskSettings, inputType); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + AlibabaCloudSearchEmbeddingsRequestEntity.INPUT_TYPE_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Input Type") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the type of input passed to the model.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of("ingest", "search") + .map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()) + .toList() + ) + .setValue("") + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseModel.java index 0155d8fbc1f08..95bf500434c5a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseModel.java @@ -7,19 +7,28 @@ package org.elasticsearch.xpack.inference.services.alibabacloudsearch.sparse; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.alibabacloudsearch.AlibabaCloudSearchActionVisitor; +import org.elasticsearch.xpack.inference.external.request.alibabacloudsearch.AlibabaCloudSearchSparseRequestEntity; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchModel; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import java.util.stream.Stream; public class AlibabaCloudSearchSparseModel extends AlibabaCloudSearchModel { public static AlibabaCloudSearchSparseModel of( @@ -99,4 +108,50 @@ public DefaultSecretSettings getSecretSettings() { public ExecutableAction accept(AlibabaCloudSearchActionVisitor visitor, Map taskSettings, InputType inputType) { return visitor.create(this, taskSettings, inputType); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + AlibabaCloudSearchSparseRequestEntity.INPUT_TYPE_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Input Type") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the type of input passed to the model.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of("ingest", "search") + .map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()) + .toList() + ) + .setValue("") + .build() + ); + configurationMap.put( + AlibabaCloudSearchSparseRequestEntity.RETURN_TOKEN_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TOGGLE) + .setLabel("Return Token") + .setOrder(2) + .setRequired(false) + .setSensitive(false) + .setTooltip( + "If `true`, the token name will be returned in the response. Defaults to `false` which means only the " + + "token ID will be returned in the response." + ) + .setType(SettingsConfigurationFieldType.BOOLEAN) + .setValue(true) + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java index 0ca71d47eb1b6..b5818d7e4a287 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java @@ -13,12 +13,17 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SecretSettings; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -113,4 +118,38 @@ public int hashCode() { public SecretSettings newSecretSettings(Map newSecrets) { return fromMap(new HashMap<>(newSecrets)); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + configurationMap.put( + ACCESS_KEY_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Access Key") + .setOrder(1) + .setRequired(true) + .setSensitive(true) + .setTooltip("A valid AWS access key that has permissions to use Amazon Bedrock.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + configurationMap.put( + SECRET_KEY_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Secret Key") + .setOrder(2) + .setRequired(true) + .setSensitive(true) + .setTooltip("A valid AWS secret key that is paired with the access_key.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java index 96dd6d2b3690f..f42b48ce59a89 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java @@ -12,18 +12,26 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; @@ -41,17 +49,24 @@ import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Stream; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MODEL_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.PROVIDER_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.REGION_FIELD; import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.chatCompletionProviderHasTopKParameter; import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.getEmbeddingsMaxBatchSize; @@ -63,6 +78,8 @@ public class AmazonBedrockService extends SenderService { private final Sender amazonBedrockSender; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING, TaskType.COMPLETION); + public AmazonBedrockService( HttpRequestSender.Factory httpSenderFactory, AmazonBedrockRequestSender.Factory amazonBedrockFactory, @@ -220,6 +237,16 @@ public Model parsePersistedConfig(String modelId, TaskType taskType, Map supportedTaskTypes() { + return supportedTaskTypes; + } + private static AmazonBedrockModel createModel( String inferenceEntityId, TaskType taskType, @@ -353,4 +380,74 @@ public void close() throws IOException { super.close(); IOUtils.closeWhileHandlingException(amazonBedrockSender); } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + PROVIDER_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Provider") + .setOrder(3) + .setRequired(true) + .setSensitive(false) + .setTooltip("The model provider for your deployment.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of("amazontitan", "anthropic", "ai21labs", "cohere", "meta", "mistral") + .map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()) + .toList() + ) + .build() + ); + + configurationMap.put( + MODEL_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model") + .setOrder(4) + .setRequired(true) + .setSensitive(false) + .setTooltip("The base model ID or an ARN to a custom model based on a foundational model.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + REGION_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Region") + .setOrder(5) + .setRequired(true) + .setSensitive(false) + .setTooltip("The region that your model or ARN is deployed in.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.putAll(AmazonBedrockSecretSettings.Configuration.get()); + configurationMap.putAll( + RateLimitSettings.toSettingsConfigurationWithTooltip( + "By default, the amazonbedrock service sets the number of requests allowed per minute to 240." + ) + ); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + case COMPLETION -> taskSettingsConfig = AmazonBedrockChatCompletionModel.Configuration.get(); + // TEXT_EMBEDDING task type has no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java index 27dc607d671aa..9339a8a05dc81 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java @@ -7,19 +7,30 @@ package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.amazonbedrock.AmazonBedrockActionVisitor; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_NEW_TOKENS_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_P_FIELD; + public class AmazonBedrockChatCompletionModel extends AmazonBedrockModel { public static AmazonBedrockChatCompletionModel of(AmazonBedrockChatCompletionModel completionModel, Map taskSettings) { @@ -80,4 +91,62 @@ public AmazonBedrockChatCompletionServiceSettings getServiceSettings() { public AmazonBedrockChatCompletionTaskSettings getTaskSettings() { return (AmazonBedrockChatCompletionTaskSettings) super.getTaskSettings(); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + MAX_NEW_TOKENS_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Max New Tokens") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Sets the maximum number for the output tokens to be generated.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + configurationMap.put( + TEMPERATURE_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Temperature") + .setOrder(2) + .setRequired(false) + .setSensitive(false) + .setTooltip("A number between 0.0 and 1.0 that controls the apparent creativity of the results.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + configurationMap.put( + TOP_P_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Top P") + .setOrder(3) + .setRequired(false) + .setSensitive(false) + .setTooltip("Alternative to temperature. A number in the range of 0.0 to 1.0, to eliminate low-probability tokens.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + configurationMap.put( + TOP_K_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Top K") + .setOrder(4) + .setRequired(false) + .setSensitive(false) + .setTooltip("Only available for anthropic, cohere, and mistral providers. Alternative to temperature.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java index c925053c38116..556b34b945c14 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java @@ -11,16 +11,23 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.inference.external.action.anthropic.AnthropicActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; @@ -30,11 +37,16 @@ import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModel; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; @@ -44,6 +56,8 @@ public class AnthropicService extends SenderService { public static final String NAME = "anthropic"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.COMPLETION); + public AnthropicService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -162,6 +176,16 @@ public AnthropicModel parsePersistedConfig(String inferenceEntityId, TaskType ta ); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public void doInfer( Model model, @@ -205,4 +229,44 @@ public TransportVersion getMinimalSupportedVersion() { public Set supportedStreamingTasks() { return COMPLETION_ONLY; } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + MODEL_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model ID") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("The name of the model to use for the inference task.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.putAll(DefaultSecretSettings.toSettingsConfiguration()); + configurationMap.putAll( + RateLimitSettings.toSettingsConfigurationWithTooltip( + "By default, the anthropic service sets the number of requests allowed per minute to 50." + ) + ); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + case COMPLETION -> taskSettingsConfig = AnthropicChatCompletionModel.Configuration.get(); + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionModel.java index 942cae8960daf..df54ee4ec97c4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionModel.java @@ -8,10 +8,14 @@ package org.elasticsearch.xpack.inference.services.anthropic.completion; import org.apache.http.client.utils.URIBuilder; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.anthropic.AnthropicActionVisitor; import org.elasticsearch.xpack.inference.external.request.anthropic.AnthropicRequestUtils; @@ -22,8 +26,15 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.MAX_TOKENS; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.TOP_P_FIELD; + public class AnthropicChatCompletionModel extends AnthropicModel { public static AnthropicChatCompletionModel of(AnthropicChatCompletionModel model, Map taskSettings) { @@ -123,4 +134,62 @@ private static URI buildDefaultUri() throws URISyntaxException { .setPathSegments(AnthropicRequestUtils.API_VERSION_1, AnthropicRequestUtils.MESSAGES_PATH) .build(); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + MAX_TOKENS, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Max Tokens") + .setOrder(1) + .setRequired(true) + .setSensitive(false) + .setTooltip("The maximum number of tokens to generate before stopping.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + configurationMap.put( + TEMPERATURE_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Temperature") + .setOrder(2) + .setRequired(false) + .setSensitive(false) + .setTooltip("The amount of randomness injected into the response.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + configurationMap.put( + TOP_K_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Top K") + .setOrder(3) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies to only sample from the top K options for each subsequent token.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + configurationMap.put( + TOP_P_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Top P") + .setOrder(4) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies to use Anthropic’s nucleus sampling.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java index 5525fff6b1a7c..89efb1c95a12a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java @@ -12,18 +12,26 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; @@ -40,16 +48,24 @@ import org.elasticsearch.xpack.inference.services.azureaistudio.completion.AzureAiStudioChatCompletionTaskSettings; import org.elasticsearch.xpack.inference.services.azureaistudio.embeddings.AzureAiStudioEmbeddingsModel; import org.elasticsearch.xpack.inference.services.azureaistudio.embeddings.AzureAiStudioEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Stream; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.ENDPOINT_TYPE_FIELD; +import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.PROVIDER_FIELD; +import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.TARGET_FIELD; import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioProviderCapabilities.providerAllowsEndpointTypeForTask; import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioProviderCapabilities.providerAllowsTaskType; import static org.elasticsearch.xpack.inference.services.azureaistudio.completion.AzureAiStudioChatCompletionTaskSettings.DEFAULT_MAX_NEW_TOKENS; @@ -59,6 +75,8 @@ public class AzureAiStudioService extends SenderService { static final String NAME = "azureaistudio"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING, TaskType.COMPLETION); + public AzureAiStudioService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -207,6 +225,16 @@ public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, M ); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public String name() { return NAME; @@ -378,4 +406,75 @@ private static void checkProviderAndEndpointTypeForTask( ); } } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + TARGET_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Target") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("The target URL of your Azure AI Studio model deployment.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + ENDPOINT_TYPE_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Endpoint Type") + .setOrder(3) + .setRequired(true) + .setSensitive(false) + .setTooltip("Specifies the type of endpoint that is used in your model deployment.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of("token", "realtime") + .map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()) + .toList() + ) + .build() + ); + + configurationMap.put( + PROVIDER_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Provider") + .setOrder(3) + .setRequired(true) + .setSensitive(false) + .setTooltip("The model provider for your deployment.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of("cohere", "meta", "microsoft_phi", "mistral", "openai", "databricks") + .map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()) + .toList() + ) + .build() + ); + + configurationMap.putAll(DefaultSecretSettings.toSettingsConfiguration()); + configurationMap.putAll(RateLimitSettings.toSettingsConfiguration()); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + case TEXT_EMBEDDING -> taskSettingsConfig = AzureAiStudioEmbeddingsModel.Configuration.get(); + case COMPLETION -> taskSettingsConfig = AzureAiStudioChatCompletionModel.Configuration.get(); + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionModel.java index 5afb3aaed61ff..0492788c2adcd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionModel.java @@ -7,10 +7,14 @@ package org.elasticsearch.xpack.inference.services.azureaistudio.completion; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.azureaistudio.AzureAiStudioActionVisitor; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; @@ -21,9 +25,12 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.COMPLETIONS_URI_PATH; +import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.USER_FIELD; public class AzureAiStudioChatCompletionModel extends AzureAiStudioModel { @@ -102,4 +109,30 @@ protected URI getEndpointUri() throws URISyntaxException { public ExecutableAction accept(AzureAiStudioActionVisitor creator, Map taskSettings) { return creator.create(this, taskSettings); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + USER_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("User") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the user issuing the request.") + .setType(SettingsConfigurationFieldType.STRING) + .setValue("") + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsModel.java index edbefe07cff02..8b0b52c69b82c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsModel.java @@ -7,11 +7,15 @@ package org.elasticsearch.xpack.inference.services.azureaistudio.embeddings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.azureaistudio.AzureAiStudioActionVisitor; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; @@ -22,9 +26,15 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.DO_SAMPLE_FIELD; import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.EMBEDDINGS_URI_PATH; +import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.MAX_NEW_TOKENS_FIELD; +import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.TOP_P_FIELD; public class AzureAiStudioEmbeddingsModel extends AzureAiStudioModel { @@ -106,4 +116,65 @@ protected URI getEndpointUri() throws URISyntaxException { public ExecutableAction accept(AzureAiStudioActionVisitor creator, Map taskSettings) { return creator.create(this, taskSettings); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + DO_SAMPLE_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Do Sample") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Instructs the inference process to perform sampling or not.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + configurationMap.put( + MAX_NEW_TOKENS_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Max New Tokens") + .setOrder(2) + .setRequired(false) + .setSensitive(false) + .setTooltip("Provides a hint for the maximum number of output tokens to be generated.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + configurationMap.put( + TEMPERATURE_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Temperature") + .setOrder(3) + .setRequired(false) + .setSensitive(false) + .setTooltip("A number in the range of 0.0 to 2.0 that specifies the sampling temperature.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + configurationMap.put( + TOP_P_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Top P") + .setOrder(4) + .setRequired(false) + .setSensitive(false) + .setTooltip( + "A number in the range of 0.0 to 2.0 that is an alternative value to temperature. Should not be used " + + "if temperature is specified." + ) + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiSecretSettings.java index a2bd4f6175989..70a29b28a607c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiSecretSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiSecretSettings.java @@ -13,12 +13,17 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SecretSettings; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -131,4 +136,38 @@ public int hashCode() { public SecretSettings newSecretSettings(Map newSecrets) { return AzureOpenAiSecretSettings.fromMap(new HashMap<>(newSecrets)); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + configurationMap.put( + API_KEY, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("API Key") + .setOrder(1) + .setRequired(false) + .setSensitive(true) + .setTooltip("You must provide either an API key or an Entra ID.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + configurationMap.put( + ENTRA_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Entra ID") + .setOrder(2) + .setRequired(false) + .setSensitive(true) + .setTooltip("You must provide either an API key or an Entra ID.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java index cd657113d7b61..6e825355ee74f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java @@ -12,18 +12,25 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; @@ -39,7 +46,10 @@ import org.elasticsearch.xpack.inference.services.azureopenai.completion.AzureOpenAiCompletionModel; import org.elasticsearch.xpack.inference.services.azureopenai.embeddings.AzureOpenAiEmbeddingsModel; import org.elasticsearch.xpack.inference.services.azureopenai.embeddings.AzureOpenAiEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -49,11 +59,16 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.API_VERSION; +import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.DEPLOYMENT_ID; +import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.RESOURCE_NAME; import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.EMBEDDING_MAX_BATCH_SIZE; public class AzureOpenAiService extends SenderService { public static final String NAME = "azureopenai"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING, TaskType.COMPLETION); + public AzureOpenAiService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -209,6 +224,16 @@ public AzureOpenAiModel parsePersistedConfig(String inferenceEntityId, TaskType ); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override protected void doInfer( Model model, @@ -273,7 +298,7 @@ protected void doChunkedInfer( * For text embedding models get the embedding size and * update the service settings. * - * @param model The new model + * @param model The new model * @param listener The listener */ @Override @@ -331,4 +356,69 @@ public TransportVersion getMinimalSupportedVersion() { public Set supportedStreamingTasks() { return COMPLETION_ONLY; } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + RESOURCE_NAME, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Resource Name") + .setOrder(3) + .setRequired(true) + .setSensitive(false) + .setTooltip("The name of your Azure OpenAI resource.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + API_VERSION, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("API Version") + .setOrder(4) + .setRequired(true) + .setSensitive(false) + .setTooltip("The Azure API version ID to use.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + DEPLOYMENT_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Deployment ID") + .setOrder(5) + .setRequired(true) + .setSensitive(false) + .setTooltip("The deployment name of your deployed models.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.putAll(AzureOpenAiSecretSettings.Configuration.get()); + configurationMap.putAll( + RateLimitSettings.toSettingsConfigurationWithTooltip( + "The azureopenai service sets a default number of requests allowed per minute depending on the task type." + ) + ); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + case TEXT_EMBEDDING -> taskSettingsConfig = AzureOpenAiEmbeddingsModel.Configuration.get(); + case COMPLETION -> taskSettingsConfig = AzureOpenAiCompletionModel.Configuration.get(); + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/completion/AzureOpenAiCompletionModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/completion/AzureOpenAiCompletionModel.java index c4146b2ba2d30..8b2846fd9ced7 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/completion/AzureOpenAiCompletionModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/completion/AzureOpenAiCompletionModel.java @@ -7,10 +7,14 @@ package org.elasticsearch.xpack.inference.services.azureopenai.completion; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.azureopenai.AzureOpenAiActionVisitor; import org.elasticsearch.xpack.inference.external.request.azureopenai.AzureOpenAiUtils; @@ -19,8 +23,12 @@ import org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiSecretSettings; import java.net.URISyntaxException; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.USER; + public class AzureOpenAiCompletionModel extends AzureOpenAiModel { public static AzureOpenAiCompletionModel of(AzureOpenAiCompletionModel model, Map taskSettings) { @@ -120,4 +128,29 @@ public String[] operationPathSegments() { return new String[] { AzureOpenAiUtils.CHAT_PATH, AzureOpenAiUtils.COMPLETIONS_PATH }; } + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + USER, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("User") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the user issuing the request.") + .setType(SettingsConfigurationFieldType.STRING) + .setValue("") + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsModel.java index 7b83d5322a696..0316804664510 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsModel.java @@ -7,11 +7,15 @@ package org.elasticsearch.xpack.inference.services.azureopenai.embeddings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.azureopenai.AzureOpenAiActionVisitor; import org.elasticsearch.xpack.inference.external.request.azureopenai.AzureOpenAiUtils; @@ -20,8 +24,12 @@ import org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiSecretSettings; import java.net.URISyntaxException; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.USER; + public class AzureOpenAiEmbeddingsModel extends AzureOpenAiModel { public static AzureOpenAiEmbeddingsModel of(AzureOpenAiEmbeddingsModel model, Map taskSettings) { @@ -124,4 +132,29 @@ public String[] operationPathSegments() { return new String[] { AzureOpenAiUtils.EMBEDDINGS_PATH }; } + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + USER, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("User") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the user issuing the request.") + .setType(SettingsConfigurationFieldType.STRING) + .setValue("") + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java index ce0fa0a885a20..1685683173a11 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java @@ -11,17 +11,22 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; @@ -39,7 +44,11 @@ import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsModel; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankModel; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -54,6 +63,8 @@ public class CohereService extends SenderService { public static final String NAME = "cohere"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING, TaskType.COMPLETION, TaskType.RERANK); + // TODO Batching - We'll instantiate a batching class within the services that want to support it and pass it through to // the Cohere*RequestManager via the CohereActionCreator class // The reason it needs to be done here is that the batching logic needs to hold state but the *RequestManagers are instantiated @@ -211,6 +222,16 @@ public CohereModel parsePersistedConfig(String inferenceEntityId, TaskType taskT ); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public void doInfer( Model model, @@ -332,4 +353,30 @@ public TransportVersion getMinimalSupportedVersion() { public Set supportedStreamingTasks() { return COMPLETION_ONLY; } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.putAll(DefaultSecretSettings.toSettingsConfiguration()); + configurationMap.putAll(RateLimitSettings.toSettingsConfiguration()); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + case TEXT_EMBEDDING -> taskSettingsConfig = CohereEmbeddingsModel.Configuration.get(); + case RERANK -> taskSettingsConfig = CohereRerankModel.Configuration.get(); + // COMPLETION task type has no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModel.java index 0f62ab51145f4..43a7bc0a5e678 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModel.java @@ -7,12 +7,17 @@ package org.elasticsearch.xpack.inference.services.cohere.embeddings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.cohere.CohereActionVisitor; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; @@ -20,7 +25,13 @@ import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; import java.net.URI; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import java.util.stream.Stream; + +import static org.elasticsearch.xpack.inference.external.request.cohere.CohereEmbeddingsRequestEntity.INPUT_TYPE_FIELD; +import static org.elasticsearch.xpack.inference.services.cohere.CohereServiceFields.TRUNCATE; public class CohereEmbeddingsModel extends CohereModel { public static CohereEmbeddingsModel of(CohereEmbeddingsModel model, Map taskSettings, InputType inputType) { @@ -99,4 +110,52 @@ public ExecutableAction accept(CohereActionVisitor visitor, Map public URI uri() { return getServiceSettings().getCommonSettings().uri(); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + INPUT_TYPE_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Input Type") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the type of input passed to the model.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of("classification", "clusterning", "ingest", "search") + .map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()) + .toList() + ) + .setValue("") + .build() + ); + configurationMap.put( + TRUNCATE, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Truncate") + .setOrder(2) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies how the API handles inputs longer than the maximum token length.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of("NONE", "START", "END") + .map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()) + .toList() + ) + .setValue("") + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankModel.java index b84b98973bbe5..cfcfb8a3d5dae 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankModel.java @@ -7,11 +7,15 @@ package org.elasticsearch.xpack.inference.services.cohere.rerank; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.cohere.CohereActionVisitor; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; @@ -19,8 +23,13 @@ import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; import java.net.URI; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankTaskSettings.RETURN_DOCUMENTS; +import static org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankTaskSettings.TOP_N_DOCS_ONLY; + public class CohereRerankModel extends CohereModel { public static CohereRerankModel of(CohereRerankModel model, Map taskSettings) { var requestTaskSettings = CohereRerankTaskSettings.fromMap(taskSettings); @@ -102,4 +111,41 @@ public ExecutableAction accept(CohereActionVisitor visitor, Map public URI uri() { return getServiceSettings().uri(); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + RETURN_DOCUMENTS, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TOGGLE) + .setLabel("Return Documents") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specify whether to return doc text within the results.") + .setType(SettingsConfigurationFieldType.BOOLEAN) + .setValue(false) + .build() + ); + configurationMap.put( + TOP_N_DOCS_ONLY, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Top N") + .setOrder(2) + .setRequired(false) + .setSensitive(false) + .setTooltip("The number of most relevant documents to return, defaults to the number of the documents.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java index 85c7273b47493..98429ed3d001d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java @@ -12,16 +12,23 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; @@ -35,12 +42,17 @@ import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; import org.elasticsearch.xpack.inference.telemetry.TraceContext; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; @@ -53,6 +65,8 @@ public class ElasticInferenceService extends SenderService { private final ElasticInferenceServiceComponents elasticInferenceServiceComponents; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.SPARSE_EMBEDDING); + public ElasticInferenceService( HttpRequestSender.Factory factory, ServiceComponents serviceComponents, @@ -143,6 +157,16 @@ public void parseRequestConfig( } } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + private static ElasticInferenceServiceModel createModel( String inferenceEntityId, TaskType taskType, @@ -272,4 +296,51 @@ private TraceContext getCurrentTraceInfo() { return new TraceContext(traceParent, traceState); } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + MODEL_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model ID") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("The name of the model to use for the inference task.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + MAX_INPUT_TOKENS, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Maximum Input Tokens") + .setOrder(3) + .setRequired(false) + .setSensitive(false) + .setTooltip("Allows you to specify the maximum number of tokens per input.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + + configurationMap.putAll(RateLimitSettings.toSettingsConfiguration()); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + // SPARSE_EMBEDDING task type has no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java index cd0c33082cb30..5f97f3bad3dc8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java @@ -39,7 +39,6 @@ import org.elasticsearch.xpack.inference.InferencePlugin; import java.io.IOException; -import java.util.EnumSet; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.function.Consumer; @@ -83,12 +82,6 @@ public BaseElasticsearchInternalService( this.clusterService = context.clusterService(); } - /** - * The task types supported by the service - * @return Set of supported. - */ - protected abstract EnumSet supportedTaskTypes(); - @Override public void start(Model model, ActionListener finalListener) { if (model instanceof ElasticsearchInternalModel esModel) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java index 63f4a3dbf8472..f620b15680c8d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java @@ -7,7 +7,17 @@ package org.elasticsearch.xpack.inference.services.elasticsearch; +import org.elasticsearch.common.util.LazyInitializable; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandRerankTaskSettings.RETURN_DOCUMENTS; public class CustomElandRerankModel extends CustomElandModel { @@ -25,4 +35,30 @@ public CustomElandRerankModel( public CustomElandInternalServiceSettings getServiceSettings() { return (CustomElandInternalServiceSettings) super.getServiceSettings(); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + RETURN_DOCUMENTS, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TOGGLE) + .setLabel("Return Documents") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Returns the document instead of only the index.") + .setType(SettingsConfigurationFieldType.BOOLEAN) + .setValue(true) + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index fec690199d97d..2e69a88731fd3 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -15,18 +15,26 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; @@ -63,11 +71,16 @@ import java.util.Set; import java.util.function.Consumer; import java.util.function.Function; +import java.util.stream.Stream; import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalServiceSettings.MODEL_ID; +import static org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS; +import static org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalServiceSettings.NUM_THREADS; +import static org.elasticsearch.xpack.inference.services.elasticsearch.ElserModels.ELSER_V1_MODEL; import static org.elasticsearch.xpack.inference.services.elasticsearch.ElserModels.ELSER_V2_MODEL; import static org.elasticsearch.xpack.inference.services.elasticsearch.ElserModels.ELSER_V2_MODEL_LINUX_X86; @@ -87,6 +100,12 @@ public class ElasticsearchInternalService extends BaseElasticsearchInternalServi public static final String DEFAULT_ELSER_ID = ".elser-2-elasticsearch"; public static final String DEFAULT_E5_ID = ".multilingual-e5-small-elasticsearch"; + private static final EnumSet supportedTaskTypes = EnumSet.of( + TaskType.RERANK, + TaskType.TEXT_EMBEDDING, + TaskType.SPARSE_EMBEDDING + ); + private static final Logger logger = LogManager.getLogger(ElasticsearchInternalService.class); private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ElasticsearchInternalService.class); @@ -103,8 +122,8 @@ public ElasticsearchInternalService(InferenceServiceExtension.InferenceServiceFa } @Override - protected EnumSet supportedTaskTypes() { - return EnumSet.of(TaskType.RERANK, TaskType.TEXT_EMBEDDING, TaskType.SPARSE_EMBEDDING); + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; } @Override @@ -142,7 +161,7 @@ public void parseRequestConfig( throwIfNotEmptyMap(config, name()); - String modelId = (String) serviceSettingsMap.get(ElasticsearchInternalServiceSettings.MODEL_ID); + String modelId = (String) serviceSettingsMap.get(MODEL_ID); String deploymentId = (String) serviceSettingsMap.get(ElasticsearchInternalServiceSettings.DEPLOYMENT_ID); if (deploymentId != null) { validateAgainstDeployment(modelId, deploymentId, taskType, modelListener.delegateFailureAndWrap((l, settings) -> { @@ -212,6 +231,11 @@ public void parseRequestConfig( } } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + private void customElandCase( String inferenceEntityId, TaskType taskType, @@ -220,7 +244,7 @@ private void customElandCase( ChunkingSettings chunkingSettings, ActionListener modelListener ) { - String modelId = (String) serviceSettingsMap.get(ElasticsearchInternalServiceSettings.MODEL_ID); + String modelId = (String) serviceSettingsMap.get(MODEL_ID); var request = new GetTrainedModelsAction.Request(modelId); var getModelsListener = modelListener.delegateFailureAndWrap((delegate, response) -> { @@ -439,7 +463,7 @@ public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, M chunkingSettings = ChunkingSettingsBuilder.fromMap(removeFromMapOrDefaultEmpty(config, ModelConfigurations.CHUNKING_SETTINGS)); } - String modelId = (String) serviceSettingsMap.get(ElasticsearchInternalServiceSettings.MODEL_ID); + String modelId = (String) serviceSettingsMap.get(MODEL_ID); if (modelId == null) { throw new IllegalArgumentException("Error parsing request config, model id is missing"); } @@ -1004,4 +1028,74 @@ static TaskType inferenceConfigToTaskType(InferenceConfig config) { return null; } } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + MODEL_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Model ID") + .setOrder(1) + .setRequired(true) + .setSensitive(false) + .setTooltip("The name of the model to use for the inference task.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of( + ELSER_V1_MODEL, + ELSER_V2_MODEL, + ELSER_V2_MODEL_LINUX_X86, + MULTILINGUAL_E5_SMALL_MODEL_ID, + MULTILINGUAL_E5_SMALL_MODEL_ID_LINUX_X86 + ).map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()).toList() + ) + .setDefaultValue(MULTILINGUAL_E5_SMALL_MODEL_ID) + .build() + ); + + configurationMap.put( + NUM_ALLOCATIONS, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Number Allocations") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("The total number of allocations this model is assigned across machine learning nodes.") + .setType(SettingsConfigurationFieldType.INTEGER) + .setDefaultValue(1) + .build() + ); + + configurationMap.put( + NUM_THREADS, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Number Threads") + .setOrder(3) + .setRequired(true) + .setSensitive(false) + .setTooltip("Sets the number of threads used by each model allocation during inference.") + .setType(SettingsConfigurationFieldType.INTEGER) + .setDefaultValue(2) + .build() + ); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + case RERANK -> taskSettingsConfig = CustomElandRerankModel.Configuration.get(); + // SPARSE_EMBEDDING, TEXT_EMBEDDING task types have no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java index f583caeac8ee3..d5f021b77e7c4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java @@ -11,18 +11,25 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; @@ -41,13 +48,18 @@ import org.elasticsearch.xpack.inference.services.googleaistudio.completion.GoogleAiStudioCompletionModel; import org.elasticsearch.xpack.inference.services.googleaistudio.embeddings.GoogleAiStudioEmbeddingsModel; import org.elasticsearch.xpack.inference.services.googleaistudio.embeddings.GoogleAiStudioEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; @@ -59,6 +71,8 @@ public class GoogleAiStudioService extends SenderService { public static final String NAME = "googleaistudio"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING, TaskType.COMPLETION); + public GoogleAiStudioService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -211,6 +225,16 @@ public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, M ); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_8_15_0; @@ -316,4 +340,40 @@ protected void doChunkedInfer( doInfer(model, new DocumentsOnlyInput(request.batch().inputs()), taskSettings, inputType, timeout, request.listener()); } } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + MODEL_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model ID") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("ID of the LLM you're using.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.putAll(DefaultSecretSettings.toSettingsConfiguration()); + configurationMap.putAll(RateLimitSettings.toSettingsConfiguration()); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + // COMPLETION, TEXT_EMBEDDING task types have no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiSecretSettings.java index 44e16fa058506..272bc9eaa9a62 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiSecretSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiSecretSettings.java @@ -13,12 +13,17 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SecretSettings; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -107,4 +112,27 @@ public int hashCode() { public SecretSettings newSecretSettings(Map newSecrets) { return GoogleVertexAiSecretSettings.fromMap(new HashMap<>(newSecrets)); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + configurationMap.put( + SERVICE_ACCOUNT_JSON, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Credentials JSON") + .setOrder(1) + .setRequired(true) + .setSensitive(true) + .setTooltip("API Key for the provider you're connecting to.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java index 36fb183f6de70..a38691c4de750 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java @@ -12,17 +12,24 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; @@ -38,21 +45,29 @@ import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; import static org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields.EMBEDDING_MAX_BATCH_SIZE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields.LOCATION; +import static org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields.PROJECT_ID; public class GoogleVertexAiService extends SenderService { public static final String NAME = "googlevertexai"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING, TaskType.RERANK); + public GoogleVertexAiService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -149,6 +164,16 @@ public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, M ); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_8_15_0; @@ -308,4 +333,71 @@ private static GoogleVertexAiModel createModel( default -> throw new ElasticsearchStatusException(failureMessage, RestStatus.BAD_REQUEST); }; } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + MODEL_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model ID") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("ID of the LLM you're using.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + LOCATION, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("GCP Region") + .setOrder(3) + .setRequired(true) + .setSensitive(false) + .setTooltip( + "Please provide the GCP region where the Vertex AI API(s) is enabled. " + + "For more information, refer to the {geminiVertexAIDocs}." + ) + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + PROJECT_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("GCP Project") + .setOrder(4) + .setRequired(true) + .setSensitive(false) + .setTooltip( + "The GCP Project ID which has Vertex AI API(s) enabled. For more information " + + "on the URL, refer to the {geminiVertexAIDocs}." + ) + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.putAll(GoogleVertexAiSecretSettings.Configuration.get()); + configurationMap.putAll(RateLimitSettings.toSettingsConfiguration()); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + case TEXT_EMBEDDING -> taskSettingsConfig = GoogleVertexAiEmbeddingsModel.Configuration.get(); + case RERANK -> taskSettingsConfig = GoogleVertexAiRerankModel.Configuration.get(); + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java index 3a5fae09b40ef..1df8ee937497a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java @@ -8,11 +8,15 @@ package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; import org.apache.http.client.utils.URIBuilder; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; import org.elasticsearch.xpack.inference.external.request.googlevertexai.GoogleVertexAiUtils; @@ -22,9 +26,12 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE; public class GoogleVertexAiEmbeddingsModel extends GoogleVertexAiModel { @@ -144,4 +151,30 @@ public static URI buildUri(String location, String projectId, String modelId) th ) .build(); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + AUTO_TRUNCATE, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TOGGLE) + .setLabel("Auto Truncate") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies if the API truncates inputs longer than the maximum token length automatically.") + .setType(SettingsConfigurationFieldType.BOOLEAN) + .setValue(false) + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java index 45fad977a2b6b..3f9c4f7a66560 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java @@ -8,10 +8,14 @@ package org.elasticsearch.xpack.inference.services.googlevertexai.rerank; import org.apache.http.client.utils.URIBuilder; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; import org.elasticsearch.xpack.inference.external.request.googlevertexai.GoogleVertexAiUtils; @@ -21,9 +25,12 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankTaskSettings.TOP_N; public class GoogleVertexAiRerankModel extends GoogleVertexAiModel { @@ -138,4 +145,30 @@ public static URI buildUri(String projectId) throws URISyntaxException { ) .build(); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + TOP_N, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TOGGLE) + .setLabel("Top N") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the number of the top n documents, which should be returned.") + .setType(SettingsConfigurationFieldType.BOOLEAN) + .setValue(false) + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java index 752d1dd605cd7..b1c478d229c73 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java @@ -11,15 +11,22 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; @@ -31,16 +38,23 @@ import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserModel; import org.elasticsearch.xpack.inference.services.huggingface.embeddings.HuggingFaceEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.ServiceFields.URL; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; public class HuggingFaceService extends HuggingFaceBaseService { public static final String NAME = "hugging_face"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING, TaskType.SPARSE_EMBEDDING); + public HuggingFaceService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -137,6 +151,16 @@ protected void doChunkedInfer( } } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public String name() { return NAME; @@ -146,4 +170,42 @@ public String name() { public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_8_15_0; } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + URL, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("URL") + .setOrder(1) + .setRequired(true) + .setSensitive(false) + .setTooltip("The URL endpoint to use for the requests.") + .setType(SettingsConfigurationFieldType.STRING) + .setValue("https://api.openai.com/v1/embeddings") + .setDefaultValue("https://api.openai.com/v1/embeddings") + .build() + ); + + configurationMap.putAll(DefaultSecretSettings.toSettingsConfiguration()); + configurationMap.putAll(RateLimitSettings.toSettingsConfiguration()); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + // SPARSE_EMBEDDING, TEXT_EMBEDDING task types have no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java index a8de51c23831f..e0afbf924f654 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java @@ -12,15 +12,22 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; import org.elasticsearch.xpack.core.inference.results.InferenceChunkedSparseEmbeddingResults; @@ -34,15 +41,22 @@ import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceBaseService; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceModel; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; +import static org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserServiceSettings.URL; public class HuggingFaceElserService extends HuggingFaceBaseService { public static final String NAME = "hugging_face_elser"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.SPARSE_EMBEDDING); + public HuggingFaceElserService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -106,8 +120,54 @@ private static List translateToChunkedResults( } } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_8_12_0; } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + URL, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("URL") + .setOrder(1) + .setRequired(true) + .setSensitive(false) + .setTooltip("The URL endpoint to use for the requests.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.putAll(DefaultSecretSettings.toSettingsConfiguration()); + configurationMap.putAll(RateLimitSettings.toSettingsConfiguration()); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + // SPARSE_EMBEDDING task type has no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java index be5f595d2c0fb..b1d3297fc6328 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java @@ -36,7 +36,7 @@ public class HuggingFaceElserServiceSettings extends FilteredXContentObject HuggingFaceRateLimitServiceSettings { public static final String NAME = "hugging_face_elser_service_settings"; - static final String URL = "url"; + public static final String URL = "url"; private static final int ELSER_TOKEN_LIMIT = 512; // At the time of writing HuggingFace hasn't posted the default rate limit for inference endpoints so the value his is only a guess // 3000 requests per minute diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java index ee88eb6206b52..e960b0b777f2b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java @@ -11,17 +11,24 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.ibmwatsonx.IbmWatsonxActionCreator; @@ -36,20 +43,29 @@ import org.elasticsearch.xpack.inference.services.ibmwatsonx.embeddings.IbmWatsonxEmbeddingsModel; import org.elasticsearch.xpack.inference.services.ibmwatsonx.embeddings.IbmWatsonxEmbeddingsServiceSettings; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserServiceSettings.URL; +import static org.elasticsearch.xpack.inference.services.ibmwatsonx.IbmWatsonxServiceFields.API_VERSION; import static org.elasticsearch.xpack.inference.services.ibmwatsonx.IbmWatsonxServiceFields.EMBEDDING_MAX_BATCH_SIZE; +import static org.elasticsearch.xpack.inference.services.ibmwatsonx.IbmWatsonxServiceFields.PROJECT_ID; public class IbmWatsonxService extends SenderService { public static final String NAME = "watsonxai"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING); + public IbmWatsonxService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -135,6 +151,16 @@ public IbmWatsonxModel parsePersistedConfigWithSecrets( ); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + private static IbmWatsonxModel createModelFromPersistent( String inferenceEntityId, TaskType taskType, @@ -251,4 +277,85 @@ protected void doChunkedInfer( protected IbmWatsonxActionCreator getActionCreator(Sender sender, ServiceComponents serviceComponents) { return new IbmWatsonxActionCreator(getSender(), getServiceComponents()); } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + API_VERSION, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("API Version") + .setOrder(1) + .setRequired(true) + .setSensitive(false) + .setTooltip("The IBM Watsonx API version ID to use.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + PROJECT_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Project ID") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + MODEL_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model ID") + .setOrder(3) + .setRequired(true) + .setSensitive(false) + .setTooltip("The name of the model to use for the inference task.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + URL, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("URL") + .setOrder(4) + .setRequired(true) + .setSensitive(false) + .setTooltip("") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + MAX_INPUT_TOKENS, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Maximum Input Tokens") + .setOrder(5) + .setRequired(false) + .setSensitive(false) + .setTooltip("Allows you to specify the maximum number of tokens per input.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + // TEXT_EMBEDDING task type has no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java index 8ae9b91b599d9..acc20fa35fd47 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java @@ -11,18 +11,25 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; @@ -37,20 +44,28 @@ import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.mistral.embeddings.MistralEmbeddingsModel; import org.elasticsearch.xpack.inference.services.mistral.embeddings.MistralEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.mistral.MistralConstants.MODEL_FIELD; public class MistralService extends SenderService { public static final String NAME = "mistral"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING); + public MistralService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -112,6 +127,16 @@ protected void doChunkedInfer( } } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public String name() { return NAME; @@ -282,4 +307,52 @@ public Model updateModelWithEmbeddingDetails(Model model, int embeddingSize) { throw ServiceUtils.invalidModelTypeForUpdateModelWithEmbeddingDetails(model.getClass()); } } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + MODEL_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("Refer to the Mistral models documentation for the list of available text embedding models.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + MAX_INPUT_TOKENS, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Maximum Input Tokens") + .setOrder(3) + .setRequired(false) + .setSensitive(false) + .setTooltip("Allows you to specify the maximum number of tokens per input.") + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + + configurationMap.putAll(DefaultSecretSettings.toSettingsConfiguration()); + configurationMap.putAll(RateLimitSettings.toSettingsConfiguration()); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + // TEXT_EMBEDDING task type has no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index 30656d004b1d2..7b65f97a3074c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -11,18 +11,25 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; @@ -38,23 +45,31 @@ import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; +import static org.elasticsearch.xpack.inference.services.ServiceFields.URL; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.EMBEDDING_MAX_BATCH_SIZE; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.ORGANIZATION; public class OpenAiService extends SenderService { public static final String NAME = "openai"; + private static final EnumSet supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING, TaskType.COMPLETION); + public OpenAiService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } @@ -212,6 +227,16 @@ public OpenAiModel parsePersistedConfig(String inferenceEntityId, TaskType taskT ); } + @Override + public InferenceServiceConfiguration getConfiguration() { + return Configuration.get(); + } + + @Override + public EnumSet supportedTaskTypes() { + return supportedTaskTypes; + } + @Override public void doInfer( Model model, @@ -344,4 +369,78 @@ static void moveModelFromTaskToServiceSettings(Map taskSettings, serviceSettings.put(MODEL_ID, modelId); } } + + public static class Configuration { + public static InferenceServiceConfiguration get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable configuration = new LazyInitializable<>( + () -> { + var configurationMap = new HashMap(); + + configurationMap.put( + MODEL_ID, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Model ID") + .setOrder(2) + .setRequired(true) + .setSensitive(false) + .setTooltip("The name of the model to use for the inference task.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + ORGANIZATION, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("Organization ID") + .setOrder(3) + .setRequired(false) + .setSensitive(false) + .setTooltip("The unique identifier of your organization.") + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + + configurationMap.put( + URL, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("URL") + .setOrder(4) + .setRequired(true) + .setSensitive(false) + .setTooltip( + "The OpenAI API endpoint URL. For more information on the URL, refer to the " + + "https://platform.openai.com/docs/api-reference." + ) + .setType(SettingsConfigurationFieldType.STRING) + .setDefaultValue("https://api.openai.com/v1/chat/completions") + .build() + ); + + configurationMap.putAll( + DefaultSecretSettings.toSettingsConfigurationWithTooltip( + "The OpenAI API authentication key. For more details about generating OpenAI API keys, " + + "refer to the https://platform.openai.com/account/api-keys." + ) + ); + configurationMap.putAll( + RateLimitSettings.toSettingsConfigurationWithTooltip( + "Default number of requests allowed per minute. For text_embedding is 3000. For completion is 500." + ) + ); + + return new InferenceServiceConfiguration.Builder().setProvider(NAME).setTaskTypes(supportedTaskTypes.stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + case TEXT_EMBEDDING -> taskSettingsConfig = OpenAiEmbeddingsModel.Configuration.get(); + case COMPLETION -> taskSettingsConfig = OpenAiChatCompletionModel.Configuration.get(); + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()).setConfiguration(configurationMap).build(); + } + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java index 7ca93684bc680..e721cd2955cf3 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java @@ -7,18 +7,26 @@ package org.elasticsearch.xpack.inference.services.openai.completion; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionVisitor; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.openai.OpenAiModel; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.USER; + public class OpenAiChatCompletionModel extends OpenAiModel { public static OpenAiChatCompletionModel of(OpenAiChatCompletionModel model, Map taskSettings) { @@ -88,4 +96,30 @@ public DefaultSecretSettings getSecretSettings() { public ExecutableAction accept(OpenAiActionVisitor creator, Map taskSettings) { return creator.create(this, taskSettings); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + USER, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("User") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the user issuing the request.") + .setType(SettingsConfigurationFieldType.STRING) + .setValue("") + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java index 5659c46050ad8..cab2a82fc86c6 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java @@ -7,19 +7,27 @@ package org.elasticsearch.xpack.inference.services.openai.embeddings; +import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionVisitor; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.openai.OpenAiModel; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.USER; + public class OpenAiEmbeddingsModel extends OpenAiModel { public static OpenAiEmbeddingsModel of(OpenAiEmbeddingsModel model, Map taskSettings) { @@ -97,4 +105,30 @@ public DefaultSecretSettings getSecretSettings() { public ExecutableAction accept(OpenAiActionVisitor creator, Map taskSettings) { return creator.create(this, taskSettings); } + + public static class Configuration { + public static Map get() { + return configuration.getOrCompute(); + } + + private static final LazyInitializable, RuntimeException> configuration = + new LazyInitializable<>(() -> { + var configurationMap = new HashMap(); + + configurationMap.put( + USER, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("User") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the user issuing the request.") + .setType(SettingsConfigurationFieldType.STRING) + .setValue("") + .build() + ); + + return Collections.unmodifiableMap(configurationMap); + }); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettings.java index c68d4bc801724..771d2308a502f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettings.java @@ -16,6 +16,9 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SecretSettings; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -49,6 +52,26 @@ public static DefaultSecretSettings fromMap(@Nullable Map map) { return new DefaultSecretSettings(secureApiToken); } + public static Map toSettingsConfigurationWithTooltip(String tooltip) { + var configurationMap = new HashMap(); + configurationMap.put( + API_KEY, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TEXTBOX) + .setLabel("API Key") + .setOrder(1) + .setRequired(true) + .setSensitive(true) + .setTooltip(tooltip) + .setType(SettingsConfigurationFieldType.STRING) + .build() + ); + return configurationMap; + } + + public static Map toSettingsConfiguration() { + return DefaultSecretSettings.toSettingsConfigurationWithTooltip("API Key for the provider you're connecting to."); + } + public DefaultSecretSettings { Objects.requireNonNull(apiKey); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/RateLimitSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/RateLimitSettings.java index f593ca4e0c603..416d5bff12ce9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/RateLimitSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/RateLimitSettings.java @@ -11,11 +11,15 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; +import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import java.io.IOException; +import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.concurrent.TimeUnit; @@ -48,6 +52,26 @@ public static RateLimitSettings of( return requestsPerMinute == null ? defaultValue : new RateLimitSettings(requestsPerMinute); } + public static Map toSettingsConfigurationWithTooltip(String tooltip) { + var configurationMap = new HashMap(); + configurationMap.put( + FIELD_NAME + "." + REQUESTS_PER_MINUTE_FIELD, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.NUMERIC) + .setLabel("Rate Limit") + .setOrder(6) + .setRequired(false) + .setSensitive(false) + .setTooltip(tooltip) + .setType(SettingsConfigurationFieldType.INTEGER) + .build() + ); + return configurationMap; + } + + public static Map toSettingsConfiguration() { + return RateLimitSettings.toSettingsConfigurationWithTooltip("Minimize the number of rate limit errors."); + } + /** * Defines the settings in requests per minute * @param requestsPerMinute _ diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java index a063a398a4947..d8402c28cec87 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java @@ -13,9 +13,13 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.EmptySettingsConfiguration; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.SettingsConfiguration; +import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -27,6 +31,8 @@ import org.junit.Before; import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -161,5 +167,25 @@ public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, M public TransportVersion getMinimalSupportedVersion() { return TransportVersion.current(); } + + @Override + public InferenceServiceConfiguration getConfiguration() { + return new InferenceServiceConfiguration.Builder().setProvider("test service") + .setTaskTypes(supportedTaskTypes().stream().map(t -> { + Map taskSettingsConfig; + switch (t) { + // no task settings + default -> taskSettingsConfig = EmptySettingsConfiguration.get(); + } + return new TaskSettingsConfiguration.Builder().setTaskType(t).setConfiguration(taskSettingsConfig).build(); + }).toList()) + .setConfiguration(new HashMap<>()) + .build(); + } + + @Override + public EnumSet supportedTaskTypes() { + return EnumSet.of(TaskType.TEXT_EMBEDDING); + } } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceTests.java index 7cedc36ffa5f0..445d9c68a88aa 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceTests.java @@ -10,11 +10,15 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -22,6 +26,8 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.InferenceChunkedSparseEmbeddingResults; @@ -56,6 +62,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -493,6 +501,235 @@ private void testChunkedInfer(TaskType taskType, ChunkingSettings chunkingSettin } } + @SuppressWarnings("checkstyle:LineLength") + public void testGetConfiguration() throws Exception { + try (var service = new AlibabaCloudSearchService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool))) { + String content = XContentHelper.stripWhitespace( + """ + { + "provider": "alibabacloud-ai-search", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": { + "input_type": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Input Type", + "options": [ + { + "label": "ingest", + "value": "ingest" + }, + { + "label": "search", + "value": "search" + } + ], + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the type of input passed to the model.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + } + } + }, + { + "task_type": "sparse_embedding", + "configuration": { + "return_token": { + "default_value": null, + "depends_on": [], + "display": "toggle", + "label": "Return Token", + "order": 2, + "required": false, + "sensitive": false, + "tooltip": "If `true`, the token name will be returned in the response. Defaults to `false` which means only the token ID will be returned in the response.", + "type": "bool", + "ui_restrictions": [], + "validations": [], + "value": true + }, + "input_type": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Input Type", + "options": [ + { + "label": "ingest", + "value": "ingest" + }, + { + "label": "search", + "value": "search" + } + ], + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the type of input passed to the model.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + } + } + }, + { + "task_type": "rerank", + "configuration": {} + }, + { + "task_type": "completion", + "configuration": {} + } + ], + "configuration": { + "workspace": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Workspace", + "order": 5, + "required": true, + "sensitive": false, + "tooltip": "The name of the workspace used for the {infer} task.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "api_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Key", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "A valid API key for the AlibabaCloud AI Search API.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "service_id": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Project ID", + "options": [ + { + "label": "ops-text-embedding-001", + "value": "ops-text-embedding-001" + }, + { + "label": "ops-text-embedding-zh-001", + "value": "ops-text-embedding-zh-001" + }, + { + "label": "ops-text-embedding-en-001", + "value": "ops-text-embedding-en-001" + }, + { + "label": "ops-text-embedding-002", + "value": "ops-text-embedding-002" + }, + { + "label": "ops-text-sparse-embedding-001", + "value": "ops-text-sparse-embedding-001" + }, + { + "label": "ops-bge-reranker-larger", + "value": "ops-bge-reranker-larger" + } + ], + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "The name of the model service to use for the {infer} task.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "host": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Host", + "order": 3, + "required": true, + "sensitive": false, + "tooltip": "The name of the host address used for the {infer} task. You can find the host address at https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[ the API keys section] of the documentation.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "Minimize the number of rate limit errors.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "http_schema": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "HTTP Schema", + "options": [ + { + "label": "https", + "value": "https" + }, + { + "label": "http", + "value": "http" + } + ], + "order": 4, + "required": true, + "sensitive": false, + "tooltip": "", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """ + ); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + private AlibabaCloudSearchModel createModelForTaskType(TaskType taskType, ChunkingSettings chunkingSettings) { Map serviceSettingsMap = new HashMap<>(); serviceSettingsMap.put(AlibabaCloudSearchServiceSettings.SERVICE_ID, "service_id"); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java index 931d418a3664b..6de6b38330ad1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java @@ -14,11 +14,15 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -28,6 +32,8 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; @@ -57,6 +63,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -143,6 +151,210 @@ public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOExcepti } } + @SuppressWarnings("checkstyle:LineLength") + public void testGetConfiguration() throws Exception { + try (var service = createAmazonBedrockService()) { + String content = XContentHelper.stripWhitespace( + """ + { + "provider": "amazonbedrock", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": {} + }, + { + "task_type": "completion", + "configuration": { + "top_p": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Top P", + "order": 3, + "required": false, + "sensitive": false, + "tooltip": "Alternative to temperature. A number in the range of 0.0 to 1.0, to eliminate low-probability tokens.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "max_new_tokens": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Max New Tokens", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Sets the maximum number for the output tokens to be generated.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "top_k": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Top K", + "order": 4, + "required": false, + "sensitive": false, + "tooltip": "Only available for anthropic, cohere, and mistral providers. Alternative to temperature.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "temperature": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Temperature", + "order": 2, + "required": false, + "sensitive": false, + "tooltip": "A number between 0.0 and 1.0 that controls the apparent creativity of the results.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + ], + "configuration": { + "secret_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Secret Key", + "order": 2, + "required": true, + "sensitive": true, + "tooltip": "A valid AWS secret key that is paired with the access_key.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "provider": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Provider", + "options": [ + { + "label": "amazontitan", + "value": "amazontitan" + }, + { + "label": "anthropic", + "value": "anthropic" + }, + { + "label": "ai21labs", + "value": "ai21labs" + }, + { + "label": "cohere", + "value": "cohere" + }, + { + "label": "meta", + "value": "meta" + }, + { + "label": "mistral", + "value": "mistral" + } + ], + "order": 3, + "required": true, + "sensitive": false, + "tooltip": "The model provider for your deployment.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "access_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Access Key", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "A valid AWS access key that has permissions to use Amazon Bedrock.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "model": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Model", + "order": 4, + "required": true, + "sensitive": false, + "tooltip": "The base model ID or an ARN to a custom model based on a foundational model.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "By default, the amazonbedrock service sets the number of requests allowed per minute to 240.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "region": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Region", + "order": 5, + "required": true, + "sensitive": false, + "tooltip": "The region that your model or ARN is deployed in.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """ + ); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + public void testCreateModel_ForEmbeddingsTask_InvalidProvider() throws IOException { try (var service = createAmazonBedrockService()) { ActionListener modelVerificationListener = ActionListener.wrap( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java index c4f7fbfb14437..0f802637c6700 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java @@ -11,8 +11,12 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -22,6 +26,7 @@ import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; @@ -47,6 +52,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.buildExpectationCompletions; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.getModelListenerForException; @@ -593,6 +600,135 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("blah"); } + public void testGetConfiguration() throws Exception { + try (var service = createServiceWithMockSender()) { + String content = XContentHelper.stripWhitespace(""" + { + "provider": "anthropic", + "task_types": [ + { + "task_type": "completion", + "configuration": { + "top_p": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Top P", + "order": 4, + "required": false, + "sensitive": false, + "tooltip": "Specifies to use Anthropic’s nucleus sampling.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "max_tokens": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Max Tokens", + "order": 1, + "required": true, + "sensitive": false, + "tooltip": "The maximum number of tokens to generate before stopping.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "top_k": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Top K", + "order": 3, + "required": false, + "sensitive": false, + "tooltip": "Specifies to only sample from the top K options for each subsequent token.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "temperature": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Temperature", + "order": 2, + "required": false, + "sensitive": false, + "tooltip": "The amount of randomness injected into the response.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + ], + "configuration": { + "api_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Key", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "API Key for the provider you're connecting to.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "By default, the anthropic service sets the number of requests allowed per minute to 50.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "model_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Model ID", + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "The name of the model to use for the inference task.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + public void testSupportsStreaming() throws IOException { try (var service = new AnthropicService(mock(), createWithEmptySettings(mock()))) { assertTrue(service.canStream(TaskType.COMPLETION)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index 4d2eb60767f44..ec5eef4428e7d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -13,12 +13,16 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -29,6 +33,7 @@ import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.action.InferenceAction; @@ -62,6 +67,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; @@ -1384,6 +1391,221 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("You didn't provide an API key..."); } + @SuppressWarnings("checkstyle:LineLength") + public void testGetConfiguration() throws Exception { + try (var service = createService()) { + String content = XContentHelper.stripWhitespace( + """ + { + "provider": "azureaistudio", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": { + "top_p": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Top P", + "order": 4, + "required": false, + "sensitive": false, + "tooltip": "A number in the range of 0.0 to 2.0 that is an alternative value to temperature. Should not be used if temperature is specified.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "max_new_tokens": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Max New Tokens", + "order": 2, + "required": false, + "sensitive": false, + "tooltip": "Provides a hint for the maximum number of output tokens to be generated.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "temperature": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Temperature", + "order": 3, + "required": false, + "sensitive": false, + "tooltip": "A number in the range of 0.0 to 2.0 that specifies the sampling temperature.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "do_sample": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Do Sample", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Instructs the inference process to perform sampling or not.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + }, + { + "task_type": "completion", + "configuration": { + "user": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "User", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the user issuing the request.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + } + } + } + ], + "configuration": { + "endpoint_type": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Endpoint Type", + "options": [ + { + "label": "token", + "value": "token" + }, + { + "label": "realtime", + "value": "realtime" + } + ], + "order": 3, + "required": true, + "sensitive": false, + "tooltip": "Specifies the type of endpoint that is used in your model deployment.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "provider": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Provider", + "options": [ + { + "label": "cohere", + "value": "cohere" + }, + { + "label": "meta", + "value": "meta" + }, + { + "label": "microsoft_phi", + "value": "microsoft_phi" + }, + { + "label": "mistral", + "value": "mistral" + }, + { + "label": "openai", + "value": "openai" + }, + { + "label": "databricks", + "value": "databricks" + } + ], + "order": 3, + "required": true, + "sensitive": false, + "tooltip": "The model provider for your deployment.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "api_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Key", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "API Key for the provider you're connecting to.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "Minimize the number of rate limit errors.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "target": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Target", + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "The target URL of your Azure AI Studio model deployment.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """ + ); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + public void testSupportsStreaming() throws IOException { try (var service = new AzureAiStudioService(mock(), createWithEmptySettings(mock()))) { assertTrue(service.canStream(TaskType.COMPLETION)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index 1bae6ce66d6aa..41fd7d099d416 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -14,11 +14,15 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -29,6 +33,7 @@ import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.action.InferenceAction; @@ -56,6 +61,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; @@ -1504,6 +1511,157 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("You didn't provide an API key..."); } + @SuppressWarnings("checkstyle:LineLength") + public void testGetConfiguration() throws Exception { + try (var service = createAzureOpenAiService()) { + String content = XContentHelper.stripWhitespace( + """ + { + "provider": "azureopenai", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": { + "user": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "User", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the user issuing the request.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + } + } + }, + { + "task_type": "completion", + "configuration": { + "user": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "User", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the user issuing the request.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + } + } + } + ], + "configuration": { + "api_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Key", + "order": 1, + "required": false, + "sensitive": true, + "tooltip": "You must provide either an API key or an Entra ID.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "entra_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Entra ID", + "order": 2, + "required": false, + "sensitive": true, + "tooltip": "You must provide either an API key or an Entra ID.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "The azureopenai service sets a default number of requests allowed per minute depending on the task type.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "deployment_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Deployment ID", + "order": 5, + "required": true, + "sensitive": false, + "tooltip": "The deployment name of your deployed models.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "resource_name": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Resource Name", + "order": 3, + "required": true, + "sensitive": false, + "tooltip": "The name of your Azure OpenAI resource.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "api_version": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Version", + "order": 4, + "required": true, + "sensitive": false, + "tooltip": "The Azure API version ID to use.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """ + ); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + public void testSupportsStreaming() throws IOException { try (var service = new AzureOpenAiService(mock(), createWithEmptySettings(mock()))) { assertTrue(service.canStream(TaskType.COMPLETION)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index d44be4246f844..3ce06df1f7fdb 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -14,12 +14,16 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -30,6 +34,7 @@ import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.action.InferenceAction; @@ -60,6 +65,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; @@ -1683,6 +1690,165 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("how dare you"); } + @SuppressWarnings("checkstyle:LineLength") + public void testGetConfiguration() throws Exception { + try (var service = createCohereService()) { + String content = XContentHelper.stripWhitespace( + """ + { + "provider": "cohere", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": { + "truncate": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Truncate", + "options": [ + { + "label": "NONE", + "value": "NONE" + }, + { + "label": "START", + "value": "START" + }, + { + "label": "END", + "value": "END" + } + ], + "order": 2, + "required": false, + "sensitive": false, + "tooltip": "Specifies how the API handles inputs longer than the maximum token length.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + }, + "input_type": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Input Type", + "options": [ + { + "label": "classification", + "value": "classification" + }, + { + "label": "clusterning", + "value": "clusterning" + }, + { + "label": "ingest", + "value": "ingest" + }, + { + "label": "search", + "value": "search" + } + ], + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the type of input passed to the model.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + } + } + }, + { + "task_type": "rerank", + "configuration": { + "top_n": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Top N", + "order": 2, + "required": false, + "sensitive": false, + "tooltip": "The number of most relevant documents to return, defaults to the number of the documents.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "return_documents": { + "default_value": null, + "depends_on": [], + "display": "toggle", + "label": "Return Documents", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specify whether to return doc text within the results.", + "type": "bool", + "ui_restrictions": [], + "validations": [], + "value": false + } + } + }, + { + "task_type": "completion", + "configuration": {} + } + ], + "configuration": { + "api_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Key", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "API Key for the provider you're connecting to.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "Minimize the number of rate limit errors.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """ + ); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + public void testSupportsStreaming() throws IOException { try (var service = new CohereService(mock(), createWithEmptySettings(mock()))) { assertTrue(service.canStream(TaskType.COMPLETION)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java index d10c70c6f0f5e..3767ac496d183 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java @@ -11,12 +11,16 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.EmptySecretSettings; import org.elasticsearch.inference.EmptyTaskSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -25,6 +29,7 @@ import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.InferenceChunkedSparseEmbeddingResults; @@ -48,6 +53,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.getModelListenerForException; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; @@ -488,6 +495,78 @@ public void testChunkedInfer_PassesThrough() throws IOException { } } + public void testGetConfiguration() throws Exception { + try (var service = createServiceWithMockSender()) { + String content = XContentHelper.stripWhitespace(""" + { + "provider": "elastic", + "task_types": [ + { + "task_type": "sparse_embedding", + "configuration": {} + } + ], + "configuration": { + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "Minimize the number of rate limit errors.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "model_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Model ID", + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "The name of the model to use for the inference task.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "max_input_tokens": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Maximum Input Tokens", + "order": 3, + "required": false, + "sensitive": false, + "tooltip": "Allows you to specify the maximum number of tokens per input.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + private ElasticInferenceService createServiceWithMockSender() { return new ElasticInferenceService( mock(HttpRequestSender.Factory.class), diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index 5ec66687752a8..cad33b56ce235 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -15,9 +15,12 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; @@ -25,6 +28,7 @@ import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptyTaskSettings; import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -34,6 +38,8 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.action.InferenceAction; @@ -77,6 +83,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.chunking.ChunkingSettingsTests.createRandomChunkingSettingsMap; import static org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID; import static org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID_LINUX_X86; @@ -1566,6 +1574,123 @@ public void testIsDefaultId() { assertFalse(service.isDefaultId("foo")); } + public void testGetConfiguration() throws Exception { + try (var service = createService(mock(Client.class))) { + String content = XContentHelper.stripWhitespace(""" + { + "provider": "elasticsearch", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": {} + }, + { + "task_type": "sparse_embedding", + "configuration": {} + }, + { + "task_type": "rerank", + "configuration": { + "return_documents": { + "default_value": null, + "depends_on": [], + "display": "toggle", + "label": "Return Documents", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Returns the document instead of only the index.", + "type": "bool", + "ui_restrictions": [], + "validations": [], + "value": true + } + } + } + ], + "configuration": { + "num_allocations": { + "default_value": 1, + "depends_on": [], + "display": "numeric", + "label": "Number Allocations", + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "The total number of allocations this model is assigned across machine learning nodes.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "num_threads": { + "default_value": 2, + "depends_on": [], + "display": "numeric", + "label": "Number Threads", + "order": 3, + "required": true, + "sensitive": false, + "tooltip": "Sets the number of threads used by each model allocation during inference.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "model_id": { + "default_value": ".multilingual-e5-small", + "depends_on": [], + "display": "dropdown", + "label": "Model ID", + "options": [ + { + "label": ".elser_model_1", + "value": ".elser_model_1" + }, + { + "label": ".elser_model_2", + "value": ".elser_model_2" + }, + { + "label": ".elser_model_2_linux-x86_64", + "value": ".elser_model_2_linux-x86_64" + }, + { + "label": ".multilingual-e5-small", + "value": ".multilingual-e5-small" + }, + { + "label": ".multilingual-e5-small_linux-x86_64", + "value": ".multilingual-e5-small_linux-x86_64" + } + ], + "order": 1, + "required": true, + "sensitive": false, + "tooltip": "The name of the model to use for the inference task.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + private ElasticsearchInternalService createService(Client client) { var cs = mock(ClusterService.class); var cSettings = new ClusterSettings(Settings.EMPTY, Set.of(MachineLearningField.MAX_LAZY_ML_NODES)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java index 27a53177658c6..e94a3f5d727cf 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java @@ -12,13 +12,17 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptyTaskSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -29,6 +33,7 @@ import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.action.InferenceAction; @@ -57,6 +62,8 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; @@ -1219,6 +1226,82 @@ private void testUpdateModelWithEmbeddingDetails_Successful(SimilarityMeasure si } } + public void testGetConfiguration() throws Exception { + try (var service = createGoogleAiStudioService()) { + String content = XContentHelper.stripWhitespace(""" + { + "provider": "googleaistudio", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": {} + }, + { + "task_type": "completion", + "configuration": {} + } + ], + "configuration": { + "api_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Key", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "API Key for the provider you're connecting to.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "Minimize the number of rate limit errors.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "model_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Model ID", + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "ID of the LLM you're using.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + public void testSupportsStreaming() throws IOException { try (var service = new GoogleAiStudioService(mock(), createWithEmptySettings(mock()))) { assertTrue(service.canStream(TaskType.COMPLETION)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java index 70ec6522c0fcb..da38cdc763db4 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java @@ -9,14 +9,20 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; @@ -36,6 +42,8 @@ import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -953,6 +961,143 @@ public void testParsePersistedConfig_CreatesAnEmbeddingsModelWhenChunkingSetting // testInfer tested via end-to-end notebook tests in AppEx repo + @SuppressWarnings("checkstyle:LineLength") + public void testGetConfiguration() throws Exception { + try (var service = createGoogleVertexAiService()) { + String content = XContentHelper.stripWhitespace( + """ + { + "provider": "googlevertexai", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": { + "auto_truncate": { + "default_value": null, + "depends_on": [], + "display": "toggle", + "label": "Auto Truncate", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies if the API truncates inputs longer than the maximum token length automatically.", + "type": "bool", + "ui_restrictions": [], + "validations": [], + "value": false + } + } + }, + { + "task_type": "rerank", + "configuration": { + "top_n": { + "default_value": null, + "depends_on": [], + "display": "toggle", + "label": "Top N", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the number of the top n documents, which should be returned.", + "type": "bool", + "ui_restrictions": [], + "validations": [], + "value": false + } + } + } + ], + "configuration": { + "service_account_json": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Credentials JSON", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "API Key for the provider you're connecting to.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "project_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "GCP Project", + "order": 4, + "required": true, + "sensitive": false, + "tooltip": "The GCP Project ID which has Vertex AI API(s) enabled. For more information on the URL, refer to the {geminiVertexAIDocs}.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "location": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "GCP Region", + "order": 3, + "required": true, + "sensitive": false, + "tooltip": "Please provide the GCP region where the Vertex AI API(s) is enabled. For more information, refer to the {geminiVertexAIDocs}.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "Minimize the number of rate limit errors.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "model_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Model ID", + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "ID of the LLM you're using.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """ + ); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + private GoogleVertexAiService createGoogleVertexAiService() { return new GoogleVertexAiService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java index b012da8c51ae4..df82f1ed393bf 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java @@ -9,15 +9,20 @@ import org.apache.http.HttpHeaders; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.InferenceChunkedSparseEmbeddingResults; @@ -38,6 +43,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; @@ -123,4 +130,81 @@ public void testChunkedInfer_CallsInfer_Elser_ConvertsFloatResponse() throws IOE assertThat(requestMap.get("inputs"), Matchers.is(List.of("abc"))); } } + + public void testGetConfiguration() throws Exception { + try ( + var service = new HuggingFaceElserService( + HttpRequestSenderTests.createSenderFactory(threadPool, clientManager), + createWithEmptySettings(threadPool) + ) + ) { + String content = XContentHelper.stripWhitespace(""" + { + "provider": "hugging_face_elser", + "task_types": [ + { + "task_type": "sparse_embedding", + "configuration": {} + } + ], + "configuration": { + "api_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Key", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "API Key for the provider you're connecting to.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "Minimize the number of rate limit errors.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "url": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "URL", + "order": 1, + "required": true, + "sensitive": false, + "tooltip": "The URL endpoint to use for the requests.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java index 8659b811f948e..a683d6e3cb051 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java @@ -13,11 +13,15 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -28,6 +32,7 @@ import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.action.InferenceAction; @@ -54,6 +59,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResultsTests.asMapWithListsInsteadOfArrays; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; @@ -938,6 +945,82 @@ public void testChunkedInfer() throws IOException { } } + public void testGetConfiguration() throws Exception { + try (var service = createHuggingFaceService()) { + String content = XContentHelper.stripWhitespace(""" + { + "provider": "hugging_face", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": {} + }, + { + "task_type": "sparse_embedding", + "configuration": {} + } + ], + "configuration": { + "api_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Key", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "API Key for the provider you're connecting to.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "Minimize the number of rate limit errors.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "url": { + "default_value": "https://api.openai.com/v1/embeddings", + "depends_on": [], + "display": "textbox", + "label": "URL", + "order": 1, + "required": true, + "sensitive": false, + "tooltip": "The URL endpoint to use for the requests.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "https://api.openai.com/v1/embeddings" + } + } + } + """); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + private HuggingFaceService createHuggingFaceService() { return new HuggingFaceService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxServiceTests.java index f8f08e6f880ab..d6c491f2b7cec 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxServiceTests.java @@ -13,11 +13,15 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.EmptyTaskSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -28,6 +32,7 @@ import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; @@ -58,6 +63,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; @@ -764,6 +771,106 @@ public void testCheckModelConfig_DoesNotUpdateSimilarity_WhenItIsSpecifiedAsCosi } } + public void testGetConfiguration() throws Exception { + try (var service = createIbmWatsonxService()) { + String content = XContentHelper.stripWhitespace(""" + { + "provider": "watsonxai", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": {} + } + ], + "configuration": { + "project_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Project ID", + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "model_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Model ID", + "order": 3, + "required": true, + "sensitive": false, + "tooltip": "The name of the model to use for the inference task.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "api_version": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Version", + "order": 1, + "required": true, + "sensitive": false, + "tooltip": "The IBM Watsonx API version ID to use.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "max_input_tokens": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Maximum Input Tokens", + "order": 5, + "required": false, + "sensitive": false, + "tooltip": "Allows you to specify the maximum number of tokens per input.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "url": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "URL", + "order": 4, + "required": true, + "sensitive": false, + "tooltip": "", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + private static ActionListener getModelListenerForException(Class exceptionClass, String expectedMessage) { return ActionListener.wrap((model) -> fail("Model parsing should have failed"), e -> { assertThat(e, Matchers.instanceOf(exceptionClass)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java index c4a91260d89a0..d9075b7988368 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java @@ -12,12 +12,16 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -28,6 +32,7 @@ import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.action.InferenceAction; @@ -55,6 +60,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; @@ -825,6 +832,92 @@ public void testInfer_UnauthorisedResponse() throws IOException { } } + public void testGetConfiguration() throws Exception { + try (var service = createService()) { + String content = XContentHelper.stripWhitespace(""" + { + "provider": "mistral", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": {} + } + ], + "configuration": { + "api_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Key", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "API Key for the provider you're connecting to.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "model": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Model", + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "Refer to the Mistral models documentation for the list of available text embedding models.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "Minimize the number of rate limit errors.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "max_input_tokens": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Maximum Input Tokens", + "order": 3, + "required": false, + "sensitive": false, + "tooltip": "Allows you to specify the maximum number of tokens per input.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + // ---------------------------------------------------------------- private MistralService createService() { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index 0698b9652b767..91479b0d18bdb 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -14,11 +14,15 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; @@ -28,6 +32,7 @@ import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.ChunkingSettingsFeatureFlag; import org.elasticsearch.xpack.core.inference.action.InferenceAction; @@ -57,6 +62,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.getRequestConfigMap; @@ -1687,6 +1694,143 @@ private void testChunkedInfer(OpenAiEmbeddingsModel model) throws IOException { } } + @SuppressWarnings("checkstyle:LineLength") + public void testGetConfiguration() throws Exception { + try (var service = createOpenAiService()) { + String content = XContentHelper.stripWhitespace( + """ + { + "provider": "openai", + "task_types": [ + { + "task_type": "text_embedding", + "configuration": { + "user": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "User", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the user issuing the request.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + } + } + }, + { + "task_type": "completion", + "configuration": { + "user": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "User", + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the user issuing the request.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + } + } + } + ], + "configuration": { + "api_key": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "API Key", + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "The OpenAI API authentication key. For more details about generating OpenAI API keys, refer to the https://platform.openai.com/account/api-keys.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "organization_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Organization ID", + "order": 3, + "required": false, + "sensitive": false, + "tooltip": "The unique identifier of your organization.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "rate_limit.requests_per_minute": { + "default_value": null, + "depends_on": [], + "display": "numeric", + "label": "Rate Limit", + "order": 6, + "required": false, + "sensitive": false, + "tooltip": "Default number of requests allowed per minute. For text_embedding is 3000. For completion is 500.", + "type": "int", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "model_id": { + "default_value": null, + "depends_on": [], + "display": "textbox", + "label": "Model ID", + "order": 2, + "required": true, + "sensitive": false, + "tooltip": "The name of the model to use for the inference task.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + }, + "url": { + "default_value": "https://api.openai.com/v1/chat/completions", + "depends_on": [], + "display": "textbox", + "label": "URL", + "order": 4, + "required": true, + "sensitive": false, + "tooltip": "The OpenAI API endpoint URL. For more information on the URL, refer to the https://platform.openai.com/docs/api-reference.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": null + } + } + } + """ + ); + InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes( + new BytesArray(content), + XContentType.JSON + ); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + InferenceServiceConfiguration serviceConfiguration = service.getConfiguration(); + assertToXContentEquivalent( + originalBytes, + toXContent(serviceConfiguration, XContentType.JSON, humanReadable), + XContentType.JSON + ); + } + } + private OpenAiService createOpenAiService() { return new OpenAiService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 4405ef575b24f..df97c489cc6b7 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -384,6 +384,7 @@ public class Constants { "cluster:monitor/xpack/inference", "cluster:monitor/xpack/inference/get", "cluster:monitor/xpack/inference/diagnostics/get", + "cluster:monitor/xpack/inference/services/get", "cluster:monitor/xpack/info", "cluster:monitor/xpack/info/aggregate_metric", "cluster:monitor/xpack/info/analytics", From 0e1d2d9605d6ee6ac9e0f9d940c4b66d95968fa0 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 30 Oct 2024 10:38:48 -0700 Subject: [PATCH 214/324] Prohibit changes to index mode, source, and sort settings during resize (#115812) Relates to #115811, but applies to resize requests. The index.mode, source.mode, and index.sort.* settings cannot be modified during resize, as this may lead to data corruption or issues retrieving _source. This change enforces a restriction on modifying these settings during resize. While a fine-grained check could allow equivalent settings, it seems simpler and safer to reject resize requests if any of these settings are specified. --- docs/changelog/115812.yaml | 5 ++ .../admin/indices/create/CloneIndexIT.java | 47 +++++++++++++++++++ .../index/LookupIndexModeIT.java | 2 +- .../metadata/MetadataCreateIndexService.java | 21 ++++++--- 4 files changed, 67 insertions(+), 8 deletions(-) create mode 100644 docs/changelog/115812.yaml diff --git a/docs/changelog/115812.yaml b/docs/changelog/115812.yaml new file mode 100644 index 0000000000000..c45c97041eb00 --- /dev/null +++ b/docs/changelog/115812.yaml @@ -0,0 +1,5 @@ +pr: 115812 +summary: "Prohibit changes to index mode, source, and sort settings during resize" +area: Logs +type: bug +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java index d3410165880f3..b6930d06c11ec 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.query.TermsQueryBuilder; @@ -20,9 +21,12 @@ import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentType; +import java.util.List; + import static org.elasticsearch.action.admin.indices.create.ShrinkIndexIT.assertNoResizeSourceIndexSettings; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class CloneIndexIT extends ESIntegTestCase { @@ -109,4 +113,47 @@ public void testCreateCloneIndex() { } + public void testResizeChangeIndexMode() { + prepareCreate("source").setSettings(indexSettings(1, 0)).setMapping("@timestamp", "type=date", "host.name", "type=keyword").get(); + updateIndexSettings(Settings.builder().put("index.blocks.write", true), "source"); + List indexSettings = List.of( + Settings.builder().put("index.mode", "logsdb").build(), + Settings.builder().put("index.mode", "time_series").put("index.routing_path", "host.name").build(), + Settings.builder().put("index.mode", "lookup").build() + ); + for (Settings settings : indexSettings) { + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> { + indicesAdmin().prepareResizeIndex("source", "target").setResizeType(ResizeType.CLONE).setSettings(settings).get(); + }); + assertThat(error.getMessage(), equalTo("can't change setting [index.mode] during resize")); + } + } + + public void testResizeChangeSyntheticSource() { + prepareCreate("source").setSettings(indexSettings(between(1, 5), 0)) + .setMapping("@timestamp", "type=date", "host.name", "type=keyword") + .get(); + updateIndexSettings(Settings.builder().put("index.blocks.write", true), "source"); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> { + indicesAdmin().prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setSettings(Settings.builder().put("index.mapping.source.mode", "synthetic").putNull("index.blocks.write").build()) + .get(); + }); + assertThat(error.getMessage(), containsString("can't change setting [index.mapping.source.mode] during resize")); + } + + public void testResizeChangeIndexSorts() { + prepareCreate("source").setSettings(indexSettings(between(1, 5), 0)) + .setMapping("@timestamp", "type=date", "host.name", "type=keyword") + .get(); + updateIndexSettings(Settings.builder().put("index.blocks.write", true), "source"); + ValidationException error = expectThrows(ValidationException.class, () -> { + indicesAdmin().prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setSettings(Settings.builder().putList("index.sort.field", List.of("@timestamp")).build()) + .get(); + }); + assertThat(error.getMessage(), containsString("can't override index sort when resizing an index")); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java index f294d4a2e7943..960ee2fd7ca60 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java @@ -198,7 +198,7 @@ public void testResizeRegularIndexToLookup() { IllegalArgumentException.class, () -> client().admin().indices().execute(ResizeAction.INSTANCE, shrink).actionGet() ); - assertThat(error.getMessage(), equalTo("can't change index.mode of index [regular-1] from [standard] to [lookup]")); + assertThat(error.getMessage(), equalTo("can't change setting [index.mode] during resize")); } public void testDoNotOverrideAutoExpandReplicas() { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index ed029db54bf06..1f014a526b9a6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -62,11 +62,13 @@ import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService.MergeReason; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.IndexCreationException; @@ -1567,6 +1569,15 @@ static void validateCloneIndex( IndexMetadata.selectCloneShard(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } + private static final Set UNMODIFIABLE_SETTINGS_DURING_RESIZE = Set.of( + IndexSettings.MODE.getKey(), + SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), + IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), + IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), + IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(), + IndexSortConfig.INDEX_SORT_MISSING_SETTING.getKey() + ); + static IndexMetadata validateResize( Metadata metadata, ClusterBlocks clusterBlocks, @@ -1604,13 +1615,9 @@ static IndexMetadata validateResize( // of if the source shards are divisible by the number of target shards IndexMetadata.getRoutingFactor(sourceMetadata.getNumberOfShards(), INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } - if (targetIndexSettings.hasValue(IndexSettings.MODE.getKey())) { - IndexMode oldMode = Objects.requireNonNullElse(sourceMetadata.getIndexMode(), IndexMode.STANDARD); - IndexMode newMode = IndexSettings.MODE.get(targetIndexSettings); - if (newMode != oldMode) { - throw new IllegalArgumentException( - "can't change index.mode of index [" + sourceIndex + "] from [" + oldMode + "] to [" + newMode + "]" - ); + for (String setting : UNMODIFIABLE_SETTINGS_DURING_RESIZE) { + if (targetIndexSettings.hasValue(setting)) { + throw new IllegalArgumentException("can't change setting [" + setting + "] during resize"); } } return sourceMetadata; From 560b3e3b54f8b830ff3b5d2bc9be6a7faf390549 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 30 Oct 2024 10:39:42 -0700 Subject: [PATCH 215/324] Prohibit changes to index mode, source, and sort settings during restore (#115811) The index.mode, source.mode, and index.sort.* settings cannot be modified during restore, as this may lead to data corruption or issues retrieving _source. This change enforces a restriction on modifying these settings during restore. While a fine-grained check could permit equivalent settings, it seems simpler and safer to reject restore requests if any of these settings are specified. --- docs/changelog/115811.yaml | 5 ++ .../snapshots/RestoreSnapshotIT.java | 63 +++++++++++++++++++ .../snapshots/RestoreService.java | 10 ++- 3 files changed, 77 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115811.yaml diff --git a/docs/changelog/115811.yaml b/docs/changelog/115811.yaml new file mode 100644 index 0000000000000..292dc91ecb928 --- /dev/null +++ b/docs/changelog/115811.yaml @@ -0,0 +1,5 @@ +pr: 115811 +summary: "Prohibit changes to index mode, source, and sort settings during restore" +area: Logs +type: bug +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index 00e13f22012e9..fe83073eeb780 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.indices.InvalidIndexNameException; @@ -761,6 +762,68 @@ public void testChangeSettingsOnRestore() throws Exception { assertHitCount(client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "bar")), numdocs); } + public void testRestoreChangeIndexMode() { + Client client = client(); + createRepository("test-repo", "fs"); + String indexName = "test-idx"; + assertAcked(client.admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(indexSettings()))); + createSnapshot("test-repo", "test-snap", Collections.singletonList(indexName)); + cluster().wipeIndices(indexName); + for (IndexMode mode : IndexMode.values()) { + var error = expectThrows(SnapshotRestoreException.class, () -> { + client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setIndexSettings(Settings.builder().put("index.mode", mode.name())) + .setWaitForCompletion(true) + .get(); + }); + assertThat(error.getMessage(), containsString("cannot modify setting [index.mode] on restore")); + } + } + + public void testRestoreChangeSyntheticSource() { + Client client = client(); + createRepository("test-repo", "fs"); + String indexName = "test-idx"; + assertAcked(client.admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(indexSettings()))); + createSnapshot("test-repo", "test-snap", Collections.singletonList(indexName)); + cluster().wipeIndices(indexName); + var error = expectThrows(SnapshotRestoreException.class, () -> { + client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setIndexSettings(Settings.builder().put("index.mapping.source.mode", "synthetic")) + .setWaitForCompletion(true) + .get(); + }); + assertThat(error.getMessage(), containsString("cannot modify setting [index.mapping.source.mode] on restore")); + } + + public void testRestoreChangeIndexSorts() { + Client client = client(); + createRepository("test-repo", "fs"); + String indexName = "test-idx"; + assertAcked( + client.admin() + .indices() + .prepareCreate(indexName) + .setMapping("host.name", "type=keyword", "@timestamp", "type=date") + .setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", List.of("@timestamp", "host.name"))) + ); + createSnapshot("test-repo", "test-snap", Collections.singletonList(indexName)); + cluster().wipeIndices(indexName); + var error = expectThrows(SnapshotRestoreException.class, () -> { + client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setIndexSettings(Settings.builder().putList("index.sort.field", List.of("host.name"))) + .setWaitForCompletion(true) + .get(); + }); + assertThat(error.getMessage(), containsString("cannot modify setting [index.sort.field] on restore")); + } + public void testRecreateBlocksOnRestore() throws Exception { Client client = client(); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index cf023b0e629c6..de241301cfef9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -67,9 +67,11 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; @@ -155,7 +157,13 @@ public final class RestoreService implements ClusterStateApplier { SETTING_VERSION_CREATED, SETTING_INDEX_UUID, SETTING_CREATION_DATE, - SETTING_HISTORY_UUID + SETTING_HISTORY_UUID, + IndexSettings.MODE.getKey(), + SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), + IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), + IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), + IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(), + IndexSortConfig.INDEX_SORT_MISSING_SETTING.getKey() ); // It's OK to change some settings, but we shouldn't allow simply removing them From 40625ecee669c74829e1cfc1f240488164526f14 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 30 Oct 2024 13:43:03 -0400 Subject: [PATCH 216/324] ESQL: Fix a bug in VALUES agg (#115952) This fixes a bug in the VALUES agg when run on IP fields and grouped. Specifically, we could get array-out-of-bounds errors if the array in which we store the ips is oversized but not sized on the bounds of an IP. *AND* some group IDs received only `null` values - specifically those who were assigned the highest ordinals. --- docs/changelog/115952.yaml | 5 + .../aggregation/AbstractArrayState.java | 2 +- .../AbstractFallibleArrayState.java | 2 +- .../compute/aggregation/IpArrayState.java | 2 +- .../compute/aggregation/ArrayStateTests.java | 114 ++++++++++++++++-- 5 files changed, 115 insertions(+), 10 deletions(-) create mode 100644 docs/changelog/115952.yaml diff --git a/docs/changelog/115952.yaml b/docs/changelog/115952.yaml new file mode 100644 index 0000000000000..ec57a639dc0ae --- /dev/null +++ b/docs/changelog/115952.yaml @@ -0,0 +1,5 @@ +pr: 115952 +summary: "ESQL: Fix a bug in VALUES agg" +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java index f9962922cc4a7..45a45f4337beb 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java @@ -19,7 +19,7 @@ * Most of this class subclasses are autogenerated. *

    */ -public class AbstractArrayState implements Releasable { +public abstract class AbstractArrayState implements Releasable, GroupingAggregatorState { protected final BigArrays bigArrays; private BitArray seen; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java index d5ad3189e2f9e..94caefc55e050 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java @@ -18,7 +18,7 @@ * Most of this class subclasses are autogenerated. *

    */ -public class AbstractFallibleArrayState extends AbstractArrayState { +public abstract class AbstractFallibleArrayState extends AbstractArrayState { private BitArray failed; public AbstractFallibleArrayState(BigArrays bigArrays) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/IpArrayState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/IpArrayState.java index 63527f70fb621..0ffe9caff9f6e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/IpArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/IpArrayState.java @@ -125,7 +125,7 @@ public void toIntermediate(Block[] blocks, int offset, IntVector selected, Drive for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); int ipIndex = getIndex(group); - if (ipIndex < values.size()) { + if (ipIndex + IP_LENGTH <= values.size()) { var value = get(group, scratch); valuesBuilder.appendBytesRef(value); } else { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ArrayStateTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ArrayStateTests.java index da10f94f6fb8a..634dc4f7c7ed7 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ArrayStateTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ArrayStateTests.java @@ -7,19 +7,28 @@ package org.elasticsearch.compute.aggregation; +import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockTestUtils; +import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.TestBlockFactory; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.type.DataType; import java.util.ArrayList; import java.util.List; +import java.util.Locale; +import java.util.function.IntSupplier; import static org.hamcrest.Matchers.equalTo; @@ -29,21 +38,37 @@ public static List params() { List params = new ArrayList<>(); for (boolean inOrder : new boolean[] { true, false }) { - params.add(new Object[] { DataType.INTEGER, 1000, inOrder }); - params.add(new Object[] { DataType.LONG, 1000, inOrder }); - params.add(new Object[] { DataType.FLOAT, 1000, inOrder }); - params.add(new Object[] { DataType.DOUBLE, 1000, inOrder }); - params.add(new Object[] { DataType.IP, 1000, inOrder }); + for (IntSupplier count : new IntSupplier[] { new Fixed(100), new Fixed(1000), new Random(100, 5000) }) { + params.add(new Object[] { DataType.INTEGER, count, inOrder }); + params.add(new Object[] { DataType.LONG, count, inOrder }); + params.add(new Object[] { DataType.FLOAT, count, inOrder }); + params.add(new Object[] { DataType.DOUBLE, count, inOrder }); + params.add(new Object[] { DataType.IP, count, inOrder }); + } } return params; } + private record Fixed(int i) implements IntSupplier { + @Override + public int getAsInt() { + return i; + } + } + + private record Random(int min, int max) implements IntSupplier { + @Override + public int getAsInt() { + return randomIntBetween(min, max); + } + } + private final DataType type; private final ElementType elementType; private final int valueCount; private final boolean inOrder; - public ArrayStateTests(DataType type, int valueCount, boolean inOrder) { + public ArrayStateTests(@Name("type") DataType type, @Name("valueCount") IntSupplier valueCount, @Name("inOrder") boolean inOrder) { this.type = type; this.elementType = switch (type) { case INTEGER -> ElementType.INT; @@ -54,8 +79,9 @@ public ArrayStateTests(DataType type, int valueCount, boolean inOrder) { case IP -> ElementType.BYTES_REF; default -> throw new IllegalArgumentException(); }; - this.valueCount = valueCount; + this.valueCount = valueCount.getAsInt(); this.inOrder = inOrder; + logger.info("value count is {}", this.valueCount); } public void testSetNoTracking() { @@ -146,6 +172,68 @@ public void testSetNullableThenOverwriteNullable() { } } + public void testToIntermediate() { + AbstractArrayState state = newState(); + List values = randomList(valueCount, valueCount, this::randomValue); + setAll(state, values, 0); + Block[] intermediate = new Block[2]; + DriverContext ctx = new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, TestBlockFactory.getNonBreakingInstance()); + state.toIntermediate(intermediate, 0, IntVector.range(0, valueCount, ctx.blockFactory()), ctx); + try { + assertThat(intermediate[0].elementType(), equalTo(elementType)); + assertThat(intermediate[1].elementType(), equalTo(ElementType.BOOLEAN)); + assertThat(intermediate[0].getPositionCount(), equalTo(values.size())); + assertThat(intermediate[1].getPositionCount(), equalTo(values.size())); + for (int i = 0; i < values.size(); i++) { + Object v = values.get(i); + assertThat( + String.format(Locale.ROOT, "%05d: %s", i, v != null ? v : "init"), + BlockUtils.toJavaObject(intermediate[0], i), + equalTo(v != null ? v : initialValue()) + ); + assertThat(BlockUtils.toJavaObject(intermediate[1], i), equalTo(true)); + } + } finally { + Releasables.close(intermediate); + } + } + + /** + * Calls {@link GroupingAggregatorState#toIntermediate} with a range that's greater than + * any collected values. This is acceptable if {@link AbstractArrayState#enableGroupIdTracking} + * is called, so we do that. + */ + public void testToIntermediatePastEnd() { + int end = valueCount + between(1, 10000); + AbstractArrayState state = newState(); + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + List values = randomList(valueCount, valueCount, this::randomValue); + setAll(state, values, 0); + Block[] intermediate = new Block[2]; + DriverContext ctx = new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, TestBlockFactory.getNonBreakingInstance()); + state.toIntermediate(intermediate, 0, IntVector.range(0, end, ctx.blockFactory()), ctx); + try { + assertThat(intermediate[0].elementType(), equalTo(elementType)); + assertThat(intermediate[1].elementType(), equalTo(ElementType.BOOLEAN)); + assertThat(intermediate[0].getPositionCount(), equalTo(end)); + assertThat(intermediate[1].getPositionCount(), equalTo(end)); + for (int i = 0; i < values.size(); i++) { + Object v = values.get(i); + assertThat( + String.format(Locale.ROOT, "%05d: %s", i, v != null ? v : "init"), + BlockUtils.toJavaObject(intermediate[0], i), + equalTo(v != null ? v : initialValue()) + ); + assertThat(BlockUtils.toJavaObject(intermediate[1], i), equalTo(v != null)); + } + for (int i = values.size(); i < end; i++) { + assertThat(BlockUtils.toJavaObject(intermediate[1], i), equalTo(false)); + } + } finally { + Releasables.close(intermediate); + } + } + private record ValueAndIndex(int index, Object value) {} private void setAll(AbstractArrayState state, List values, int offset) { @@ -181,6 +269,18 @@ private AbstractArrayState newState() { }; } + private Object initialValue() { + return switch (type) { + case INTEGER -> 1; + case LONG -> 1L; + case FLOAT -> 1F; + case DOUBLE -> 1d; + case BOOLEAN -> false; + case IP -> new BytesRef(new byte[16]); + default -> throw new IllegalArgumentException(); + }; + } + private void set(AbstractArrayState state, int groupId, Object value) { switch (type) { case INTEGER -> ((IntArrayState) state).set(groupId, (Integer) value); From ca433c10db9d99336bfdf9dc9215fea87156b15c Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Wed, 30 Oct 2024 13:09:21 -0500 Subject: [PATCH 217/324] Removing ESClientYamlSuiteTestCase::getGlobalTemplateSettings (#115941) --- .../test/rest/yaml/ESClientYamlSuiteTestCase.java | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index d835a8d0c1635..54602090050ab 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -23,7 +23,6 @@ import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.UpdateForV9; @@ -517,17 +516,6 @@ public void test() throws IOException { } } - @Deprecated - protected Settings getGlobalTemplateSettings(List features) { - // This method will be deleted once its uses in serverless are deleted - return Settings.EMPTY; - } - - protected Settings getGlobalTemplateSettings(boolean defaultShardsFeature) { - // This method will be deleted once its uses in serverless are deleted - return Settings.EMPTY; - } - protected boolean skipSetupSections() { return false; } From 65a0a1d48163b9b2d7e91f793ee090d850f3f93c Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 31 Oct 2024 05:16:44 +1100 Subject: [PATCH 218/324] Mute org.elasticsearch.monitor.jvm.JvmStatsTests testJvmStats #115711 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index ae1e641f12347..a533a010f9d37 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -275,6 +275,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_crud/Test delete given model referenced by pipeline} issue: https://github.com/elastic/elasticsearch/issues/115970 +- class: org.elasticsearch.monitor.jvm.JvmStatsTests + method: testJvmStats + issue: https://github.com/elastic/elasticsearch/issues/115711 # Examples: # From 5cc2a47eaf4e4e74fe22e31e78f6645da170e73b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 30 Oct 2024 14:50:24 -0400 Subject: [PATCH 219/324] ESQL: Basic enrich-like lookup loading (#115667) This adds super basic way to perform a lookup-style LEFT JOIN thing. It's *like* ENRICH, except it can use an index_mode=lookup index rather than an ENRICH policy. It's like a LEFT JOIN but it can't change the output cardinality. That's a genuinely useful thing! This intentionally forks some portion of the ENRICH infrastructure and shares others. I *believe* these are the right parts to fork and the right parts to share. Namely: * We *share* the internal implementaions * We fork the request * We fork the configuration of what to join This should allow us to iterate the on the requests without damaging anything in ENRICH but any speed ups that we build for these lookup joins *can* be shared with ENRICH if we decide that they work. Relies on #115143 --- .../elasticsearch/compute/OperatorTests.java | 12 +- .../xpack/esql/action/LookupFromIndexIT.java | 249 +++++++ .../esql/enrich/AbstractLookupService.java | 593 +++++++++++++++++ .../esql/enrich/EnrichLookupOperator.java | 7 +- .../esql/enrich/EnrichLookupService.java | 611 +++--------------- .../esql/enrich/LookupFromIndexOperator.java | 200 ++++++ .../esql/enrich/LookupFromIndexService.java | 154 +++++ .../xpack/esql/enrich/QueryList.java | 2 +- .../esql/plugin/TransportEsqlQueryAction.java | 7 + 9 files changed, 1293 insertions(+), 542 deletions(-) create mode 100644 x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java index 8b69b5584e65d..0d39a5bf8227e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java @@ -89,7 +89,16 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -// TODO: Move these tests to the right test classes. +/** + * This venerable test builds {@link Driver}s by hand and runs them together, simulating + * whole runs without needing to involve ESQL-proper. It's a wonderful place to integration + * test new ideas, and it was the first tests the compute engine ever had. But as we plug + * these things into ESQL tests should leave here and just run in csv-spec tests. Or move + * into unit tests for the operators themselves. + *

    + * TODO move any of these we can to unit tests for the operator. + *

    + */ public class OperatorTests extends MapperServiceTestCase { public void testQueryOperator() throws IOException { @@ -355,7 +364,6 @@ public void testHashLookup() { } finally { primesBlock.close(); } - } /** diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java new file mode 100644 index 0000000000000..cff9604053903 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.lucene.DataPartitioning; +import org.elasticsearch.compute.lucene.LuceneSourceOperator; +import org.elasticsearch.compute.lucene.ShardContext; +import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.DriverRunner; +import org.elasticsearch.compute.operator.PageConsumerOperator; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.async.AsyncExecutionId; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.enrich.LookupFromIndexOperator; +import org.elasticsearch.xpack.esql.planner.EsPhysicalOperationProviders; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; +import org.elasticsearch.xpack.esql.plugin.TransportEsqlQueryAction; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; + +import static org.elasticsearch.test.ListMatcher.matchesList; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.hamcrest.Matchers.empty; + +public class LookupFromIndexIT extends AbstractEsqlIntegTestCase { + /** + * Quick and dirty test for looking up data from a lookup index. + */ + public void testLookupIndex() throws IOException { + // TODO this should *fail* if the target index isn't a lookup type index - it doesn't now. + int docCount = between(10, 1000); + List expected = new ArrayList<>(docCount); + client().admin() + .indices() + .prepareCreate("source") + .setSettings(Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)) + .setMapping("data", "type=keyword") + .get(); + client().admin() + .indices() + .prepareCreate("lookup") + .setSettings( + Settings.builder() + .put(IndexSettings.MODE.getKey(), "lookup") + // TODO lookup index mode doesn't seem to force a single shard. That'll break the lookup command. + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + ) + .setMapping("data", "type=keyword", "l", "type=long") + .get(); + client().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); + + String[] data = new String[] { "aa", "bb", "cc", "dd" }; + List docs = new ArrayList<>(); + for (int i = 0; i < docCount; i++) { + docs.add(client().prepareIndex("source").setSource(Map.of("data", data[i % data.length]))); + expected.add(data[i % data.length] + ":" + (i % data.length)); + } + for (int i = 0; i < data.length; i++) { + docs.add(client().prepareIndex("lookup").setSource(Map.of("data", data[i], "l", i))); + } + Collections.sort(expected); + indexRandom(true, true, docs); + + /* + * Find the data node hosting the only shard of the source index. + */ + SearchService searchService = null; + String nodeWithShard = null; + ShardId shardId = null; + node: for (String node : internalCluster().getNodeNames()) { + searchService = internalCluster().getInstance(SearchService.class, node); + for (IndexService idx : searchService.getIndicesService()) { + if (idx.index().getName().equals("source")) { + nodeWithShard = node; + shardId = new ShardId(idx.index(), 0); + break node; + } + } + } + if (nodeWithShard == null) { + throw new IllegalStateException("couldn't find any copy of source index"); + } + + List results = new CopyOnWriteArrayList<>(); + /* + * Run the Driver. + */ + try ( + SearchContext searchContext = searchService.createSearchContext( + new ShardSearchRequest(shardId, System.currentTimeMillis(), AliasFilter.EMPTY, null), + SearchService.NO_TIMEOUT + ) + ) { + ShardContext esqlContext = new EsPhysicalOperationProviders.DefaultShardContext( + 0, + searchContext.getSearchExecutionContext(), + AliasFilter.EMPTY + ); + LuceneSourceOperator.Factory source = new LuceneSourceOperator.Factory( + List.of(esqlContext), + ctx -> new MatchAllDocsQuery(), + DataPartitioning.SEGMENT, + 1, + 10000, + DocIdSetIterator.NO_MORE_DOCS + ); + ValuesSourceReaderOperator.Factory reader = new ValuesSourceReaderOperator.Factory( + List.of( + new ValuesSourceReaderOperator.FieldInfo( + "data", + ElementType.BYTES_REF, + shard -> searchContext.getSearchExecutionContext().getFieldType("data").blockLoader(null) + ) + ), + List.of(new ValuesSourceReaderOperator.ShardContext(searchContext.getSearchExecutionContext().getIndexReader(), () -> { + throw new IllegalStateException("can't load source here"); + })), + 0 + ); + CancellableTask parentTask = new EsqlQueryTask( + 1, + "test", + "test", + "test", + null, + Map.of(), + Map.of(), + new AsyncExecutionId("test", TaskId.EMPTY_TASK_ID), + TEST_REQUEST_TIMEOUT + ); + LookupFromIndexOperator.Factory lookup = new LookupFromIndexOperator.Factory( + "test", + parentTask, + QueryPragmas.ENRICH_MAX_WORKERS.get(Settings.EMPTY), + 1, + internalCluster().getInstance(TransportEsqlQueryAction.class, nodeWithShard).getLookupFromIndexService(), + DataType.KEYWORD, + "lookup", + "data", + List.of(new Alias(Source.EMPTY, "l", new ReferenceAttribute(Source.EMPTY, "l", DataType.LONG))) + ); + DriverContext driverContext = driverContext(); + try ( + var driver = new Driver( + driverContext, + source.get(driverContext), + List.of(reader.get(driverContext), lookup.get(driverContext)), + new PageConsumerOperator(page -> { + try { + BytesRefVector dataBlock = page.getBlock(1).asVector(); + LongVector loadedBlock = page.getBlock(2).asVector(); + for (int p = 0; p < page.getPositionCount(); p++) { + results.add(dataBlock.getBytesRef(p, new BytesRef()).utf8ToString() + ":" + loadedBlock.getLong(p)); + } + } finally { + page.releaseBlocks(); + } + }), + () -> {} + ) + ) { + PlainActionFuture future = new PlainActionFuture<>(); + ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, nodeWithShard); + var driverRunner = new DriverRunner(threadPool.getThreadContext()) { + @Override + protected void start(Driver driver, ActionListener driverListener) { + Driver.start( + threadPool.getThreadContext(), + threadPool.executor(EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME), + driver, + between(1, 10000), + driverListener + ); + } + }; + driverRunner.runToCompletion(List.of(driver), future); + future.actionGet(TimeValue.timeValueSeconds(30)); + assertMap(results.stream().sorted().toList(), matchesList(expected)); + } + assertDriverContext(driverContext); + } + } + + /** + * Creates a {@link BigArrays} that tracks releases but doesn't throw circuit breaking exceptions. + */ + private BigArrays bigArrays() { + return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + } + + /** + * A {@link DriverContext} that won't throw {@link CircuitBreakingException}. + */ + protected final DriverContext driverContext() { + var breaker = new MockBigArrays.LimitedBreaker("esql-test-breaker", ByteSizeValue.ofGb(1)); + return new DriverContext(bigArrays(), BlockFactory.getInstance(breaker, bigArrays())); + } + + public static void assertDriverContext(DriverContext driverContext) { + assertTrue(driverContext.isFinished()); + assertThat(driverContext.getSnapshot().releasables(), empty()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java new file mode 100644 index 0000000000000..57306d0da38e2 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java @@ -0,0 +1,593 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.enrich; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.support.ChannelActionListener; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockStreamInput; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LocalCircuitBreaker; +import org.elasticsearch.compute.data.OrdinalBytesRefBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.compute.operator.OutputOperator; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.index.mapper.BlockLoader; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.planner.EsPhysicalOperationProviders; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +/** + * {@link AbstractLookupService} performs a single valued {@code LEFT JOIN} for a + * given input page against another index. This is quite similar to a nested loop + * join. It is restricted to indices with only a single shard. + *

    + * This registers a {@link TransportRequestHandler} so we can handle requests + * to join data that isn't local to the node, but it is much faster if the + * data is already local. + *

    + *

    + * The join process spawns a {@link Driver} per incoming page which runs in + * three stages: + *

    + *

    + * Stage 1: Finding matching document IDs for the input page. This stage is done + * by the {@link EnrichQuerySourceOperator}. The output page of this stage is + * represented as {@code [DocVector, IntBlock: positions of the input terms]}. + *

    + *

    + * Stage 2: Extracting field values for the matched document IDs. The output page + * is represented as + * {@code [DocVector, IntBlock: positions, Block: field1, Block: field2,...]}. + *

    + *

    + * Stage 3: Combining the extracted values based on positions and filling nulls for + * positions without matches. This is done by {@link MergePositionsOperator}. The output + * page is represented as {@code [Block: field1, Block: field2,...]}. + *

    + *

    + * The {@link Page#getPositionCount()} of the output {@link Page} is equal to the + * {@link Page#getPositionCount()} of the input page. In other words - it returns + * the same number of rows that it was sent no matter how many documents match. + *

    + */ +abstract class AbstractLookupService { + private final String actionName; + private final String privilegeName; + private final ClusterService clusterService; + private final SearchService searchService; + private final TransportService transportService; + private final Executor executor; + private final BigArrays bigArrays; + private final BlockFactory blockFactory; + private final LocalCircuitBreaker.SizeSettings localBreakerSettings; + + AbstractLookupService( + String actionName, + String privilegeName, + ClusterService clusterService, + SearchService searchService, + TransportService transportService, + BigArrays bigArrays, + BlockFactory blockFactory, + CheckedBiFunction readRequest + ) { + this.actionName = actionName; + this.privilegeName = privilegeName; + this.clusterService = clusterService; + this.searchService = searchService; + this.transportService = transportService; + this.executor = transportService.getThreadPool().executor(ThreadPool.Names.SEARCH); + this.bigArrays = bigArrays; + this.blockFactory = blockFactory; + this.localBreakerSettings = new LocalCircuitBreaker.SizeSettings(clusterService.getSettings()); + transportService.registerRequestHandler( + actionName, + transportService.getThreadPool().executor(EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME), + in -> readRequest.apply(in, blockFactory), + new TransportHandler() + ); + } + + /** + * Convert a request as sent to {@link #lookupAsync} into a transport request after + * preflight checks have been performed. + */ + protected abstract T transportRequest(R request, ShardId shardId); + + /** + * Build a list of queries to perform inside the actual lookup. + */ + protected abstract QueryList queryList(T request, SearchExecutionContext context, Block inputBlock, DataType inputDataType); + + /** + * Perform the actual lookup. + */ + public final void lookupAsync(R request, CancellableTask parentTask, ActionListener outListener) { + ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); + ActionListener listener = ContextPreservingActionListener.wrapPreservingContext(outListener, threadContext); + hasPrivilege(listener.delegateFailureAndWrap((delegate, ignored) -> { + ClusterState clusterState = clusterService.state(); + GroupShardsIterator shardIterators = clusterService.operationRouting() + .searchShards(clusterState, new String[] { request.index }, Map.of(), "_local"); + if (shardIterators.size() != 1) { + delegate.onFailure(new EsqlIllegalArgumentException("target index {} has more than one shard", request.index)); + return; + } + ShardIterator shardIt = shardIterators.get(0); + ShardRouting shardRouting = shardIt.nextOrNull(); + ShardId shardId = shardIt.shardId(); + if (shardRouting == null) { + delegate.onFailure(new UnavailableShardsException(shardId, "target index is not available")); + return; + } + DiscoveryNode targetNode = clusterState.nodes().get(shardRouting.currentNodeId()); + T transportRequest = transportRequest(request, shardId); + // TODO: handle retry and avoid forking for the local lookup + try (ThreadContext.StoredContext unused = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { + transportService.sendChildRequest( + targetNode, + actionName, + transportRequest, + parentTask, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>( + delegate.map(LookupResponse::takePage), + in -> new LookupResponse(in, blockFactory), + executor + ) + ); + } + })); + } + + private void hasPrivilege(ActionListener outListener) { + final Settings settings = clusterService.getSettings(); + if (settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()) == false || XPackSettings.SECURITY_ENABLED.get(settings) == false) { + outListener.onResponse(null); + return; + } + final ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); + final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); + final User user = securityContext.getUser(); + if (user == null) { + outListener.onFailure(new IllegalStateException("missing or unable to read authentication info on request")); + return; + } + HasPrivilegesRequest request = new HasPrivilegesRequest(); + request.username(user.principal()); + request.clusterPrivileges(privilegeName); + request.indexPrivileges(new RoleDescriptor.IndicesPrivileges[0]); + request.applicationPrivileges(new RoleDescriptor.ApplicationResourcePrivileges[0]); + ActionListener listener = outListener.delegateFailureAndWrap((l, resp) -> { + if (resp.isCompleteMatch()) { + l.onResponse(null); + return; + } + String detailed = resp.getClusterPrivileges() + .entrySet() + .stream() + .filter(e -> e.getValue() == false) + .map(e -> "privilege [" + e.getKey() + "] is missing") + .collect(Collectors.joining(", ")); + String message = "user [" + + user.principal() + + "] doesn't have " + + "sufficient privileges to perform enrich lookup: " + + detailed; + l.onFailure(Exceptions.authorizationError(message)); + }); + transportService.sendRequest( + transportService.getLocalNode(), + HasPrivilegesAction.NAME, + request, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(listener, HasPrivilegesResponse::new, executor) + ); + } + + private void doLookup(T request, CancellableTask task, ActionListener listener) { + Block inputBlock = request.inputPage.getBlock(0); + if (inputBlock.areAllValuesNull()) { + listener.onResponse(createNullResponse(request.inputPage.getPositionCount(), request.extractFields)); + return; + } + final List releasables = new ArrayList<>(6); + boolean started = false; + try { + final ShardSearchRequest shardSearchRequest = new ShardSearchRequest(request.shardId, 0, AliasFilter.EMPTY); + final SearchContext searchContext = searchService.createSearchContext(shardSearchRequest, SearchService.NO_TIMEOUT); + releasables.add(searchContext); + final LocalCircuitBreaker localBreaker = new LocalCircuitBreaker( + blockFactory.breaker(), + localBreakerSettings.overReservedBytes(), + localBreakerSettings.maxOverReservedBytes() + ); + releasables.add(localBreaker); + final DriverContext driverContext = new DriverContext(bigArrays, blockFactory.newChildFactory(localBreaker)); + final ElementType[] mergingTypes = new ElementType[request.extractFields.size()]; + for (int i = 0; i < request.extractFields.size(); i++) { + mergingTypes[i] = PlannerUtils.toElementType(request.extractFields.get(i).dataType()); + } + final int[] mergingChannels = IntStream.range(0, request.extractFields.size()).map(i -> i + 2).toArray(); + final MergePositionsOperator mergePositionsOperator; + final OrdinalBytesRefBlock ordinalsBytesRefBlock; + if (inputBlock instanceof BytesRefBlock bytesRefBlock && (ordinalsBytesRefBlock = bytesRefBlock.asOrdinals()) != null) { + inputBlock = ordinalsBytesRefBlock.getDictionaryVector().asBlock(); + var selectedPositions = ordinalsBytesRefBlock.getOrdinalsBlock(); + mergePositionsOperator = new MergePositionsOperator( + 1, + mergingChannels, + mergingTypes, + selectedPositions, + driverContext.blockFactory() + ); + + } else { + try (var selectedPositions = IntVector.range(0, inputBlock.getPositionCount(), blockFactory).asBlock()) { + mergePositionsOperator = new MergePositionsOperator( + 1, + mergingChannels, + mergingTypes, + selectedPositions, + driverContext.blockFactory() + ); + } + } + releasables.add(mergePositionsOperator); + SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext(); + QueryList queryList = queryList(request, searchExecutionContext, inputBlock, request.inputDataType); + var queryOperator = new EnrichQuerySourceOperator( + driverContext.blockFactory(), + EnrichQuerySourceOperator.DEFAULT_MAX_PAGE_SIZE, + queryList, + searchExecutionContext.getIndexReader() + ); + releasables.add(queryOperator); + var extractFieldsOperator = extractFieldsOperator(searchContext, driverContext, request.extractFields); + releasables.add(extractFieldsOperator); + + AtomicReference result = new AtomicReference<>(); + OutputOperator outputOperator = new OutputOperator(List.of(), Function.identity(), result::set); + releasables.add(outputOperator); + Driver driver = new Driver( + "enrich-lookup:" + request.sessionId, + System.currentTimeMillis(), + System.nanoTime(), + driverContext, + request::toString, + queryOperator, + List.of(extractFieldsOperator, mergePositionsOperator), + outputOperator, + Driver.DEFAULT_STATUS_INTERVAL, + Releasables.wrap(searchContext, localBreaker) + ); + task.addListener(() -> { + String reason = Objects.requireNonNullElse(task.getReasonCancelled(), "task was cancelled"); + driver.cancel(reason); + }); + var threadContext = transportService.getThreadPool().getThreadContext(); + Driver.start(threadContext, executor, driver, Driver.DEFAULT_MAX_ITERATIONS, listener.map(ignored -> { + Page out = result.get(); + if (out == null) { + out = createNullResponse(request.inputPage.getPositionCount(), request.extractFields); + } + return out; + })); + started = true; + } catch (Exception e) { + listener.onFailure(e); + } finally { + if (started == false) { + Releasables.close(releasables); + } + } + } + + private static Operator extractFieldsOperator( + SearchContext searchContext, + DriverContext driverContext, + List extractFields + ) { + EsPhysicalOperationProviders.ShardContext shardContext = new EsPhysicalOperationProviders.DefaultShardContext( + 0, + searchContext.getSearchExecutionContext(), + searchContext.request().getAliasFilter() + ); + List fields = new ArrayList<>(extractFields.size()); + for (NamedExpression extractField : extractFields) { + BlockLoader loader = shardContext.blockLoader( + extractField instanceof Alias a ? ((NamedExpression) a.child()).name() : extractField.name(), + extractField.dataType() == DataType.UNSUPPORTED, + MappedFieldType.FieldExtractPreference.NONE + ); + fields.add( + new ValuesSourceReaderOperator.FieldInfo( + extractField.name(), + PlannerUtils.toElementType(extractField.dataType()), + shardIdx -> { + if (shardIdx != 0) { + throw new IllegalStateException("only one shard"); + } + return loader; + } + ) + ); + } + return new ValuesSourceReaderOperator( + driverContext.blockFactory(), + fields, + List.of(new ValuesSourceReaderOperator.ShardContext(searchContext.searcher().getIndexReader(), searchContext::newSourceLoader)), + 0 + ); + } + + private Page createNullResponse(int positionCount, List extractFields) { + final Block[] blocks = new Block[extractFields.size()]; + try { + for (int i = 0; i < extractFields.size(); i++) { + blocks[i] = blockFactory.newConstantNullBlock(positionCount); + } + return new Page(blocks); + } finally { + if (blocks[blocks.length - 1] == null) { + Releasables.close(blocks); + } + } + } + + private class TransportHandler implements TransportRequestHandler { + @Override + public void messageReceived(T request, TransportChannel channel, Task task) { + request.incRef(); + ActionListener listener = ActionListener.runBefore(new ChannelActionListener<>(channel), request::decRef); + doLookup( + request, + (CancellableTask) task, + listener.delegateFailureAndWrap( + (l, outPage) -> ActionListener.respondAndRelease(l, new LookupResponse(outPage, blockFactory)) + ) + ); + } + } + + abstract static class Request { + final String sessionId; + final String index; + final DataType inputDataType; + final Page inputPage; + final List extractFields; + + Request(String sessionId, String index, DataType inputDataType, Page inputPage, List extractFields) { + this.sessionId = sessionId; + this.index = index; + this.inputDataType = inputDataType; + this.inputPage = inputPage; + this.extractFields = extractFields; + } + } + + abstract static class TransportRequest extends org.elasticsearch.transport.TransportRequest implements IndicesRequest { + final String sessionId; + final ShardId shardId; + final DataType inputDataType; + final Page inputPage; + final List extractFields; + // TODO: Remove this workaround once we have Block RefCount + final Page toRelease; + final RefCounted refs = AbstractRefCounted.of(this::releasePage); + + TransportRequest( + String sessionId, + ShardId shardId, + DataType inputDataType, + Page inputPage, + Page toRelease, + List extractFields + ) { + this.sessionId = sessionId; + this.shardId = shardId; + this.inputDataType = inputDataType; + this.inputPage = inputPage; + this.toRelease = toRelease; + this.extractFields = extractFields; + } + + @Override + public final String[] indices() { + return new String[] { shardId.getIndexName() }; + } + + @Override + public final IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + + @Override + public final Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers) { + @Override + public String getDescription() { + return this.toString(); + } + }; + } + + private void releasePage() { + if (toRelease != null) { + Releasables.closeExpectNoException(toRelease::releaseBlocks); + } + } + + @Override + public final void incRef() { + refs.incRef(); + } + + @Override + public final boolean tryIncRef() { + return refs.tryIncRef(); + } + + @Override + public final boolean decRef() { + return refs.decRef(); + } + + @Override + public final boolean hasReferences() { + return refs.hasReferences(); + } + + @Override + public final String toString() { + return "LOOKUP(" + + " session=" + + sessionId + + " ,shard=" + + shardId + + " ,input_type=" + + inputDataType + + " ,extract_fields=" + + extractFields + + " ,positions=" + + inputPage.getPositionCount() + + extraDescription() + + ")"; + } + + protected abstract String extraDescription(); + } + + private static class LookupResponse extends TransportResponse { + private final RefCounted refs = AbstractRefCounted.of(this::releasePage); + private final BlockFactory blockFactory; + private Page page; + private long reservedBytes = 0; + + LookupResponse(Page page, BlockFactory blockFactory) { + this.page = page; + this.blockFactory = blockFactory; + } + + LookupResponse(StreamInput in, BlockFactory blockFactory) throws IOException { + try (BlockStreamInput bsi = new BlockStreamInput(in, blockFactory)) { + this.page = new Page(bsi); + } + this.blockFactory = blockFactory; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + long bytes = page.ramBytesUsedByBlocks(); + blockFactory.breaker().addEstimateBytesAndMaybeBreak(bytes, "serialize enrich lookup response"); + reservedBytes += bytes; + page.writeTo(out); + } + + Page takePage() { + var p = page; + page = null; + return p; + } + + private void releasePage() { + blockFactory.breaker().addWithoutBreaking(-reservedBytes); + if (page != null) { + Releasables.closeExpectNoException(page::releaseBlocks); + } + } + + @Override + public void incRef() { + refs.incRef(); + } + + @Override + public boolean tryIncRef() { + return refs.tryIncRef(); + } + + @Override + public boolean decRef() { + return refs.decRef(); + } + + @Override + public boolean hasReferences() { + return refs.hasReferences(); + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java index 13fbd51a46108..6e5845fae33b7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java @@ -109,17 +109,16 @@ public EnrichLookupOperator( protected void performAsync(Page inputPage, ActionListener listener) { final Block inputBlock = inputPage.getBlock(inputChannel); totalTerms += inputBlock.getTotalValueCount(); - enrichLookupService.lookupAsync( + EnrichLookupService.Request request = new EnrichLookupService.Request( sessionId, - parentTask, enrichIndex, inputDataType, matchType, matchField, - enrichFields, new Page(inputBlock), - listener.map(inputPage::appendPage) + enrichFields ); + enrichLookupService.lookupAsync(request, parentTask, listener.map(inputPage::appendPage)); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index e4ae181915c8a..9638571fab993 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -8,116 +8,39 @@ package org.elasticsearch.xpack.esql.enrich; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.UnavailableShardsException; -import org.elasticsearch.action.support.ChannelActionListener; -import org.elasticsearch.action.support.ContextPreservingActionListener; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.LocalCircuitBreaker; -import org.elasticsearch.compute.data.OrdinalBytesRefBlock; import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; -import org.elasticsearch.compute.operator.Driver; -import org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.compute.operator.Operator; -import org.elasticsearch.compute.operator.OutputOperator; -import org.elasticsearch.core.AbstractRefCounted; -import org.elasticsearch.core.RefCounted; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; -import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchService; -import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.tasks.CancellableTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.SecurityContext; -import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; -import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; -import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; -import org.elasticsearch.xpack.core.security.support.Exceptions; -import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; -import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; -import org.elasticsearch.xpack.esql.planner.EsPhysicalOperationProviders; -import org.elasticsearch.xpack.esql.planner.PlannerUtils; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.Executor; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.IntStream; /** - * {@link EnrichLookupService} performs enrich lookup for a given input page. The lookup process consists of three stages: - * - Stage 1: Finding matching document IDs for the input page. This stage is done by the {@link EnrichQuerySourceOperator} or its variants. - * The output page of this stage is represented as [DocVector, IntBlock: positions of the input terms]. - *

    - * - Stage 2: Extracting field values for the matched document IDs. The output page is represented as - * [DocVector, IntBlock: positions, Block: field1, Block: field2,...]. - *

    - * - Stage 3: Combining the extracted values based on positions and filling nulls for positions without matches. - * This is done by {@link MergePositionsOperator}. The output page is represented as [Block: field1, Block: field2,...]. - *

    - * The positionCount of the output page must be equal to the positionCount of the input page. + * {@link EnrichLookupService} performs enrich lookup for a given input page. + * See {@link AbstractLookupService} for how it works where it refers to this + * process as a {@code LEFT JOIN}. Which is mostly is. */ -public class EnrichLookupService { +public class EnrichLookupService extends AbstractLookupService { public static final String LOOKUP_ACTION_NAME = EsqlQueryAction.NAME + "/lookup"; - private final ClusterService clusterService; - private final SearchService searchService; - private final TransportService transportService; - private final Executor executor; - private final BigArrays bigArrays; - private final BlockFactory blockFactory; - private final LocalCircuitBreaker.SizeSettings localBreakerSettings; - public EnrichLookupService( ClusterService clusterService, SearchService searchService, @@ -125,353 +48,107 @@ public EnrichLookupService( BigArrays bigArrays, BlockFactory blockFactory ) { - this.clusterService = clusterService; - this.searchService = searchService; - this.transportService = transportService; - this.executor = transportService.getThreadPool().executor(ThreadPool.Names.SEARCH); - this.bigArrays = bigArrays; - this.blockFactory = blockFactory; - this.localBreakerSettings = new LocalCircuitBreaker.SizeSettings(clusterService.getSettings()); - transportService.registerRequestHandler( + super( LOOKUP_ACTION_NAME, - transportService.getThreadPool().executor(EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME), - in -> new LookupRequest(in, blockFactory), - new TransportHandler() + ClusterPrivilegeResolver.MONITOR_ENRICH.name(), + clusterService, + searchService, + transportService, + bigArrays, + blockFactory, + TransportRequest::readFrom ); } - public void lookupAsync( - String sessionId, - CancellableTask parentTask, - String index, - DataType inputDataType, - String matchType, - String matchField, - List extractFields, - Page inputPage, - ActionListener outListener - ) { - ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); - ActionListener listener = ContextPreservingActionListener.wrapPreservingContext(outListener, threadContext); - hasEnrichPrivilege(listener.delegateFailureAndWrap((delegate, ignored) -> { - ClusterState clusterState = clusterService.state(); - GroupShardsIterator shardIterators = clusterService.operationRouting() - .searchShards(clusterState, new String[] { index }, Map.of(), "_local"); - if (shardIterators.size() != 1) { - delegate.onFailure(new EsqlIllegalArgumentException("target index {} has more than one shard", index)); - return; - } - ShardIterator shardIt = shardIterators.get(0); - ShardRouting shardRouting = shardIt.nextOrNull(); - ShardId shardId = shardIt.shardId(); - if (shardRouting == null) { - delegate.onFailure(new UnavailableShardsException(shardId, "enrich index is not available")); - return; - } - DiscoveryNode targetNode = clusterState.nodes().get(shardRouting.currentNodeId()); - var lookupRequest = new LookupRequest(sessionId, shardId, inputDataType, matchType, matchField, inputPage, extractFields); - // TODO: handle retry and avoid forking for the local lookup - try (ThreadContext.StoredContext unused = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { - transportService.sendChildRequest( - targetNode, - LOOKUP_ACTION_NAME, - lookupRequest, - parentTask, - TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>( - delegate.map(LookupResponse::takePage), - in -> new LookupResponse(in, blockFactory), - executor - ) - ); - } - })); - } - - private void hasEnrichPrivilege(ActionListener outListener) { - final Settings settings = clusterService.getSettings(); - if (settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()) == false || XPackSettings.SECURITY_ENABLED.get(settings) == false) { - outListener.onResponse(null); - return; - } - final ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); - final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); - final User user = securityContext.getUser(); - if (user == null) { - outListener.onFailure(new IllegalStateException("missing or unable to read authentication info on request")); - return; - } - HasPrivilegesRequest request = new HasPrivilegesRequest(); - request.username(user.principal()); - request.clusterPrivileges(ClusterPrivilegeResolver.MONITOR_ENRICH.name()); - request.indexPrivileges(new RoleDescriptor.IndicesPrivileges[0]); - request.applicationPrivileges(new RoleDescriptor.ApplicationResourcePrivileges[0]); - ActionListener listener = outListener.delegateFailureAndWrap((l, resp) -> { - if (resp.isCompleteMatch()) { - l.onResponse(null); - return; - } - String detailed = resp.getClusterPrivileges() - .entrySet() - .stream() - .filter(e -> e.getValue() == false) - .map(e -> "privilege [" + e.getKey() + "] is missing") - .collect(Collectors.joining(", ")); - String message = "user [" - + user.principal() - + "] doesn't have " - + "sufficient privileges to perform enrich lookup: " - + detailed; - l.onFailure(Exceptions.authorizationError(message)); - }); - transportService.sendRequest( - transportService.getLocalNode(), - HasPrivilegesAction.NAME, - request, - TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener, HasPrivilegesResponse::new, executor) + @Override + protected TransportRequest transportRequest(EnrichLookupService.Request request, ShardId shardId) { + return new TransportRequest( + request.sessionId, + shardId, + request.inputDataType, + request.matchType, + request.matchField, + request.inputPage, + null, + request.extractFields ); } - private void doLookup( - String sessionId, - CancellableTask task, - ShardId shardId, - DataType inputDataType, - String matchType, - String matchField, - Page inputPage, - List extractFields, - ActionListener listener - ) { - Block inputBlock = inputPage.getBlock(0); - if (inputBlock.areAllValuesNull()) { - listener.onResponse(createNullResponse(inputPage.getPositionCount(), extractFields)); - return; - } - final List releasables = new ArrayList<>(6); - boolean started = false; - try { - final ShardSearchRequest shardSearchRequest = new ShardSearchRequest(shardId, 0, AliasFilter.EMPTY); - final SearchContext searchContext = searchService.createSearchContext(shardSearchRequest, SearchService.NO_TIMEOUT); - releasables.add(searchContext); - final LocalCircuitBreaker localBreaker = new LocalCircuitBreaker( - blockFactory.breaker(), - localBreakerSettings.overReservedBytes(), - localBreakerSettings.maxOverReservedBytes() - ); - releasables.add(localBreaker); - final DriverContext driverContext = new DriverContext(bigArrays, blockFactory.newChildFactory(localBreaker)); - final ElementType[] mergingTypes = new ElementType[extractFields.size()]; - for (int i = 0; i < extractFields.size(); i++) { - mergingTypes[i] = PlannerUtils.toElementType(extractFields.get(i).dataType()); - } - final int[] mergingChannels = IntStream.range(0, extractFields.size()).map(i -> i + 2).toArray(); - final MergePositionsOperator mergePositionsOperator; - final OrdinalBytesRefBlock ordinalsBytesRefBlock; - if (inputBlock instanceof BytesRefBlock bytesRefBlock && (ordinalsBytesRefBlock = bytesRefBlock.asOrdinals()) != null) { - inputBlock = ordinalsBytesRefBlock.getDictionaryVector().asBlock(); - var selectedPositions = ordinalsBytesRefBlock.getOrdinalsBlock(); - mergePositionsOperator = new MergePositionsOperator( - 1, - mergingChannels, - mergingTypes, - selectedPositions, - driverContext.blockFactory() - ); - - } else { - try (var selectedPositions = IntVector.range(0, inputBlock.getPositionCount(), blockFactory).asBlock()) { - mergePositionsOperator = new MergePositionsOperator( - 1, - mergingChannels, - mergingTypes, - selectedPositions, - driverContext.blockFactory() - ); - } - } - releasables.add(mergePositionsOperator); - SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext(); - MappedFieldType fieldType = searchExecutionContext.getFieldType(matchField); - var queryList = switch (matchType) { - case "match", "range" -> QueryList.termQueryList(fieldType, searchExecutionContext, inputBlock, inputDataType); - case "geo_match" -> QueryList.geoShapeQuery(fieldType, searchExecutionContext, inputBlock, inputDataType); - default -> throw new EsqlIllegalArgumentException("illegal match type " + matchType); - }; - var queryOperator = new EnrichQuerySourceOperator( - driverContext.blockFactory(), - EnrichQuerySourceOperator.DEFAULT_MAX_PAGE_SIZE, - queryList, - searchExecutionContext.getIndexReader() - ); - releasables.add(queryOperator); - var extractFieldsOperator = extractFieldsOperator(searchContext, driverContext, extractFields); - releasables.add(extractFieldsOperator); - - AtomicReference result = new AtomicReference<>(); - OutputOperator outputOperator = new OutputOperator(List.of(), Function.identity(), result::set); - releasables.add(outputOperator); - Driver driver = new Driver( - "enrich-lookup:" + sessionId, - System.currentTimeMillis(), - System.nanoTime(), - driverContext, - () -> lookupDescription( - sessionId, - shardId, - inputDataType, - matchType, - matchField, - extractFields, - inputPage.getPositionCount() - ), - queryOperator, - List.of(extractFieldsOperator, mergePositionsOperator), - outputOperator, - Driver.DEFAULT_STATUS_INTERVAL, - Releasables.wrap(searchContext, localBreaker) - ); - task.addListener(() -> { - String reason = Objects.requireNonNullElse(task.getReasonCancelled(), "task was cancelled"); - driver.cancel(reason); - }); - var threadContext = transportService.getThreadPool().getThreadContext(); - Driver.start(threadContext, executor, driver, Driver.DEFAULT_MAX_ITERATIONS, listener.map(ignored -> { - Page out = result.get(); - if (out == null) { - out = createNullResponse(inputPage.getPositionCount(), extractFields); - } - return out; - })); - started = true; - } catch (Exception e) { - listener.onFailure(e); - } finally { - if (started == false) { - Releasables.close(releasables); - } - } + @Override + protected QueryList queryList(TransportRequest request, SearchExecutionContext context, Block inputBlock, DataType inputDataType) { + MappedFieldType fieldType = context.getFieldType(request.matchField); + return switch (request.matchType) { + case "match", "range" -> QueryList.termQueryList(fieldType, context, inputBlock, inputDataType); + case "geo_match" -> QueryList.geoShapeQuery(fieldType, context, inputBlock, inputDataType); + default -> throw new EsqlIllegalArgumentException("illegal match type " + request.matchType); + }; } - private static Operator extractFieldsOperator( - SearchContext searchContext, - DriverContext driverContext, - List extractFields - ) { - EsPhysicalOperationProviders.ShardContext shardContext = new EsPhysicalOperationProviders.DefaultShardContext( - 0, - searchContext.getSearchExecutionContext(), - searchContext.request().getAliasFilter() - ); - List fields = new ArrayList<>(extractFields.size()); - for (NamedExpression extractField : extractFields) { - BlockLoader loader = shardContext.blockLoader( - extractField instanceof Alias a ? ((NamedExpression) a.child()).name() : extractField.name(), - extractField.dataType() == DataType.UNSUPPORTED, - MappedFieldType.FieldExtractPreference.NONE - ); - fields.add( - new ValuesSourceReaderOperator.FieldInfo( - extractField.name(), - PlannerUtils.toElementType(extractField.dataType()), - shardIdx -> { - if (shardIdx != 0) { - throw new IllegalStateException("only one shard"); - } - return loader; - } - ) - ); - } - return new ValuesSourceReaderOperator( - driverContext.blockFactory(), - fields, - List.of(new ValuesSourceReaderOperator.ShardContext(searchContext.searcher().getIndexReader(), searchContext::newSourceLoader)), - 0 - ); - } - - private Page createNullResponse(int positionCount, List extractFields) { - final Block[] blocks = new Block[extractFields.size()]; - try { - for (int i = 0; i < extractFields.size(); i++) { - blocks[i] = blockFactory.newConstantNullBlock(positionCount); - } - return new Page(blocks); - } finally { - if (blocks[blocks.length - 1] == null) { - Releasables.close(blocks); - } - } - } + public static class Request extends AbstractLookupService.Request { + private final String matchType; + private final String matchField; - private class TransportHandler implements TransportRequestHandler { - @Override - public void messageReceived(LookupRequest request, TransportChannel channel, Task task) { - request.incRef(); - ActionListener listener = ActionListener.runBefore(new ChannelActionListener<>(channel), request::decRef); - doLookup( - request.sessionId, - (CancellableTask) task, - request.shardId, - request.inputDataType, - request.matchType, - request.matchField, - request.inputPage, - request.extractFields, - listener.delegateFailureAndWrap( - (l, outPage) -> ActionListener.respondAndRelease(l, new LookupResponse(outPage, blockFactory)) - ) - ); + Request( + String sessionId, + String index, + DataType inputDataType, + String matchType, + String matchField, + Page inputPage, + List extractFields + ) { + super(sessionId, index, inputDataType, inputPage, extractFields); + this.matchType = matchType; + this.matchField = matchField; } } - private static class LookupRequest extends TransportRequest implements IndicesRequest { - private final String sessionId; - private final ShardId shardId; - private final DataType inputDataType; + protected static class TransportRequest extends AbstractLookupService.TransportRequest { private final String matchType; private final String matchField; - private final Page inputPage; - private final List extractFields; - // TODO: Remove this workaround once we have Block RefCount - private final Page toRelease; - private final RefCounted refs = AbstractRefCounted.of(this::releasePage); - LookupRequest( + TransportRequest( String sessionId, ShardId shardId, DataType inputDataType, String matchType, String matchField, Page inputPage, + Page toRelease, List extractFields ) { - this.sessionId = sessionId; - this.shardId = shardId; - this.inputDataType = inputDataType; + super(sessionId, shardId, inputDataType, inputPage, toRelease, extractFields); this.matchType = matchType; this.matchField = matchField; - this.inputPage = inputPage; - this.toRelease = null; - this.extractFields = extractFields; } - LookupRequest(StreamInput in, BlockFactory blockFactory) throws IOException { - super(in); - this.sessionId = in.readString(); - this.shardId = new ShardId(in); - String inputDataType = (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) ? in.readString() : "unknown"; - this.inputDataType = DataType.fromTypeName(inputDataType); - this.matchType = in.readString(); - this.matchField = in.readString(); + static TransportRequest readFrom(StreamInput in, BlockFactory blockFactory) throws IOException { + TaskId parentTaskId = TaskId.readFromStream(in); + String sessionId = in.readString(); + ShardId shardId = new ShardId(in); + DataType inputDataType = DataType.fromTypeName( + (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) ? in.readString() : "unknown" + ); + String matchType = in.readString(); + String matchField = in.readString(); + Page inputPage; try (BlockStreamInput bsi = new BlockStreamInput(in, blockFactory)) { - this.inputPage = new Page(bsi); + inputPage = new Page(bsi); } - this.toRelease = inputPage; PlanStreamInput planIn = new PlanStreamInput(in, in.namedWriteableRegistry(), null); - this.extractFields = planIn.readNamedWriteableCollectionAsList(NamedExpression.class); + List extractFields = planIn.readNamedWriteableCollectionAsList(NamedExpression.class); + TransportRequest result = new TransportRequest( + sessionId, + shardId, + inputDataType, + matchType, + matchField, + inputPage, + inputPage, + extractFields + ); + result.setParentTask(parentTaskId); + return result; } @Override @@ -490,144 +167,8 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public String[] indices() { - return new String[] { shardId.getIndexName() }; - } - - @Override - public IndicesOptions indicesOptions() { - return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); - } - - @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, "", parentTaskId, headers) { - @Override - public String getDescription() { - return lookupDescription( - sessionId, - shardId, - inputDataType, - matchType, - matchField, - extractFields, - inputPage.getPositionCount() - ); - } - }; - } - - private void releasePage() { - if (toRelease != null) { - Releasables.closeExpectNoException(toRelease::releaseBlocks); - } - } - - @Override - public void incRef() { - refs.incRef(); - } - - @Override - public boolean tryIncRef() { - return refs.tryIncRef(); - } - - @Override - public boolean decRef() { - return refs.decRef(); - } - - @Override - public boolean hasReferences() { - return refs.hasReferences(); - } - } - - private static String lookupDescription( - String sessionId, - ShardId shardId, - DataType inputDataType, - String matchType, - String matchField, - List extractFields, - int positionCount - ) { - return "ENRICH_LOOKUP(" - + " session=" - + sessionId - + " ,shard=" - + shardId - + " ,input_type=" - + inputDataType - + " ,match_type=" - + matchType - + " ,match_field=" - + matchField - + " ,extract_fields=" - + extractFields - + " ,positions=" - + positionCount - + ")"; - } - - private static class LookupResponse extends TransportResponse { - private Page page; - private final RefCounted refs = AbstractRefCounted.of(this::releasePage); - private final BlockFactory blockFactory; - private long reservedBytes = 0; - - LookupResponse(Page page, BlockFactory blockFactory) { - this.page = page; - this.blockFactory = blockFactory; - } - - LookupResponse(StreamInput in, BlockFactory blockFactory) throws IOException { - try (BlockStreamInput bsi = new BlockStreamInput(in, blockFactory)) { - this.page = new Page(bsi); - } - this.blockFactory = blockFactory; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - long bytes = page.ramBytesUsedByBlocks(); - blockFactory.breaker().addEstimateBytesAndMaybeBreak(bytes, "serialize enrich lookup response"); - reservedBytes += bytes; - page.writeTo(out); - } - - Page takePage() { - var p = page; - page = null; - return p; - } - - private void releasePage() { - blockFactory.breaker().addWithoutBreaking(-reservedBytes); - if (page != null) { - Releasables.closeExpectNoException(page::releaseBlocks); - } - } - - @Override - public void incRef() { - refs.incRef(); - } - - @Override - public boolean tryIncRef() { - return refs.tryIncRef(); - } - - @Override - public boolean decRef() { - return refs.decRef(); - } - - @Override - public boolean hasReferences() { - return refs.hasReferences(); + protected String extraDescription() { + return " ,match_type=" + matchType + " ,match_field=" + matchField; } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java new file mode 100644 index 0000000000000..836b400c54f8c --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java @@ -0,0 +1,200 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.enrich; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.AsyncOperator; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.type.DataType; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +// TODO rename package +public final class LookupFromIndexOperator extends AsyncOperator { + public record Factory( + String sessionId, + CancellableTask parentTask, + int maxOutstandingRequests, + int inputChannel, + LookupFromIndexService lookupService, + DataType inputDataType, + String lookupIndex, + String matchField, + List loadFields + ) implements OperatorFactory { + @Override + public String describe() { + return "LookupOperator[index=" + + lookupIndex + + " match_field=" + + matchField + + " load_fields=" + + loadFields + + " inputChannel=" + + inputChannel + + "]"; + } + + @Override + public Operator get(DriverContext driverContext) { + return new LookupFromIndexOperator( + sessionId, + driverContext, + parentTask, + maxOutstandingRequests, + inputChannel, + lookupService, + inputDataType, + lookupIndex, + matchField, + loadFields + ); + } + } + + private final LookupFromIndexService lookupService; + private final String sessionId; + private final CancellableTask parentTask; + private final int inputChannel; + private final DataType inputDataType; + private final String lookupIndex; + private final String matchField; + private final List loadFields; + private long totalTerms = 0L; + + public LookupFromIndexOperator( + String sessionId, + DriverContext driverContext, + CancellableTask parentTask, + int maxOutstandingRequests, + int inputChannel, + LookupFromIndexService lookupService, + DataType inputDataType, + String lookupIndex, + String matchField, + List loadFields + ) { + super(driverContext, maxOutstandingRequests); + this.sessionId = sessionId; + this.parentTask = parentTask; + this.inputChannel = inputChannel; + this.lookupService = lookupService; + this.inputDataType = inputDataType; + this.lookupIndex = lookupIndex; + this.matchField = matchField; + this.loadFields = loadFields; + } + + @Override + protected void performAsync(Page inputPage, ActionListener listener) { + final Block inputBlock = inputPage.getBlock(inputChannel); + totalTerms += inputBlock.getTotalValueCount(); + LookupFromIndexService.Request request = new LookupFromIndexService.Request( + sessionId, + lookupIndex, + inputDataType, + matchField, + new Page(inputBlock), + loadFields + ); + lookupService.lookupAsync(request, parentTask, listener.map(inputPage::appendPage)); + } + + @Override + public String toString() { + return "LookupOperator[index=" + + lookupIndex + + " input_type=" + + inputDataType + + " match_field=" + + matchField + + " load_fields=" + + loadFields + + " inputChannel=" + + inputChannel + + "]"; + } + + @Override + protected void doClose() { + // TODO: Maybe create a sub-task as the parent task of all the lookup tasks + // then cancel it when this operator terminates early (e.g., have enough result). + } + + @Override + protected Operator.Status status(long receivedPages, long completedPages, long totalTimeInMillis) { + return new LookupFromIndexOperator.Status(receivedPages, completedPages, totalTimeInMillis, totalTerms); + } + + public static class Status extends AsyncOperator.Status { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Operator.Status.class, + "lookup", + Status::new + ); + + final long totalTerms; + + Status(long receivedPages, long completedPages, long totalTimeInMillis, long totalTerms) { + super(receivedPages, completedPages, totalTimeInMillis); + this.totalTerms = totalTerms; + } + + Status(StreamInput in) throws IOException { + super(in); + this.totalTerms = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(totalTerms); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder); + builder.field("total_terms", totalTerms); + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass() || super.equals(o) == false) { + return false; + } + Status status = (Status) o; + return totalTerms == status.totalTerms; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), totalTerms); + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java new file mode 100644 index 0000000000000..b0ee77327690a --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.enrich; + +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockStreamInput; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; +import org.elasticsearch.xpack.esql.action.EsqlQueryAction; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; + +import java.io.IOException; +import java.util.List; + +/** + * {@link LookupFromIndexService} performs lookup against a Lookup index for + * a given input page. See {@link AbstractLookupService} for how it works + * where it refers to this process as a {@code LEFT JOIN}. Which is mostly is. + */ +public class LookupFromIndexService extends AbstractLookupService { + public static final String LOOKUP_ACTION_NAME = EsqlQueryAction.NAME + "/lookup_from_index"; + + public LookupFromIndexService( + ClusterService clusterService, + SearchService searchService, + TransportService transportService, + BigArrays bigArrays, + BlockFactory blockFactory + ) { + super( + LOOKUP_ACTION_NAME, + ClusterPrivilegeResolver.MONITOR_ENRICH.name(), // TODO some other privilege + clusterService, + searchService, + transportService, + bigArrays, + blockFactory, + TransportRequest::readFrom + ); + } + + @Override + protected TransportRequest transportRequest(LookupFromIndexService.Request request, ShardId shardId) { + return new TransportRequest( + request.sessionId, + shardId, + request.inputDataType, + request.inputPage, + null, + request.extractFields, + request.matchField + ); + } + + @Override + protected QueryList queryList(TransportRequest request, SearchExecutionContext context, Block inputBlock, DataType inputDataType) { + MappedFieldType fieldType = context.getFieldType(request.matchField); + return QueryList.termQueryList(fieldType, context, inputBlock, inputDataType); + } + + public static class Request extends AbstractLookupService.Request { + private final String matchField; + + Request( + String sessionId, + String index, + DataType inputDataType, + String matchField, + Page inputPage, + List extractFields + ) { + super(sessionId, index, inputDataType, inputPage, extractFields); + this.matchField = matchField; + } + } + + protected static class TransportRequest extends AbstractLookupService.TransportRequest { + private final String matchField; + + TransportRequest( + String sessionId, + ShardId shardId, + DataType inputDataType, + Page inputPage, + Page toRelease, + List extractFields, + String matchField + ) { + super(sessionId, shardId, inputDataType, inputPage, toRelease, extractFields); + this.matchField = matchField; + } + + static TransportRequest readFrom(StreamInput in, BlockFactory blockFactory) throws IOException { + TaskId parentTaskId = TaskId.readFromStream(in); + String sessionId = in.readString(); + ShardId shardId = new ShardId(in); + DataType inputDataType = DataType.fromTypeName(in.readString()); + Page inputPage; + try (BlockStreamInput bsi = new BlockStreamInput(in, blockFactory)) { + inputPage = new Page(bsi); + } + PlanStreamInput planIn = new PlanStreamInput(in, in.namedWriteableRegistry(), null); + List extractFields = planIn.readNamedWriteableCollectionAsList(NamedExpression.class); + String matchField = in.readString(); + TransportRequest result = new TransportRequest( + sessionId, + shardId, + inputDataType, + inputPage, + inputPage, + extractFields, + matchField + ); + result.setParentTask(parentTaskId); + return result; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(sessionId); + out.writeWriteable(shardId); + out.writeString(inputDataType.typeName()); + out.writeWriteable(inputPage); + PlanStreamOutput planOut = new PlanStreamOutput(out, null); + planOut.writeNamedWriteableCollection(extractFields); + out.writeString(matchField); + } + + @Override + protected String extraDescription() { + return " ,match_field=" + matchField; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java index 417e5777d9e8c..c86f01b045dad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java @@ -40,7 +40,7 @@ /** * Generates a list of Lucene queries based on the input block. */ -abstract class QueryList { +public abstract class QueryList { protected final Block block; protected QueryList(Block block) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index 193930cdf711d..c12de173fa6b8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -38,6 +38,7 @@ import org.elasticsearch.xpack.esql.core.async.AsyncTaskManagementService; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; +import org.elasticsearch.xpack.esql.enrich.LookupFromIndexService; import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.session.Configuration; @@ -65,6 +66,7 @@ public class TransportEsqlQueryAction extends HandledTransportAction asyncTaskManagementService; private final RemoteClusterService remoteClusterService; @@ -94,6 +96,7 @@ public TransportEsqlQueryAction( this.exchangeService = exchangeService; this.enrichPolicyResolver = new EnrichPolicyResolver(clusterService, transportService, planExecutor.indexResolver()); this.enrichLookupService = new EnrichLookupService(clusterService, searchService, transportService, bigArrays, blockFactory); + this.lookupFromIndexService = new LookupFromIndexService(clusterService, searchService, transportService, bigArrays, blockFactory); this.computeService = new ComputeService( searchService, transportService, @@ -278,4 +281,8 @@ public EsqlQueryResponse readResponse(StreamInput inputStream) throws IOExceptio private static boolean requestIsAsync(EsqlQueryRequest request) { return request.async(); } + + public LookupFromIndexService getLookupFromIndexService() { + return lookupFromIndexService; + } } From 30090b6b602ccc667164f7b12f84db14a38f147b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 30 Oct 2024 13:26:13 -0700 Subject: [PATCH 220/324] Move entitlement jars to libs (#115883) The distribution tools are meant to be CLIs. This commit moves the entitlements jar projects to the libs dir, under a single libs/entitlement root directory to keep the related jars together. --- .../entitlement-runtime => libs/entitlement}/README.md | 0 .../entitlement/agent}/README.md | 0 .../entitlement/agent}/build.gradle | 10 +++++----- .../entitlement/agent}/impl/build.gradle | 4 ++-- .../entitlement/agent}/impl/licenses/asm-LICENSE.txt | 0 .../entitlement/agent}/impl/licenses/asm-NOTICE.txt | 0 .../agent}/impl/src/main/java/module-info.java | 0 .../impl/InstrumentationServiceImpl.java | 0 .../instrumentation/impl/InstrumenterImpl.java | 0 ....entitlement.instrumentation.InstrumentationService | 0 .../entitlement/instrumentation/impl/ASMUtils.java | 0 .../instrumentation/impl/InstrumenterTests.java | 0 ...org.elasticsearch.entitlement.api.EntitlementChecks | 0 .../entitlement/agent}/src/main/java/module-info.java | 0 .../entitlement/agent/EntitlementAgent.java | 0 .../elasticsearch/entitlement/agent/Transformer.java | 0 .../instrumentation/InstrumentationService.java | 0 .../entitlement/instrumentation/Instrumenter.java | 0 .../entitlement/instrumentation/MethodKey.java | 0 .../entitlement/agent/EntitlementAgentTests.java | 0 .../entitlement/bridge}/README.md | 0 .../entitlement/bridge}/build.gradle | 0 .../entitlement/bridge}/src/main/java/module-info.java | 0 .../entitlement/api/EntitlementChecks.java | 0 .../entitlement/api/EntitlementProvider.java | 0 .../entitlement}/build.gradle | 2 +- .../entitlement}/src/main/java/module-info.java | 0 .../runtime/api/ElasticsearchEntitlementManager.java | 0 .../entitlement/runtime/api/NotEntitledException.java | 0 .../runtime/internals/EntitlementInternals.java | 0 .../entitlement/runtime/policy/Entitlement.java | 0 .../runtime/policy/ExternalEntitlement.java | 0 .../entitlement/runtime/policy/FileEntitlement.java | 0 .../entitlement/runtime/policy/Policy.java | 0 .../entitlement/runtime/policy/PolicyParser.java | 0 .../runtime/policy/PolicyParserException.java | 0 .../entitlement/runtime/policy/Scope.java | 0 ...org.elasticsearch.entitlement.api.EntitlementChecks | 0 .../runtime/policy/PolicyParserFailureTests.java | 0 .../entitlement/runtime/policy/PolicyParserTests.java | 0 .../entitlement/runtime/policy/test-policy.yaml | 0 settings.gradle | 4 ---- 42 files changed, 8 insertions(+), 12 deletions(-) rename {distribution/tools/entitlement-runtime => libs/entitlement}/README.md (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/README.md (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/build.gradle (82%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/impl/build.gradle (86%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/impl/licenses/asm-LICENSE.txt (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/impl/licenses/asm-NOTICE.txt (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/impl/src/main/java/module-info.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/impl/src/main/resources/META-INF/services/org.elasticsearch.entitlement.instrumentation.InstrumentationService (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/ASMUtils.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/impl/src/test/resources/META-INF/services/org.elasticsearch.entitlement.api.EntitlementChecks (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/src/main/java/module-info.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/src/main/java/org/elasticsearch/entitlement/agent/EntitlementAgent.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/src/main/java/org/elasticsearch/entitlement/agent/Transformer.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/src/main/java/org/elasticsearch/entitlement/instrumentation/InstrumentationService.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/src/main/java/org/elasticsearch/entitlement/instrumentation/Instrumenter.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java (100%) rename {distribution/tools/entitlement-agent => libs/entitlement/agent}/src/test/java/org/elasticsearch/entitlement/agent/EntitlementAgentTests.java (100%) rename {distribution/tools/entitlement-bridge => libs/entitlement/bridge}/README.md (100%) rename {distribution/tools/entitlement-bridge => libs/entitlement/bridge}/build.gradle (100%) rename {distribution/tools/entitlement-bridge => libs/entitlement/bridge}/src/main/java/module-info.java (100%) rename {distribution/tools/entitlement-bridge => libs/entitlement/bridge}/src/main/java/org/elasticsearch/entitlement/api/EntitlementChecks.java (100%) rename {distribution/tools/entitlement-bridge => libs/entitlement/bridge}/src/main/java/org/elasticsearch/entitlement/api/EntitlementProvider.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/build.gradle (93%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/module-info.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementManager.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/org/elasticsearch/entitlement/runtime/api/NotEntitledException.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/org/elasticsearch/entitlement/runtime/internals/EntitlementInternals.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/org/elasticsearch/entitlement/runtime/policy/Entitlement.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExternalEntitlement.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserException.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/main/resources/META-INF/services/org.elasticsearch.entitlement.api.EntitlementChecks (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java (100%) rename {distribution/tools/entitlement-runtime => libs/entitlement}/src/test/resources/org/elasticsearch/entitlement/runtime/policy/test-policy.yaml (100%) diff --git a/distribution/tools/entitlement-runtime/README.md b/libs/entitlement/README.md similarity index 100% rename from distribution/tools/entitlement-runtime/README.md rename to libs/entitlement/README.md diff --git a/distribution/tools/entitlement-agent/README.md b/libs/entitlement/agent/README.md similarity index 100% rename from distribution/tools/entitlement-agent/README.md rename to libs/entitlement/agent/README.md diff --git a/distribution/tools/entitlement-agent/build.gradle b/libs/entitlement/agent/build.gradle similarity index 82% rename from distribution/tools/entitlement-agent/build.gradle rename to libs/entitlement/agent/build.gradle index d3e7ae10dcc6d..5b29ba40b5f25 100644 --- a/distribution/tools/entitlement-agent/build.gradle +++ b/libs/entitlement/agent/build.gradle @@ -13,7 +13,7 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.embedded-providers' embeddedProviders { - impl 'entitlement-agent', project(':distribution:tools:entitlement-agent:impl') + impl 'entitlement-agent', project(':libs:entitlement:agent:impl') } configurations { @@ -21,12 +21,12 @@ configurations { } dependencies { - entitlementBridge project(":distribution:tools:entitlement-bridge") + entitlementBridge project(":libs:entitlement:bridge") compileOnly project(":libs:core") - compileOnly project(":distribution:tools:entitlement-runtime") + compileOnly project(":libs:entitlement") testImplementation project(":test:framework") - testImplementation project(":distribution:tools:entitlement-bridge") - testImplementation project(":distribution:tools:entitlement-agent:impl") + testImplementation project(":libs:entitlement:bridge") + testImplementation project(":libs:entitlement:agent:impl") } tasks.named('test').configure { diff --git a/distribution/tools/entitlement-agent/impl/build.gradle b/libs/entitlement/agent/impl/build.gradle similarity index 86% rename from distribution/tools/entitlement-agent/impl/build.gradle rename to libs/entitlement/agent/impl/build.gradle index 16f134bf0e693..e95f89612700d 100644 --- a/distribution/tools/entitlement-agent/impl/build.gradle +++ b/libs/entitlement/agent/impl/build.gradle @@ -10,10 +10,10 @@ apply plugin: 'elasticsearch.build' dependencies { - compileOnly project(':distribution:tools:entitlement-agent') + compileOnly project(':libs:entitlement:agent') implementation 'org.ow2.asm:asm:9.7' testImplementation project(":test:framework") - testImplementation project(":distribution:tools:entitlement-bridge") + testImplementation project(":libs:entitlement:bridge") testImplementation 'org.ow2.asm:asm-util:9.7' } diff --git a/distribution/tools/entitlement-agent/impl/licenses/asm-LICENSE.txt b/libs/entitlement/agent/impl/licenses/asm-LICENSE.txt similarity index 100% rename from distribution/tools/entitlement-agent/impl/licenses/asm-LICENSE.txt rename to libs/entitlement/agent/impl/licenses/asm-LICENSE.txt diff --git a/distribution/tools/entitlement-agent/impl/licenses/asm-NOTICE.txt b/libs/entitlement/agent/impl/licenses/asm-NOTICE.txt similarity index 100% rename from distribution/tools/entitlement-agent/impl/licenses/asm-NOTICE.txt rename to libs/entitlement/agent/impl/licenses/asm-NOTICE.txt diff --git a/distribution/tools/entitlement-agent/impl/src/main/java/module-info.java b/libs/entitlement/agent/impl/src/main/java/module-info.java similarity index 100% rename from distribution/tools/entitlement-agent/impl/src/main/java/module-info.java rename to libs/entitlement/agent/impl/src/main/java/module-info.java diff --git a/distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java b/libs/entitlement/agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java similarity index 100% rename from distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java rename to libs/entitlement/agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java diff --git a/distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java b/libs/entitlement/agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java similarity index 100% rename from distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java rename to libs/entitlement/agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java diff --git a/distribution/tools/entitlement-agent/impl/src/main/resources/META-INF/services/org.elasticsearch.entitlement.instrumentation.InstrumentationService b/libs/entitlement/agent/impl/src/main/resources/META-INF/services/org.elasticsearch.entitlement.instrumentation.InstrumentationService similarity index 100% rename from distribution/tools/entitlement-agent/impl/src/main/resources/META-INF/services/org.elasticsearch.entitlement.instrumentation.InstrumentationService rename to libs/entitlement/agent/impl/src/main/resources/META-INF/services/org.elasticsearch.entitlement.instrumentation.InstrumentationService diff --git a/distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/ASMUtils.java b/libs/entitlement/agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/ASMUtils.java similarity index 100% rename from distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/ASMUtils.java rename to libs/entitlement/agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/ASMUtils.java diff --git a/distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java b/libs/entitlement/agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java similarity index 100% rename from distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java rename to libs/entitlement/agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java diff --git a/distribution/tools/entitlement-agent/impl/src/test/resources/META-INF/services/org.elasticsearch.entitlement.api.EntitlementChecks b/libs/entitlement/agent/impl/src/test/resources/META-INF/services/org.elasticsearch.entitlement.api.EntitlementChecks similarity index 100% rename from distribution/tools/entitlement-agent/impl/src/test/resources/META-INF/services/org.elasticsearch.entitlement.api.EntitlementChecks rename to libs/entitlement/agent/impl/src/test/resources/META-INF/services/org.elasticsearch.entitlement.api.EntitlementChecks diff --git a/distribution/tools/entitlement-agent/src/main/java/module-info.java b/libs/entitlement/agent/src/main/java/module-info.java similarity index 100% rename from distribution/tools/entitlement-agent/src/main/java/module-info.java rename to libs/entitlement/agent/src/main/java/module-info.java diff --git a/distribution/tools/entitlement-agent/src/main/java/org/elasticsearch/entitlement/agent/EntitlementAgent.java b/libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/agent/EntitlementAgent.java similarity index 100% rename from distribution/tools/entitlement-agent/src/main/java/org/elasticsearch/entitlement/agent/EntitlementAgent.java rename to libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/agent/EntitlementAgent.java diff --git a/distribution/tools/entitlement-agent/src/main/java/org/elasticsearch/entitlement/agent/Transformer.java b/libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/agent/Transformer.java similarity index 100% rename from distribution/tools/entitlement-agent/src/main/java/org/elasticsearch/entitlement/agent/Transformer.java rename to libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/agent/Transformer.java diff --git a/distribution/tools/entitlement-agent/src/main/java/org/elasticsearch/entitlement/instrumentation/InstrumentationService.java b/libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/instrumentation/InstrumentationService.java similarity index 100% rename from distribution/tools/entitlement-agent/src/main/java/org/elasticsearch/entitlement/instrumentation/InstrumentationService.java rename to libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/instrumentation/InstrumentationService.java diff --git a/distribution/tools/entitlement-agent/src/main/java/org/elasticsearch/entitlement/instrumentation/Instrumenter.java b/libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/instrumentation/Instrumenter.java similarity index 100% rename from distribution/tools/entitlement-agent/src/main/java/org/elasticsearch/entitlement/instrumentation/Instrumenter.java rename to libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/instrumentation/Instrumenter.java diff --git a/distribution/tools/entitlement-agent/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java b/libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java similarity index 100% rename from distribution/tools/entitlement-agent/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java rename to libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java diff --git a/distribution/tools/entitlement-agent/src/test/java/org/elasticsearch/entitlement/agent/EntitlementAgentTests.java b/libs/entitlement/agent/src/test/java/org/elasticsearch/entitlement/agent/EntitlementAgentTests.java similarity index 100% rename from distribution/tools/entitlement-agent/src/test/java/org/elasticsearch/entitlement/agent/EntitlementAgentTests.java rename to libs/entitlement/agent/src/test/java/org/elasticsearch/entitlement/agent/EntitlementAgentTests.java diff --git a/distribution/tools/entitlement-bridge/README.md b/libs/entitlement/bridge/README.md similarity index 100% rename from distribution/tools/entitlement-bridge/README.md rename to libs/entitlement/bridge/README.md diff --git a/distribution/tools/entitlement-bridge/build.gradle b/libs/entitlement/bridge/build.gradle similarity index 100% rename from distribution/tools/entitlement-bridge/build.gradle rename to libs/entitlement/bridge/build.gradle diff --git a/distribution/tools/entitlement-bridge/src/main/java/module-info.java b/libs/entitlement/bridge/src/main/java/module-info.java similarity index 100% rename from distribution/tools/entitlement-bridge/src/main/java/module-info.java rename to libs/entitlement/bridge/src/main/java/module-info.java diff --git a/distribution/tools/entitlement-bridge/src/main/java/org/elasticsearch/entitlement/api/EntitlementChecks.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/api/EntitlementChecks.java similarity index 100% rename from distribution/tools/entitlement-bridge/src/main/java/org/elasticsearch/entitlement/api/EntitlementChecks.java rename to libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/api/EntitlementChecks.java diff --git a/distribution/tools/entitlement-bridge/src/main/java/org/elasticsearch/entitlement/api/EntitlementProvider.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/api/EntitlementProvider.java similarity index 100% rename from distribution/tools/entitlement-bridge/src/main/java/org/elasticsearch/entitlement/api/EntitlementProvider.java rename to libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/api/EntitlementProvider.java diff --git a/distribution/tools/entitlement-runtime/build.gradle b/libs/entitlement/build.gradle similarity index 93% rename from distribution/tools/entitlement-runtime/build.gradle rename to libs/entitlement/build.gradle index aaeee76d8bc57..712cf358f5883 100644 --- a/distribution/tools/entitlement-runtime/build.gradle +++ b/libs/entitlement/build.gradle @@ -13,7 +13,7 @@ dependencies { compileOnly project(':libs:core') // For @SuppressForbidden compileOnly project(":libs:x-content") // for parsing policy files compileOnly project(':server') // To access the main server module for special permission checks - compileOnly project(':distribution:tools:entitlement-bridge') + compileOnly project(':libs:entitlement:bridge') testImplementation project(":test:framework") } diff --git a/distribution/tools/entitlement-runtime/src/main/java/module-info.java b/libs/entitlement/src/main/java/module-info.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/module-info.java rename to libs/entitlement/src/main/java/module-info.java diff --git a/distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementManager.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementManager.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementManager.java diff --git a/distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/api/NotEntitledException.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/NotEntitledException.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/api/NotEntitledException.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/NotEntitledException.java diff --git a/distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/internals/EntitlementInternals.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/internals/EntitlementInternals.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/internals/EntitlementInternals.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/internals/EntitlementInternals.java diff --git a/distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/Entitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Entitlement.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/Entitlement.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Entitlement.java diff --git a/distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExternalEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExternalEntitlement.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExternalEntitlement.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExternalEntitlement.java diff --git a/distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java diff --git a/distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java diff --git a/distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java diff --git a/distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserException.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserException.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserException.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserException.java diff --git a/distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java diff --git a/distribution/tools/entitlement-runtime/src/main/resources/META-INF/services/org.elasticsearch.entitlement.api.EntitlementChecks b/libs/entitlement/src/main/resources/META-INF/services/org.elasticsearch.entitlement.api.EntitlementChecks similarity index 100% rename from distribution/tools/entitlement-runtime/src/main/resources/META-INF/services/org.elasticsearch.entitlement.api.EntitlementChecks rename to libs/entitlement/src/main/resources/META-INF/services/org.elasticsearch.entitlement.api.EntitlementChecks diff --git a/distribution/tools/entitlement-runtime/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java rename to libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java diff --git a/distribution/tools/entitlement-runtime/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java similarity index 100% rename from distribution/tools/entitlement-runtime/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java rename to libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java diff --git a/distribution/tools/entitlement-runtime/src/test/resources/org/elasticsearch/entitlement/runtime/policy/test-policy.yaml b/libs/entitlement/src/test/resources/org/elasticsearch/entitlement/runtime/policy/test-policy.yaml similarity index 100% rename from distribution/tools/entitlement-runtime/src/test/resources/org/elasticsearch/entitlement/runtime/policy/test-policy.yaml rename to libs/entitlement/src/test/resources/org/elasticsearch/entitlement/runtime/policy/test-policy.yaml diff --git a/settings.gradle b/settings.gradle index 25ed048d57253..54a9514490db0 100644 --- a/settings.gradle +++ b/settings.gradle @@ -89,10 +89,6 @@ List projects = [ 'distribution:tools:keystore-cli', 'distribution:tools:geoip-cli', 'distribution:tools:ansi-console', - 'distribution:tools:entitlement-agent', - 'distribution:tools:entitlement-agent:impl', - 'distribution:tools:entitlement-bridge', - 'distribution:tools:entitlement-runtime', 'server', 'test:framework', 'test:fixtures:azure-fixture', From 1958a6aa47875be322ca93f1229dba23d86bc668 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 31 Oct 2024 07:33:50 +1100 Subject: [PATCH 221/324] Mute org.elasticsearch.repositories.s3.S3ServiceTests testRetryOn403RetryPolicy #115986 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index a533a010f9d37..4bf0e7d386caf 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -278,6 +278,9 @@ tests: - class: org.elasticsearch.monitor.jvm.JvmStatsTests method: testJvmStats issue: https://github.com/elastic/elasticsearch/issues/115711 +- class: org.elasticsearch.repositories.s3.S3ServiceTests + method: testRetryOn403RetryPolicy + issue: https://github.com/elastic/elasticsearch/issues/115986 # Examples: # From 309c266a79367dff315630c83257c8751be748dd Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 30 Oct 2024 21:45:45 +0100 Subject: [PATCH 222/324] [Test] Unmute PublishPluginFuncTest (#115902) Closes #114492 --- muted-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 4bf0e7d386caf..a911ca4f71f2a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -195,8 +195,6 @@ tests: - class: org.elasticsearch.packaging.test.DockerTests method: test022InstallPluginsFromLocalArchive issue: https://github.com/elastic/elasticsearch/issues/111063 -- class: org.elasticsearch.gradle.internal.PublishPluginFuncTest - issue: https://github.com/elastic/elasticsearch/issues/114492 - class: org.elasticsearch.xpack.inference.DefaultElserIT method: testInferCreatesDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114503 From f1794363f0a4eaf4b28de438a95fcf5f7cc56382 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 31 Oct 2024 07:55:32 +1100 Subject: [PATCH 223/324] Mute org.elasticsearch.search.slice.SearchSliceIT testPointInTime #115988 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index a911ca4f71f2a..f53f8e970be8f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -279,6 +279,9 @@ tests: - class: org.elasticsearch.repositories.s3.S3ServiceTests method: testRetryOn403RetryPolicy issue: https://github.com/elastic/elasticsearch/issues/115986 +- class: org.elasticsearch.search.slice.SearchSliceIT + method: testPointInTime + issue: https://github.com/elastic/elasticsearch/issues/115988 # Examples: # From dfd6814d2b1d027b78a2d430f2dd638b4058373c Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 31 Oct 2024 07:56:46 +1100 Subject: [PATCH 224/324] Mute org.elasticsearch.action.search.PointInTimeIT testPITTiebreak #115810 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f53f8e970be8f..8e39bd1a58dda 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.search.slice.SearchSliceIT method: testPointInTime issue: https://github.com/elastic/elasticsearch/issues/115988 +- class: org.elasticsearch.action.search.PointInTimeIT + method: testPITTiebreak + issue: https://github.com/elastic/elasticsearch/issues/115810 # Examples: # From efcdd6077695ba6984e5a2114c06e0f0f19bc786 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 30 Oct 2024 14:43:43 -0700 Subject: [PATCH 225/324] Update bundled jdk to 23 (#114823) After completing additional validation with the JIT workaround in https://github.com/elastic/elasticsearch/pull/113817, this commit upgrades the bundled JDK to 23. --- build-tools-internal/version.properties | 2 +- gradle/verification-metadata.xml | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 6bc3c2ad4d253..c3511dd5d256c 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -2,7 +2,7 @@ elasticsearch = 9.0.0 lucene = 10.0.0 bundled_jdk_vendor = openjdk -bundled_jdk = 22.0.1+8@c7ec1332f7bb44aeba2eb341ae18aca4 +bundled_jdk = 23+37@3c5b90190c68498b986a97f276efd28a # optional dependencies spatial4j = 0.7 jts = 1.15.0 diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 869cb64de54d0..7c1e11f390f04 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -1841,6 +1841,27 @@ + + + + + + + + + + + + + + + + + + + + + From a3a312f97214eec064cb056b0630de7e8591eab4 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 30 Oct 2024 22:07:13 +0000 Subject: [PATCH 226/324] Fix owner of `?wait_for_active_shards=index-setting` update (#115837) This warning relates to the close index API and the corresponding entry in `RestCloseIndexAction` has `DATA_MANAGEMENT` as the owner, so we should use the same owner here. --- .../main/java/org/elasticsearch/test/rest/ESRestTestCase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 676fb13d29428..0a3cf6726ea4a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -1909,7 +1909,7 @@ protected static boolean indexExists(RestClient client, String index) throws IOE * emitted in v8. Note that this message is also permitted in certain YAML test cases, it can be removed there too. * See https://github.com/elastic/elasticsearch/issues/66419 for more details. */ - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) + @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) private static final String WAIT_FOR_ACTIVE_SHARDS_DEFAULT_DEPRECATION_MESSAGE = "the default value for the ?wait_for_active_shards " + "parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' " + "to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour"; From 5f1f4dcfcba95c8764fd275692808a9c96c13da9 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 31 Oct 2024 11:43:10 +1100 Subject: [PATCH 227/324] Add a separate method for rerouting with reset failed counter (#115896) A cluster resets failed counter and calls reroute on node-join. This is a background activity not directly initiated by end-users. Currently it uses the same reroute method that handles the reoute API. This creates some ambiguity on the method's scope. This PR adds a new dedicate method for this use case and leave the existing one dedicated for API usage. --- .../cluster/routing/allocation/AllocationService.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 83c9c51419a66..5d1e6741c5e22 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -572,7 +572,7 @@ public void addAllocFailuresResetListenerTo(ClusterService clusterService) { // set retryFailed=true to trigger failures reset during reroute var taskQueue = clusterService.createTaskQueue("reset-allocation-failures", Priority.NORMAL, (batchCtx) -> { batchCtx.taskContexts().forEach((taskCtx) -> taskCtx.success(() -> {})); - return reroute(batchCtx.initialState(), new AllocationCommands(), false, true, false, ActionListener.noop()).clusterState(); + return rerouteWithResetFailedCounter(batchCtx.initialState()); }); clusterService.addListener((changeEvent) -> { @@ -582,6 +582,13 @@ public void addAllocFailuresResetListenerTo(ClusterService clusterService) { }); } + private ClusterState rerouteWithResetFailedCounter(ClusterState clusterState) { + RoutingAllocation allocation = createRoutingAllocation(clusterState, currentNanoTime()); + allocation.routingNodes().resetFailedCounter(allocation.changes()); + reroute(allocation, routingAllocation -> shardsAllocator.allocate(routingAllocation, ActionListener.noop())); + return buildResultAndLogHealthChange(clusterState, allocation, "reroute with reset failed counter"); + } + private static void disassociateDeadNodes(RoutingAllocation allocation) { for (Iterator it = allocation.routingNodes().mutableIterator(); it.hasNext();) { RoutingNode node = it.next(); From 4caddc6f140a8fb4a12b419df0edbaeb0ed59b03 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 31 Oct 2024 15:44:12 +1100 Subject: [PATCH 228/324] Mute org.elasticsearch.xpack.searchablesnapshots.hdfs.SecureHdfsSearchableSnapshotsIT org.elasticsearch.xpack.searchablesnapshots.hdfs.SecureHdfsSearchableSnapshotsIT #115995 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 8e39bd1a58dda..2383e3d09f50c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -285,6 +285,8 @@ tests: - class: org.elasticsearch.action.search.PointInTimeIT method: testPITTiebreak issue: https://github.com/elastic/elasticsearch/issues/115810 +- class: org.elasticsearch.xpack.searchablesnapshots.hdfs.SecureHdfsSearchableSnapshotsIT + issue: https://github.com/elastic/elasticsearch/issues/115995 # Examples: # From 5452fce00c863ef323751e0607a601aa95ce46da Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 31 Oct 2024 15:50:48 +1100 Subject: [PATCH 229/324] Mute org.elasticsearch.index.reindex.ReindexNodeShutdownIT testReindexWithShutdown #115996 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 2383e3d09f50c..68ab8eb37a600 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -287,6 +287,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/115810 - class: org.elasticsearch.xpack.searchablesnapshots.hdfs.SecureHdfsSearchableSnapshotsIT issue: https://github.com/elastic/elasticsearch/issues/115995 +- class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT + method: testReindexWithShutdown + issue: https://github.com/elastic/elasticsearch/issues/115996 # Examples: # From 4ee98e80b290370c27ab96c623c866a5960d91e5 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Wed, 30 Oct 2024 22:35:51 -0700 Subject: [PATCH 230/324] ESQL: Refactor Join inside the planner (#115813) First PR that introduces a Join as a first class citizen in the planner. Previously the Join was modeled as a unary node, embedding the right side as a local relationship inside the node but not exposed as a child. This caused a lot the associated methods (like references, output and inputSet) to misbehave and the physical plan rules to pick incorrect information, such as trying to extract the local relationship fields from the underlying source - the fix was to the local relationship fields as ReferenceAttribute (which of course had its own set of issues). Essentially Join was acting both as a source and as a streaming operator. This PR looks to partially address this by: - refactoring Join into a proper binary node with left and right branches which are used for its references and input/outputSet. - refactoring InlineStats to prefer composition and move the Aggregate on the join right branch. This reuses the Aggregate resolution out of the box; in the process remove the Stats interface. - update some of the planner rules that only worked with Unary nodes. - refactor Mapper into (coordinator) Mapper and LocalMapper. - remove Phased interface by moving its functionality inside the planner (no need to unpack the phased classes, the join already indicates the two branches needed). - massage the Phased execution inside EsqlSession - improve FieldExtractor to handle binary nodes - fix incorrect references in Lookup - generalize ProjectAwayColumns rule Relates #112266 Not all inline and lookup tests are passing: - 2 lookup fields are failing due to name clashes (qualifiers should fix this) - 7 or so inline failures with a similar issue I've disabled the tests for now to have them around once we complete adding the functionality. --- .../xpack/esql/ccq/MultiClusterSpecIT.java | 1 + .../xpack/esql/qa/single_node/RestEsqlIT.java | 1 + .../xpack/esql/qa/rest/RestEsqlTestCase.java | 1 + .../xpack/esql/EsqlTestUtils.java | 6 +- .../src/main/resources/inlinestats.csv-spec | 112 +++--- .../src/main/resources/lookup.csv-spec | 6 +- .../src/main/resources/union_types.csv-spec | 2 +- .../xpack/esql/action/TelemetryIT.java | 4 +- .../xpack/esql/action/EsqlCapabilities.java | 9 +- .../xpack/esql/analysis/Analyzer.java | 22 +- .../xpack/esql/execution/PlanExecutor.java | 11 +- .../esql/optimizer/LogicalPlanOptimizer.java | 18 +- .../rules/logical/CombineProjections.java | 3 +- .../rules/logical/PropagateInlineEvals.java | 89 +++++ .../rules/logical/RemoveStatsOverride.java | 26 +- ...eplaceAggregateAggExpressionWithEval.java} | 4 +- ...aceAggregateNestedExpressionWithEval.java} | 15 +- .../logical/SubstituteSurrogatePlans.java | 26 ++ .../rules/physical/ProjectAwayColumns.java | 3 +- .../physical/local/InsertFieldExtraction.java | 32 +- .../xpack/esql/package-info.java | 5 +- .../xpack/esql/parser/LogicalPlanBuilder.java | 10 +- .../xpack/esql/plan/logical/Aggregate.java | 7 +- .../xpack/esql/plan/logical/BinaryPlan.java | 23 ++ .../xpack/esql/plan/logical/InlineStats.java | 208 +++------- .../xpack/esql/plan/logical/LogicalPlan.java | 4 +- .../xpack/esql/plan/logical/Lookup.java | 13 +- .../xpack/esql/plan/logical/Phased.java | 135 ------- .../xpack/esql/plan/logical/Stats.java | 50 --- .../plan/logical/SurrogateLogicalPlan.java | 20 + .../esql/plan/logical/join/InlineJoin.java | 134 +++++++ .../xpack/esql/plan/logical/join/Join.java | 13 +- .../esql/plan/logical/join/StubRelation.java | 98 +++++ .../logical/local/ImmediateLocalSupplier.java | 4 +- .../xpack/esql/plan/physical/BinaryExec.java | 68 ++++ .../esql/plan/physical/FragmentExec.java | 4 + .../esql/plan/physical/HashJoinExec.java | 40 +- .../esql/plan/physical/PhysicalPlan.java | 1 + .../esql/plan/physical/SubqueryExec.java | 73 ++++ .../esql/planner/LocalExecutionPlanner.java | 12 +- .../xpack/esql/planner/Mapper.java | 365 ------------------ .../xpack/esql/planner/PlannerUtils.java | 10 +- .../esql/planner/mapper/LocalMapper.java | 125 ++++++ .../xpack/esql/planner/mapper/Mapper.java | 224 +++++++++++ .../esql/planner/mapper/MapperUtils.java | 142 +++++++ .../esql/plugin/TransportEsqlQueryAction.java | 9 +- .../xpack/esql/session/CcsUtils.java | 221 +++++++++++ .../xpack/esql/session/EsqlSession.java | 337 +++++----------- .../xpack/esql/session/SessionUtils.java | 61 +++ .../elasticsearch/xpack/esql/CsvTests.java | 29 +- .../xpack/esql/analysis/AnalyzerTests.java | 2 +- .../optimizer/LogicalPlanOptimizerTests.java | 25 +- .../optimizer/PhysicalPlanOptimizerTests.java | 12 +- .../esql/optimizer/TestPlannerOptimizer.java | 4 +- .../esql/parser/StatementParserTests.java | 30 +- .../InlineStatsSerializationTests.java | 9 +- .../plan/logical/JoinSerializationTests.java | 5 + .../xpack/esql/plan/logical/JoinTests.java | 3 +- .../xpack/esql/plan/logical/PhasedTests.java | 172 --------- .../HashJoinExecSerializationTests.java | 9 +- .../xpack/esql/planner/FilterTests.java | 3 +- .../esql/plugin/DataNodeRequestTests.java | 5 +- .../xpack/esql/session/EsqlSessionTests.java | 24 +- .../esql/stats/PlanExecutorMetricsTests.java | 5 +- .../esql/tree/EsqlNodeSubclassTests.java | 5 +- 65 files changed, 1757 insertions(+), 1392 deletions(-) create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateInlineEvals.java rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/{ReplaceStatsAggExpressionWithEval.java => ReplaceAggregateAggExpressionWithEval.java} (97%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/{ReplaceStatsNestedExpressionWithEval.java => ReplaceAggregateNestedExpressionWithEval.java} (93%) create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SubstituteSurrogatePlans.java delete mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java delete mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/SurrogateLogicalPlan.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/InlineJoin.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/StubRelation.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/SubqueryExec.java delete mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/LocalMapper.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/Mapper.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/MapperUtils.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/CcsUtils.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/SessionUtils.java delete mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index 8446ac63f43a1..3e77bee79dd10 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -112,6 +112,7 @@ protected void shouldSkipTest(String testName) throws IOException { ); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats")); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats_v2")); + assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("join_planning_v1")); } private TestFeatureService remoteFeaturesService() throws IOException { diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index 7de4ee4ccae28..9a184b9a620fd 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -360,6 +360,7 @@ public void testProfileOrdinalsGroupingOperator() throws IOException { assertThat(signatures, hasItem(hasItem("OrdinalsGroupingOperator[aggregators=[\"sum of longs\", \"count\"]]"))); } + @AwaitsFix(bugUrl = "disabled until JOIN infrastructrure properly lands") public void testInlineStatsProfile() throws IOException { assumeTrue("INLINESTATS only available on snapshots", Build.current().isSnapshot()); indexTimestampData(1); diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 8c52a24231a41..ef1e77280d0ee 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -848,6 +848,7 @@ public void testComplexFieldNames() throws IOException { * query. It's part of the "configuration" of the query. *

    */ + @AwaitsFix(bugUrl = "Disabled temporarily until JOIN implementation is completed") public void testInlineStatsNow() throws IOException { assumeTrue("INLINESTATS only available on snapshots", Build.current().isSnapshot()); indexTimestampData(1); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java index d71c66b4c467f..e755ddb4d0d10 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.bytes.BytesReference; @@ -600,7 +601,10 @@ else if (Files.isDirectory(path)) { Files.walkFileTree(path, EnumSet.allOf(FileVisitOption.class), 1, new SimpleFileVisitor<>() { @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - if (Regex.simpleMatch(filePattern, file.toString())) { + // remove the path folder from the URL + String name = Strings.replace(file.toUri().toString(), path.toUri().toString(), StringUtils.EMPTY); + Tuple entrySplit = pathAndName(name); + if (root.equals(entrySplit.v1()) && Regex.simpleMatch(filePattern, entrySplit.v2())) { matches.add(file.toUri().toURL()); } return FileVisitResult.CONTINUE; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec index 3f2e14f74174b..0398921efabfd 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec @@ -1,6 +1,9 @@ -maxOfInt -required_capability: inlinestats +// +// TODO: re-enable the commented tests once the Join functionality stabilizes +// +maxOfInt-Ignore +required_capability: join_planning_v1 // tag::max-languages[] FROM employees | KEEP emp_no, languages @@ -22,7 +25,7 @@ emp_no:integer | languages:integer | max_lang:integer ; maxOfIntByKeyword -required_capability: inlinestats +required_capability: join_planning_v1 FROM employees | KEEP emp_no, languages, gender @@ -40,7 +43,7 @@ emp_no:integer | languages:integer | gender:keyword | max_lang:integer ; maxOfLongByKeyword -required_capability: inlinestats +required_capability: join_planning_v1 FROM employees | KEEP emp_no, avg_worked_seconds, gender @@ -54,8 +57,8 @@ emp_no:integer | avg_worked_seconds:long | gender:keyword | max_avg_worked_secon 10030 | 394597613 | M | 394597613 ; -maxOfLong -required_capability: inlinestats +maxOfLong-Ignore +required_capability: join_planning_v1 FROM employees | KEEP emp_no, avg_worked_seconds, gender @@ -68,7 +71,7 @@ emp_no:integer | avg_worked_seconds:long | gender:keyword | max_avg_worked_secon ; maxOfLongByCalculatedKeyword -required_capability: inlinestats_v2 +required_capability: join_planning_v1 // tag::longest-tenured-by-first[] FROM employees @@ -91,7 +94,7 @@ emp_no:integer | avg_worked_seconds:long | last_name:keyword | SUBSTRING(last_na ; maxOfLongByCalculatedNamedKeyword -required_capability: inlinestats_v2 +required_capability: join_planning_v1 FROM employees | KEEP emp_no, avg_worked_seconds, last_name @@ -110,7 +113,7 @@ emp_no:integer | avg_worked_seconds:long | last_name:keyword | l:keyword | max_a ; maxOfLongByCalculatedDroppedKeyword -required_capability: inlinestats_v2 +required_capability: join_planning_v1 FROM employees | INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds) BY l = SUBSTRING(last_name, 0, 1) @@ -129,7 +132,7 @@ emp_no:integer | avg_worked_seconds:long | last_name:keyword | max_avg_worked_se ; maxOfLongByEvaledKeyword -required_capability: inlinestats +required_capability: join_planning_v1 FROM employees | EVAL l = SUBSTRING(last_name, 0, 1) @@ -149,7 +152,7 @@ emp_no:integer | avg_worked_seconds:long | l:keyword | max_avg_worked_seconds:lo ; maxOfLongByInt -required_capability: inlinestats +required_capability: join_planning_v1 FROM employees | KEEP emp_no, avg_worked_seconds, languages @@ -167,7 +170,7 @@ emp_no:integer | avg_worked_seconds:long | languages:integer | max_avg_worked_se ; maxOfLongByIntDouble -required_capability: inlinestats +required_capability: join_planning_v1 FROM employees | KEEP emp_no, avg_worked_seconds, languages, height @@ -185,8 +188,8 @@ emp_no:integer | avg_worked_seconds:long | languages:integer | height:double | m ; -two -required_capability: inlinestats +two-Ignore +required_capability: join_planning_v1 FROM employees | KEEP emp_no, languages, avg_worked_seconds, gender @@ -203,7 +206,7 @@ emp_no:integer | languages:integer | avg_worked_seconds:long | gender:keyword | ; byMultivaluedSimple -required_capability: inlinestats +required_capability: join_planning_v1 // tag::mv-group[] FROM airports @@ -221,7 +224,7 @@ abbrev:keyword | type:keyword | scalerank:integer | min_scalerank:integer ; byMultivaluedMvExpand -required_capability: inlinestats +required_capability: join_planning_v1 // tag::mv-expand[] FROM airports @@ -241,7 +244,7 @@ abbrev:keyword | type:keyword | scalerank:integer | min_scalerank:integer ; byMvExpand -required_capability: inlinestats +required_capability: join_planning_v1 // tag::extreme-airports[] FROM airports @@ -270,7 +273,7 @@ FROM airports ; brokenwhy-Ignore -required_capability: inlinestats +required_capability: join_planning_v1 FROM airports | INLINESTATS min_scalerank=MIN(scalerank) BY type @@ -281,8 +284,8 @@ abbrev:keyword | type:keyword | scalerank:integer | min_scalerank:integer GWL | [mid, military] | 9 | [2, 4] ; -afterStats -required_capability: inlinestats +afterStats-Ignore +required_capability: join_planning_v1 FROM airports | STATS count=COUNT(*) BY country @@ -305,7 +308,7 @@ count:long | country:keyword | avg:double ; afterWhere -required_capability: inlinestats +required_capability: join_planning_v1 FROM airports | WHERE country != "United States" @@ -322,8 +325,8 @@ abbrev:keyword | country:keyword | count:long BDQ | India | 50 ; -afterLookup -required_capability: inlinestats +afterLookup-Ignore +required_capability: join_planning_v1 FROM airports | RENAME scalerank AS int @@ -343,9 +346,8 @@ abbrev:keyword | scalerank:keyword ACA | four ; -afterEnrich -required_capability: inlinestats -required_capability: enrich_load +afterEnrich-Ignore +required_capability: join_planning_v1 FROM airports | KEEP abbrev, city @@ -364,8 +366,8 @@ abbrev:keyword | city:keyword | region:text | "COUNT(*)":long FUK | Fukuoka | 中央区 | 2 ; -beforeStats -required_capability: inlinestats +beforeStats-Ignore +required_capability: join_planning_v1 FROM airports | EVAL lat = ST_Y(location) @@ -378,7 +380,7 @@ northern:long | southern:long ; beforeKeepSort -required_capability: inlinestats +required_capability: join_planning_v1 FROM employees | INLINESTATS max_salary = MAX(salary) by languages @@ -393,7 +395,7 @@ emp_no:integer | languages:integer | max_salary:integer ; beforeKeepWhere -required_capability: inlinestats +required_capability: join_planning_v1 FROM employees | INLINESTATS max_salary = MAX(salary) by languages @@ -405,9 +407,8 @@ emp_no:integer | languages:integer | max_salary:integer 10003 | 4 | 74572 ; -beforeEnrich -required_capability: inlinestats -required_capability: enrich_load +beforeEnrich-Ignore +required_capability: join_planning_v1 FROM airports | KEEP abbrev, type, city @@ -424,9 +425,8 @@ abbrev:keyword | type:keyword | city:keyword | "COUNT(*)":long | region:te ACA | major | Acapulco de Juárez | 385 | Acapulco de Juárez ; -beforeAndAfterEnrich -required_capability: inlinestats -required_capability: enrich_load +beforeAndAfterEnrich-Ignore +required_capability: join_planning_v1 FROM airports | KEEP abbrev, type, city @@ -445,8 +445,8 @@ abbrev:keyword | type:keyword | city:keyword | "COUNT(*)":long | region:te ; -shadowing -required_capability: inlinestats +shadowing-Ignore +required_capability: join_planning_v1 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | INLINESTATS env=VALUES(right) BY client_ip @@ -456,8 +456,8 @@ left:keyword | client_ip:keyword | right:keyword | env:keyword left | 172.21.0.5 | right | right ; -shadowingMulti -required_capability: inlinestats +shadowingMulti-Ignore +required_capability: join_planning_v1 ROW left = "left", airport = "Zurich Airport ZRH", city = "Zürich", middle = "middle", region = "North-East Switzerland", right = "right" | INLINESTATS airport=VALUES(left), region=VALUES(left), city_boundary=VALUES(left) BY city @@ -467,8 +467,8 @@ left:keyword | city:keyword | middle:keyword | right:keyword | airport:keyword | left | Zürich | middle | right | left | left | left ; -shadowingSelf -required_capability: inlinestats +shadowingSelf-Ignore +required_capability: join_planning_v1 ROW city="Raleigh" | INLINESTATS city=COUNT(city) @@ -479,7 +479,7 @@ city:long ; shadowingSelfBySelf-Ignore -required_capability: inlinestats +required_capability: join_planning_v1 ROW city="Raleigh" | INLINESTATS city=COUNT(city) BY city @@ -490,7 +490,7 @@ city:long ; shadowingInternal-Ignore -required_capability: inlinestats +required_capability: join_planning_v1 ROW city = "Zürich" | INLINESTATS x=VALUES(city), x=VALUES(city) @@ -501,7 +501,7 @@ Zürich | Zürich ; byConstant-Ignore -required_capability: inlinestats +required_capability: join_planning_v1 FROM employees | KEEP emp_no, languages @@ -520,7 +520,7 @@ emp_no:integer | languages:integer | max_lang:integer | y:integer ; aggConstant -required_capability: inlinestats +required_capability: join_planning_v1 FROM employees | KEEP emp_no @@ -537,8 +537,8 @@ emp_no:integer | one:integer 10005 | 1 ; -percentile -required_capability: inlinestats +percentile-Ignore +required_capability: join_planning_v1 FROM employees | KEEP emp_no, salary @@ -557,7 +557,7 @@ emp_no:integer | salary:integer | ninety_fifth_salary:double ; byTwoCalculated -required_capability: inlinestats_v2 +required_capability: join_planning_v1 FROM airports | WHERE abbrev IS NOT NULL @@ -575,8 +575,8 @@ abbrev:keyword | scalerank:integer | location:geo_point ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | 20 | -100 | 2 ; -byTwoCalculatedSecondOverwrites -required_capability: inlinestats_v2 +byTwoCalculatedSecondOverwrites-Ignore +required_capability: join_planning_v1 FROM airports | WHERE abbrev IS NOT NULL @@ -594,8 +594,8 @@ abbrev:keyword | scalerank:integer | location:geo_point ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | -100 | 2 ; -byTwoCalculatedSecondOverwritesReferencingFirst -required_capability: inlinestats_v2 +byTwoCalculatedSecondOverwritesReferencingFirst-Ignore +required_capability: join_planning_v1 FROM airports | WHERE abbrev IS NOT NULL @@ -615,8 +615,8 @@ abbrev:keyword | scalerank:integer | location:geo_point ; -groupShadowsAgg -required_capability: inlinestats_v2 +groupShadowsAgg-Ignore +required_capability: join_planning_v1 FROM airports | WHERE abbrev IS NOT NULL @@ -636,7 +636,7 @@ abbrev:keyword | scalerank:integer | location:geo_point ; groupShadowsField -required_capability: inlinestats_v2 +required_capability: join_planning_v1 FROM employees | KEEP emp_no, salary, hire_date diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec index 71f74cbb113ef..9cf96f7c0b6de 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec @@ -163,7 +163,8 @@ aa:keyword | ab:keyword | na:integer | nb:integer bar | bar | null | null ; -lookupBeforeStats +# needs qualifiers for proper field resolution and extraction +lookupBeforeStats-Ignore required_capability: lookup_v4 FROM employees | RENAME languages AS int @@ -212,7 +213,8 @@ emp_no:integer | languages:long | name:keyword 10004 | 5 | five ; -lookupBeforeSort +# needs qualifiers for field resolution +lookupBeforeSort-Ignore required_capability: lookup_v4 FROM employees | WHERE emp_no < 10005 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index 3218962678d9f..a51e4fe995fb3 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -1305,7 +1305,7 @@ foo:long | client_ip:ip 8268153 | 172.21.3.15 ; -multiIndexIndirectUseOfUnionTypesInInlineStats +multiIndexIndirectUseOfUnionTypesInInlineStats-Ignore // TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: // make the csv tests work with multiple indices. required_capability: union_types diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java index 47eca216cf358..325e8500295ea 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java @@ -136,9 +136,7 @@ public static Iterable parameters() { | EVAL ip = to_ip(host), x = to_string(host), y = to_string(host) | INLINESTATS max(id) """, - Build.current().isSnapshot() - ? Map.ofEntries(Map.entry("FROM", 1), Map.entry("EVAL", 1), Map.entry("INLINESTATS", 1)) - : Collections.emptyMap(), + Build.current().isSnapshot() ? Map.of("FROM", 1, "EVAL", 1, "INLINESTATS", 1, "STATS", 1) : Collections.emptyMap(), Build.current().isSnapshot() ? Map.ofEntries(Map.entry("MAX", 1), Map.entry("TO_IP", 1), Map.entry("TO_STRING", 2)) : Collections.emptyMap(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 6439df6ee71ee..a17733af6bd64 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -446,7 +446,14 @@ public enum Cap { /** * Fix pushdown of LIMIT past MV_EXPAND */ - ADD_LIMIT_INSIDE_MV_EXPAND; + ADD_LIMIT_INSIDE_MV_EXPAND, + + /** + * WIP on Join planning + * - Introduce BinaryPlan and co + * - Refactor INLINESTATS and LOOKUP as a JOIN block + */ + JOIN_PLANNING_V1(Build.current().isSnapshot()); private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 4768af4bc8edb..9039177e0643d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -61,6 +61,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.plan.TableIdentifier; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Drop; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; @@ -72,7 +73,6 @@ import org.elasticsearch.xpack.esql.plan.logical.MvExpand; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.Rename; -import org.elasticsearch.xpack.esql.plan.logical.Stats; import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; @@ -405,8 +405,8 @@ protected LogicalPlan doRule(LogicalPlan plan) { childrenOutput.addAll(output); } - if (plan instanceof Stats stats) { - return resolveStats(stats, childrenOutput); + if (plan instanceof Aggregate aggregate) { + return resolveAggregate(aggregate, childrenOutput); } if (plan instanceof Drop d) { @@ -440,12 +440,12 @@ protected LogicalPlan doRule(LogicalPlan plan) { return plan.transformExpressionsOnly(UnresolvedAttribute.class, ua -> maybeResolveAttribute(ua, childrenOutput)); } - private LogicalPlan resolveStats(Stats stats, List childrenOutput) { + private Aggregate resolveAggregate(Aggregate aggregate, List childrenOutput) { // if the grouping is resolved but the aggs are not, use the former to resolve the latter // e.g. STATS a ... GROUP BY a = x + 1 Holder changed = new Holder<>(false); - List groupings = stats.groupings(); - List aggregates = stats.aggregates(); + List groupings = aggregate.groupings(); + List aggregates = aggregate.aggregates(); // first resolve groupings since the aggs might refer to them // trying to globally resolve unresolved attributes will lead to some being marked as unresolvable if (Resolvables.resolved(groupings) == false) { @@ -459,7 +459,7 @@ private LogicalPlan resolveStats(Stats stats, List childrenOutput) { } groupings = newGroupings; if (changed.get()) { - stats = stats.with(stats.child(), newGroupings, stats.aggregates()); + aggregate = aggregate.with(aggregate.child(), newGroupings, aggregate.aggregates()); changed.set(false); } } @@ -475,8 +475,8 @@ private LogicalPlan resolveStats(Stats stats, List childrenOutput) { List resolvedList = NamedExpressions.mergeOutputAttributes(resolved, childrenOutput); List newAggregates = new ArrayList<>(); - for (NamedExpression aggregate : stats.aggregates()) { - var agg = (NamedExpression) aggregate.transformUp(UnresolvedAttribute.class, ua -> { + for (NamedExpression ag : aggregate.aggregates()) { + var agg = (NamedExpression) ag.transformUp(UnresolvedAttribute.class, ua -> { Expression ne = ua; Attribute maybeResolved = maybeResolveAttribute(ua, resolvedList); if (maybeResolved != null) { @@ -489,10 +489,10 @@ private LogicalPlan resolveStats(Stats stats, List childrenOutput) { } // TODO: remove this when Stats interface is removed - stats = changed.get() ? stats.with(stats.child(), groupings, newAggregates) : stats; + aggregate = changed.get() ? aggregate.with(aggregate.child(), groupings, newAggregates) : aggregate; } - return (LogicalPlan) stats; + return aggregate; } private LogicalPlan resolveMvExpand(MvExpand p, List childrenOutput) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java index ee8822889bedb..816388193c5f6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java @@ -18,8 +18,7 @@ import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.optimizer.LogicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; -import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.planner.Mapper; +import org.elasticsearch.xpack.esql.planner.mapper.Mapper; import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.esql.session.IndexResolver; @@ -29,8 +28,6 @@ import org.elasticsearch.xpack.esql.stats.PlanningMetricsManager; import org.elasticsearch.xpack.esql.stats.QueryMetric; -import java.util.function.BiConsumer; - import static org.elasticsearch.action.ActionListener.wrap; public class PlanExecutor { @@ -47,7 +44,7 @@ public PlanExecutor(IndexResolver indexResolver, MeterRegistry meterRegistry) { this.indexResolver = indexResolver; this.preAnalyzer = new PreAnalyzer(); this.functionRegistry = new EsqlFunctionRegistry(); - this.mapper = new Mapper(functionRegistry); + this.mapper = new Mapper(); this.metrics = new Metrics(functionRegistry); this.verifier = new Verifier(metrics); this.planningMetricsManager = new PlanningMetricsManager(meterRegistry); @@ -60,7 +57,7 @@ public void esql( EnrichPolicyResolver enrichPolicyResolver, EsqlExecutionInfo executionInfo, IndicesExpressionGrouper indicesExpressionGrouper, - BiConsumer> runPhase, + EsqlSession.PlanRunner planRunner, ActionListener listener ) { final PlanningMetrics planningMetrics = new PlanningMetrics(); @@ -79,7 +76,7 @@ public void esql( ); QueryMetric clientId = QueryMetric.fromString("rest"); metrics.total(clientId); - session.execute(request, executionInfo, runPhase, wrap(x -> { + session.execute(request, executionInfo, planRunner, wrap(x -> { planningMetricsManager.publish(planningMetrics, true); listener.onResponse(x); }, ex -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index fb3a1b5179beb..77c5a494437ab 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateEmptyRelation; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateEquals; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateEvalFoldables; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateInlineEvals; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateNullable; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PruneColumns; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PruneEmptyPlans; @@ -39,13 +40,12 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownRegexExtract; import org.elasticsearch.xpack.esql.optimizer.rules.logical.RemoveStatsOverride; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceAggregateAggExpressionWithEval; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceAggregateNestedExpressionWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceAliasingEvalWithProject; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceLimitAndSortAsTopN; -import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceLookupWithJoin; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceOrderByExpressionWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceRegexMatch; -import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceStatsAggExpressionWithEval; -import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceStatsNestedExpressionWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceTrivialTypeConversions; import org.elasticsearch.xpack.esql.optimizer.rules.logical.SetAsOptimized; import org.elasticsearch.xpack.esql.optimizer.rules.logical.SimplifyComparisonsArithmetics; @@ -54,6 +54,7 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.SplitInWithFoldableValue; import org.elasticsearch.xpack.esql.optimizer.rules.logical.SubstituteFilteredExpression; import org.elasticsearch.xpack.esql.optimizer.rules.logical.SubstituteSpatialSurrogates; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.SubstituteSurrogatePlans; import org.elasticsearch.xpack.esql.optimizer.rules.logical.SubstituteSurrogates; import org.elasticsearch.xpack.esql.optimizer.rules.logical.TranslateMetricsAggregate; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; @@ -121,26 +122,27 @@ protected static Batch substitutions() { return new Batch<>( "Substitutions", Limiter.ONCE, - new ReplaceLookupWithJoin(), + new SubstituteSurrogatePlans(), // translate filtered expressions into aggregate with filters - can't use surrogate expressions because it was // retrofitted for constant folding - this needs to be fixed new SubstituteFilteredExpression(), new RemoveStatsOverride(), // first extract nested expressions inside aggs - new ReplaceStatsNestedExpressionWithEval(), + new ReplaceAggregateNestedExpressionWithEval(), // then extract nested aggs top-level - new ReplaceStatsAggExpressionWithEval(), + new ReplaceAggregateAggExpressionWithEval(), // lastly replace surrogate functions new SubstituteSurrogates(), // translate metric aggregates after surrogate substitution and replace nested expressions with eval (again) new TranslateMetricsAggregate(), - new ReplaceStatsNestedExpressionWithEval(), + new ReplaceAggregateNestedExpressionWithEval(), new ReplaceRegexMatch(), new ReplaceTrivialTypeConversions(), new ReplaceAliasingEvalWithProject(), new SkipQueryOnEmptyMappings(), new SubstituteSpatialSurrogates(), - new ReplaceOrderByExpressionWithEval() + new ReplaceOrderByExpressionWithEval(), + new PropagateInlineEvals() // new NormalizeAggregate(), - waits on https://github.com/elastic/elasticsearch/issues/100634 ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java index 64c32367d0d57..1c256012baeb0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java @@ -30,7 +30,6 @@ public CombineProjections() { } @Override - @SuppressWarnings("unchecked") protected LogicalPlan rule(UnaryPlan plan) { LogicalPlan child = plan.child(); @@ -67,7 +66,7 @@ protected LogicalPlan rule(UnaryPlan plan) { if (grouping instanceof Attribute attribute) { groupingAttrs.add(attribute); } else { - // After applying ReplaceStatsNestedExpressionWithEval, groupings can only contain attributes. + // After applying ReplaceAggregateNestedExpressionWithEval, groupings can only contain attributes. throw new EsqlIllegalArgumentException("Expected an Attribute, got {}", grouping); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateInlineEvals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateInlineEvals.java new file mode 100644 index 0000000000000..d5f131f9f9cef --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateInlineEvals.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules.logical; + +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; +import org.elasticsearch.xpack.esql.plan.logical.join.StubRelation; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +/** + * Replace any evaluation from the inlined aggregation side (right side) to the left side (source) to perform the matching. + * In INLINE m = MIN(x) BY a + b the right side contains STATS m = MIN(X) BY a + b. + * As the grouping key is used to perform the join, the evaluation required for creating it has to be copied to the left side + * as well. + */ +public class PropagateInlineEvals extends OptimizerRules.OptimizerRule { + + @Override + protected LogicalPlan rule(InlineJoin plan) { + // check if there's any grouping that uses a reference on the right side + // if so, look for the source until finding a StubReference + // then copy those on the left side as well + + LogicalPlan left = plan.left(); + LogicalPlan right = plan.right(); + + // grouping references + List groupingAlias = new ArrayList<>(); + Map groupingRefs = new LinkedHashMap<>(); + + // perform only one iteration that does two things + // first checks any aggregate that declares expressions inside the grouping + // second that checks any found references to collect their declaration + right = right.transformDown(p -> { + + if (p instanceof Aggregate aggregate) { + // collect references + for (Expression g : aggregate.groupings()) { + if (g instanceof ReferenceAttribute ref) { + groupingRefs.put(ref.name(), ref); + } + } + } + + // find their declaration and remove it + // TODO: this doesn't take into account aliasing + if (p instanceof Eval eval) { + if (groupingRefs.size() > 0) { + List fields = eval.fields(); + List remainingEvals = new ArrayList<>(fields.size()); + for (Alias f : fields) { + if (groupingRefs.remove(f.name()) != null) { + groupingAlias.add(f); + } else { + remainingEvals.add(f); + } + } + if (remainingEvals.size() != fields.size()) { + // if all fields are moved, replace the eval + p = remainingEvals.size() == 0 ? eval.child() : new Eval(eval.source(), eval.child(), remainingEvals); + } + } + } + return p; + }); + + // copy found evals on the left side + if (groupingAlias.size() > 0) { + left = new Eval(plan.source(), plan.left(), groupingAlias); + } + + // replace the old stub with the new out to capture the new output + return plan.replaceChildren(left, InlineJoin.replaceStub(new StubRelation(right.source(), left.output()), right)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/RemoveStatsOverride.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/RemoveStatsOverride.java index ad424f6882d26..0cabe4376999f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/RemoveStatsOverride.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/RemoveStatsOverride.java @@ -8,17 +8,16 @@ package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.xpack.esql.analysis.AnalyzerRules; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.Stats; import java.util.ArrayList; import java.util.List; /** - * Removes {@link Stats} overrides in grouping, aggregates and across them inside. + * Removes {@link Aggregate} overrides in grouping, aggregates and across them inside. * The overrides appear when the same alias is used multiple times in aggregations * and/or groupings: * {@code STATS x = COUNT(*), x = MIN(a) BY x = b + 1, x = c + 10} @@ -34,26 +33,11 @@ * becomes * {@code STATS max($x + 1) BY $x = a + b} */ -public final class RemoveStatsOverride extends AnalyzerRules.AnalyzerRule { +public final class RemoveStatsOverride extends OptimizerRules.OptimizerRule { @Override - protected boolean skipResolved() { - return false; - } - - @Override - protected LogicalPlan rule(LogicalPlan p) { - if (p.resolved() == false) { - return p; - } - if (p instanceof Stats stats) { - return (LogicalPlan) stats.with( - stats.child(), - removeDuplicateNames(stats.groupings()), - removeDuplicateNames(stats.aggregates()) - ); - } - return p; + protected LogicalPlan rule(Aggregate aggregate) { + return aggregate.with(removeDuplicateNames(aggregate.groupings()), removeDuplicateNames(aggregate.aggregates())); } private static List removeDuplicateNames(List list) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsAggExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAggregateAggExpressionWithEval.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsAggExpressionWithEval.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAggregateAggExpressionWithEval.java index 559546d48eb7d..2361b46b2be6f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsAggExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAggregateAggExpressionWithEval.java @@ -40,8 +40,8 @@ * becomes * stats a = min(x), c = count(*) by g | eval b = a, d = c | keep a, b, c, d, g */ -public final class ReplaceStatsAggExpressionWithEval extends OptimizerRules.OptimizerRule { - public ReplaceStatsAggExpressionWithEval() { +public final class ReplaceAggregateAggExpressionWithEval extends OptimizerRules.OptimizerRule { + public ReplaceAggregateAggExpressionWithEval() { super(OptimizerRules.TransformDirection.UP); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsNestedExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAggregateNestedExpressionWithEval.java similarity index 93% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsNestedExpressionWithEval.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAggregateNestedExpressionWithEval.java index c3eff15bcec9e..173940af19935 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsNestedExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAggregateNestedExpressionWithEval.java @@ -14,9 +14,9 @@ import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.Stats; import java.util.ArrayList; import java.util.HashMap; @@ -24,7 +24,7 @@ import java.util.Map; /** - * Replace nested expressions inside a {@link Stats} with synthetic eval. + * Replace nested expressions inside a {@link Aggregate} with synthetic eval. * {@code STATS SUM(a + 1) BY x % 2} * becomes * {@code EVAL `a + 1` = a + 1, `x % 2` = x % 2 | STATS SUM(`a+1`_ref) BY `x % 2`_ref} @@ -33,17 +33,10 @@ * becomes * {@code EVAL `a + 1` = a + 1, `x % 2` = x % 2 | INLINESTATS SUM(`a+1`_ref) BY `x % 2`_ref} */ -public final class ReplaceStatsNestedExpressionWithEval extends OptimizerRules.OptimizerRule { +public final class ReplaceAggregateNestedExpressionWithEval extends OptimizerRules.OptimizerRule { @Override - protected LogicalPlan rule(LogicalPlan p) { - if (p instanceof Stats stats) { - return rule(stats); - } - return p; - } - - private LogicalPlan rule(Stats aggregate) { + protected LogicalPlan rule(Aggregate aggregate) { List evals = new ArrayList<>(); Map evalNames = new HashMap<>(); Map groupingAttributes = new HashMap<>(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SubstituteSurrogatePlans.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SubstituteSurrogatePlans.java new file mode 100644 index 0000000000000..05e725a22ccea --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SubstituteSurrogatePlans.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules.logical; + +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.SurrogateLogicalPlan; + +public final class SubstituteSurrogatePlans extends OptimizerRules.OptimizerRule { + + public SubstituteSurrogatePlans() { + super(OptimizerRules.TransformDirection.UP); + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (plan instanceof SurrogateLogicalPlan surrogate) { + plan = surrogate.surrogate(); + } + return plan; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java index 290ae2d3ff1be..9f5b35e1eb9fb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java @@ -20,7 +20,6 @@ import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; import org.elasticsearch.xpack.esql.rule.Rule; import java.util.ArrayList; @@ -45,7 +44,7 @@ public PhysicalPlan apply(PhysicalPlan plan) { Holder requiredAttributes = new Holder<>(plan.outputSet()); // This will require updating should we choose to have non-unary execution plans in the future. - return plan.transformDown(UnaryExec.class, currentPlanNode -> { + return plan.transformDown(currentPlanNode -> { if (keepTraversing.get() == false) { return currentPlanNode; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java index c215e86b0045a..1c20f765c6d51 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java @@ -14,11 +14,13 @@ import org.elasticsearch.xpack.esql.core.expression.TypedAttribute; import org.elasticsearch.xpack.esql.optimizer.rules.physical.ProjectAwayColumns; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; +import org.elasticsearch.xpack.esql.plan.physical.LeafExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; import org.elasticsearch.xpack.esql.rule.Rule; +import java.util.ArrayList; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; @@ -40,7 +42,12 @@ public class InsertFieldExtraction extends Rule { public PhysicalPlan apply(PhysicalPlan plan) { // apply the plan locally, adding a field extractor right before data is loaded // by going bottom-up - plan = plan.transformUp(UnaryExec.class, p -> { + plan = plan.transformUp(p -> { + // skip source nodes + if (p instanceof LeafExec) { + return p; + } + var missing = missingAttributes(p); /* @@ -58,9 +65,24 @@ public PhysicalPlan apply(PhysicalPlan plan) { // add extractor if (missing.isEmpty() == false) { - // collect source attributes and add the extractor - var extractor = new FieldExtractExec(p.source(), p.child(), List.copyOf(missing)); - p = p.replaceChild(extractor); + // identify child (for binary nodes) that exports _doc and place the field extractor there + List newChildren = new ArrayList<>(p.children().size()); + boolean found = false; + for (PhysicalPlan child : p.children()) { + if (found == false) { + if (child.outputSet().stream().anyMatch(EsQueryExec::isSourceAttribute)) { + found = true; + // collect source attributes and add the extractor + child = new FieldExtractExec(p.source(), child, List.copyOf(missing)); + } + } + newChildren.add(child); + } + // somehow no doc id + if (found == false) { + throw new IllegalArgumentException("No child with doc id found"); + } + return p.replaceChildren(newChildren); } return p; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/package-info.java index d86729fe785b1..19dbb2deae780 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/package-info.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/package-info.java @@ -121,8 +121,7 @@ * function implementations. *
  • {@link org.elasticsearch.xpack.esql.action.RestEsqlQueryAction Sync} and * {@link org.elasticsearch.xpack.esql.action.RestEsqlAsyncQueryAction async} HTTP API entry points
  • - *
  • {@link org.elasticsearch.xpack.esql.plan.logical.Phased} - Marks a {@link org.elasticsearch.xpack.esql.plan.logical.LogicalPlan} - * node as requiring multiple ESQL executions to run.
  • + * * *

    Query Planner

    @@ -144,7 +143,7 @@ *
  • {@link org.elasticsearch.xpack.esql.analysis.Analyzer Analyzer} resolves references
  • *
  • {@link org.elasticsearch.xpack.esql.analysis.Verifier Verifier} does type checking
  • *
  • {@link org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer LogicalPlanOptimizer} applies many optimizations
  • - *
  • {@link org.elasticsearch.xpack.esql.planner.Mapper Mapper} translates logical plans to phyisical plans
  • + *
  • {@link org.elasticsearch.xpack.esql.planner.mapper.Mapper Mapper} translates logical plans to phyisical plans
  • *
  • {@link org.elasticsearch.xpack.esql.optimizer.PhysicalPlanOptimizer PhysicalPlanOptimizer} - decides what plan fragments to * send to which data nodes
  • *
  • {@link org.elasticsearch.xpack.esql.optimizer.LocalLogicalPlanOptimizer LocalLogicalPlanOptimizer} applies index-specific diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index dc913cd2f14f4..f83af534eaa72 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -84,7 +84,7 @@ */ public class LogicalPlanBuilder extends ExpressionBuilder { - private int queryDepth = 0; + interface PlanFactory extends Function {} /** * Maximum number of commands allowed per query @@ -95,6 +95,8 @@ public LogicalPlanBuilder(QueryParams params) { super(params); } + private int queryDepth = 0; + protected LogicalPlan plan(ParseTree ctx) { LogicalPlan p = ParserUtils.typedParsing(this, ctx, LogicalPlan.class); var errors = this.params.parsingErrors(); @@ -345,7 +347,10 @@ public PlanFactory visitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandCont List groupings = visitGrouping(ctx.grouping); aggregates.addAll(groupings); // TODO: add support for filters - return input -> new InlineStats(source(ctx), input, new ArrayList<>(groupings), aggregates); + return input -> new InlineStats( + source(ctx), + new Aggregate(source(ctx), input, Aggregate.AggregateType.STANDARD, new ArrayList<>(groupings), aggregates) + ); } @Override @@ -519,5 +524,4 @@ public PlanFactory visitLookupCommand(EsqlBaseParser.LookupCommandContext ctx) { return p -> new Lookup(source, p, tableName, matchFields, null /* localRelation will be resolved later*/); } - interface PlanFactory extends Function {} } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java index e1632db4f79a2..e362c9646a8e0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java @@ -28,7 +28,7 @@ import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputAttributes; -public class Aggregate extends UnaryPlan implements Stats { +public class Aggregate extends UnaryPlan { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( LogicalPlan.class, "Aggregate", @@ -110,7 +110,10 @@ public Aggregate replaceChild(LogicalPlan newChild) { return new Aggregate(source(), newChild, aggregateType, groupings, aggregates); } - @Override + public Aggregate with(List newGroupings, List newAggregates) { + return with(child(), newGroupings, newAggregates); + } + public Aggregate with(LogicalPlan child, List newGroupings, List newAggregates) { return new Aggregate(source(), child, aggregateType(), newGroupings, newAggregates); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java index 579b67eb891ac..e65cdda4b6069 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java @@ -6,9 +6,12 @@ */ package org.elasticsearch.xpack.esql.plan.logical; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.Arrays; +import java.util.List; import java.util.Objects; public abstract class BinaryPlan extends LogicalPlan { @@ -29,6 +32,26 @@ public LogicalPlan right() { return right; } + @Override + public final BinaryPlan replaceChildren(List newChildren) { + return replaceChildren(newChildren.get(0), newChildren.get(1)); + } + + public final BinaryPlan replaceLeft(LogicalPlan newLeft) { + return replaceChildren(newLeft, right); + } + + public final BinaryPlan replaceRight(LogicalPlan newRight) { + return replaceChildren(left, newRight); + } + + protected AttributeSet computeReferences() { + // TODO: this needs to be driven by the join config + return Expressions.references(output()); + } + + public abstract BinaryPlan replaceChildren(LogicalPlan left, LogicalPlan right); + @Override public boolean equals(Object obj) { if (this == obj) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java index dd71d1d85c8e2..9e854450a2d34 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java @@ -11,27 +11,16 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockUtils; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.core.Releasables; -import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; -import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; -import org.elasticsearch.xpack.esql.planner.PlannerUtils; import java.io.IOException; import java.util.ArrayList; @@ -43,43 +32,33 @@ /** * Enriches the stream of data with the results of running a {@link Aggregate STATS}. *

    - * This is a {@link Phased} operation that doesn't have a "native" implementation. - * Instead, it's implemented as first running a {@link Aggregate STATS} and then - * a {@link Join}. + * Maps to a dedicated Join implementation, InlineJoin, which is a left join between the main relation and the + * underlying aggregate. *

    */ -public class InlineStats extends UnaryPlan implements NamedWriteable, Phased, Stats { +public class InlineStats extends UnaryPlan implements NamedWriteable, SurrogateLogicalPlan { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( LogicalPlan.class, "InlineStats", InlineStats::new ); - private final List groupings; - private final List aggregates; + private final Aggregate aggregate; private List lazyOutput; - public InlineStats(Source source, LogicalPlan child, List groupings, List aggregates) { - super(source, child); - this.groupings = groupings; - this.aggregates = aggregates; + public InlineStats(Source source, Aggregate aggregate) { + super(source, aggregate); + this.aggregate = aggregate; } public InlineStats(StreamInput in) throws IOException { - this( - Source.readFrom((PlanStreamInput) in), - in.readNamedWriteable(LogicalPlan.class), - in.readNamedWriteableCollectionAsList(Expression.class), - in.readNamedWriteableCollectionAsList(NamedExpression.class) - ); + this(Source.readFrom((PlanStreamInput) in), (Aggregate) in.readNamedWriteable(LogicalPlan.class)); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - out.writeNamedWriteable(child()); - out.writeNamedWriteableCollection(groupings); - out.writeNamedWriteableCollection(aggregates); + out.writeNamedWriteable(aggregate); } @Override @@ -89,27 +68,16 @@ public String getWriteableName() { @Override protected NodeInfo info() { - return NodeInfo.create(this, InlineStats::new, child(), groupings, aggregates); + return NodeInfo.create(this, InlineStats::new, aggregate); } @Override public InlineStats replaceChild(LogicalPlan newChild) { - return new InlineStats(source(), newChild, groupings, aggregates); + return new InlineStats(source(), (Aggregate) newChild); } - @Override - public InlineStats with(LogicalPlan child, List newGroupings, List newAggregates) { - return new InlineStats(source(), child, newGroupings, newAggregates); - } - - @Override - public List groupings() { - return groupings; - } - - @Override - public List aggregates() { - return aggregates; + public Aggregate aggregate() { + return aggregate; } @Override @@ -119,31 +87,51 @@ public String commandName() { @Override public boolean expressionsResolved() { - return Resolvables.resolved(groupings) && Resolvables.resolved(aggregates); + return aggregate.expressionsResolved(); } @Override public List output() { if (this.lazyOutput == null) { - List addedFields = new ArrayList<>(); - AttributeSet set = child().outputSet(); + this.lazyOutput = mergeOutputAttributes(aggregate.output(), aggregate.child().output()); + } + return lazyOutput; + } + + // TODO: in case of inlinestats, the join key is always the grouping + private JoinConfig joinConfig() { + List groupings = aggregate.groupings(); + List namedGroupings = new ArrayList<>(groupings.size()); + for (Expression g : groupings) { + namedGroupings.add(Expressions.attribute(g)); + } - for (NamedExpression agg : aggregates) { - Attribute att = agg.toAttribute(); - if (set.contains(att) == false) { - addedFields.add(agg); - set.add(att); + List leftFields = new ArrayList<>(groupings.size()); + List rightFields = new ArrayList<>(groupings.size()); + List rhsOutput = Join.makeReference(aggregate.output()); + for (Attribute lhs : namedGroupings) { + for (Attribute rhs : rhsOutput) { + if (lhs.name().equals(rhs.name())) { + leftFields.add(lhs); + rightFields.add(rhs); + break; } } - - this.lazyOutput = mergeOutputAttributes(addedFields, child().output()); } - return lazyOutput; + return new JoinConfig(JoinType.LEFT, namedGroupings, leftFields, rightFields); + } + + @Override + public LogicalPlan surrogate() { + // left join between the main relation and the local, lookup relation + Source source = source(); + LogicalPlan left = aggregate.child(); + return new InlineJoin(source, left, InlineJoin.stubSource(aggregate, left), joinConfig()); } @Override public int hashCode() { - return Objects.hash(groupings, aggregates, child()); + return Objects.hash(aggregate, child()); } @Override @@ -157,106 +145,6 @@ public boolean equals(Object obj) { } InlineStats other = (InlineStats) obj; - return Objects.equals(groupings, other.groupings) - && Objects.equals(aggregates, other.aggregates) - && Objects.equals(child(), other.child()); - } - - @Override - public LogicalPlan firstPhase() { - return new Aggregate(source(), child(), Aggregate.AggregateType.STANDARD, groupings, aggregates); - } - - @Override - public LogicalPlan nextPhase(List schema, List firstPhaseResult) { - if (equalsAndSemanticEquals(firstPhase().output(), schema) == false) { - throw new IllegalStateException("Unexpected first phase outputs: " + firstPhase().output() + " vs " + schema); - } - if (groupings.isEmpty()) { - return ungroupedNextPhase(schema, firstPhaseResult); - } - return groupedNextPhase(schema, firstPhaseResult); + return Objects.equals(aggregate, other.aggregate); } - - private LogicalPlan ungroupedNextPhase(List schema, List firstPhaseResult) { - if (firstPhaseResult.size() != 1) { - throw new IllegalArgumentException("expected single row"); - } - Page p = firstPhaseResult.get(0); - if (p.getPositionCount() != 1) { - throw new IllegalArgumentException("expected single row"); - } - List values = new ArrayList<>(schema.size()); - for (int i = 0; i < schema.size(); i++) { - Attribute s = schema.get(i); - Object value = BlockUtils.toJavaObject(p.getBlock(i), 0); - values.add(new Alias(source(), s.name(), new Literal(source(), value, s.dataType()), aggregates.get(i).id())); - } - return new Eval(source(), child(), values); - } - - private static boolean equalsAndSemanticEquals(List left, List right) { - if (left.equals(right) == false) { - return false; - } - for (int i = 0; i < left.size(); i++) { - if (left.get(i).semanticEquals(right.get(i)) == false) { - return false; - } - } - return true; - } - - private LogicalPlan groupedNextPhase(List schema, List firstPhaseResult) { - LocalRelation local = firstPhaseResultsToLocalRelation(schema, firstPhaseResult); - List groupingAttributes = new ArrayList<>(groupings.size()); - for (Expression g : groupings) { - if (g instanceof Attribute a) { - groupingAttributes.add(a); - } else { - throw new IllegalStateException("optimized plans should only have attributes in groups, but got [" + g + "]"); - } - } - List leftFields = new ArrayList<>(groupingAttributes.size()); - List rightFields = new ArrayList<>(groupingAttributes.size()); - List rhsOutput = Join.makeReference(local.output()); - for (Attribute lhs : groupingAttributes) { - for (Attribute rhs : rhsOutput) { - if (lhs.name().equals(rhs.name())) { - leftFields.add(lhs); - rightFields.add(rhs); - break; - } - } - } - JoinConfig config = new JoinConfig(JoinType.LEFT, groupingAttributes, leftFields, rightFields); - return new Join(source(), child(), local, config); - } - - private LocalRelation firstPhaseResultsToLocalRelation(List schema, List firstPhaseResult) { - // Limit ourselves to 1mb of results similar to LOOKUP for now. - long bytesUsed = firstPhaseResult.stream().mapToLong(Page::ramBytesUsedByBlocks).sum(); - if (bytesUsed > ByteSizeValue.ofMb(1).getBytes()) { - throw new IllegalArgumentException("first phase result too large [" + ByteSizeValue.ofBytes(bytesUsed) + "] > 1mb"); - } - int positionCount = firstPhaseResult.stream().mapToInt(Page::getPositionCount).sum(); - Block.Builder[] builders = new Block.Builder[schema.size()]; - Block[] blocks; - try { - for (int b = 0; b < builders.length; b++) { - builders[b] = PlannerUtils.toElementType(schema.get(b).dataType()) - .newBlockBuilder(positionCount, PlannerUtils.NON_BREAKING_BLOCK_FACTORY); - } - for (Page p : firstPhaseResult) { - for (int b = 0; b < builders.length; b++) { - builders[b].copyFrom(p.getBlock(b), 0, p.getPositionCount()); - } - } - blocks = Block.Builder.buildAll(builders); - } finally { - Releasables.closeExpectNoException(builders); - } - return new LocalRelation(source(), schema, LocalSupplier.of(blocks)); - } - } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java index df81d730bcf1b..e07dd9e14649e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java @@ -11,6 +11,7 @@ import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.plan.QueryPlan; +import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; @@ -33,11 +34,12 @@ public static List getNamedWriteables() { Filter.ENTRY, Grok.ENTRY, InlineStats.ENTRY, + InlineJoin.ENTRY, + Join.ENTRY, LocalRelation.ENTRY, Limit.ENTRY, Lookup.ENTRY, MvExpand.ENTRY, - Join.ENTRY, OrderBy.ENTRY, Project.ENTRY, TopN.ENTRY diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java index d6ab24fe44c99..70f8a24cfc87e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java @@ -13,7 +13,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -32,7 +31,7 @@ * Looks up values from the associated {@code tables}. * The class is supposed to be substituted by a {@link Join}. */ -public class Lookup extends UnaryPlan { +public class Lookup extends UnaryPlan implements SurrogateLogicalPlan { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(LogicalPlan.class, "Lookup", Lookup::new); private final Expression tableName; @@ -96,6 +95,12 @@ public LocalRelation localRelation() { return localRelation; } + @Override + public LogicalPlan surrogate() { + // left join between the main relation and the local, lookup relation + return new Join(source(), child(), localRelation, joinConfig()); + } + public JoinConfig joinConfig() { List leftFields = new ArrayList<>(matchFields.size()); List rightFields = new ArrayList<>(matchFields.size()); @@ -113,10 +118,6 @@ public JoinConfig joinConfig() { } @Override - protected AttributeSet computeReferences() { - return new AttributeSet(matchFields); - } - public String commandName() { return "LOOKUP"; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java deleted file mode 100644 index 6923f9e137eab..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.plan.logical; - -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.xpack.esql.analysis.Analyzer; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.util.Holder; - -import java.util.List; - -/** - * Marks a {@link LogicalPlan} node as requiring multiple ESQL executions to run. - * All logical plans are now run by: - *
      - *
    1. {@link Analyzer analyzing} the entire query
    2. - *
    3. {@link Phased#extractFirstPhase extracting} the first phase from the - * logical plan
    4. - *
    5. if there isn't a first phase, run the entire logical plan and return the - * results. you are done.
    6. - *
    7. if there is first phase, run that
    8. - *
    9. {@link Phased#applyResultsFromFirstPhase applying} the results from the - * first phase into the logical plan
    10. - *
    11. start over from step 2 using the new logical plan
    12. - *
    - *

    For example, {@code INLINESTATS} is written like this:

    - *
    {@code
    - * FROM foo
    - * | EVAL bar = a * b
    - * | INLINESTATS m = MAX(bar) BY b
    - * | WHERE m = bar
    - * | LIMIT 1
    - * }
    - *

    And it's split into:

    - *
    {@code
    - * FROM foo
    - * | EVAL bar = a * b
    - * | STATS m = MAX(bar) BY b
    - * }
    - *

    and

    - *
    {@code
    - * FROM foo
    - * | EVAL bar = a * b
    - * | LOOKUP (results of m = MAX(bar) BY b) ON b
    - * | WHERE m = bar
    - * | LIMIT 1
    - * }
    - *

    If there are multiple {@linkplain Phased} nodes in the plan we always - * operate on the lowest one first, counting from the data source "upwards". - * Generally that'll read left to right in the query. So:

    - *
    {@code
    - * FROM foo | INLINESTATS | INLINESTATS
    - * }
    - * becomes - *
    {@code
    - * FROM foo | STATS
    - * }
    - * and - *
    {@code
    - * FROM foo | HASHJOIN | INLINESTATS
    - * }
    - * which is further broken into - *
    {@code
    - * FROM foo | HASHJOIN | STATS
    - * }
    - * and finally: - *
    {@code
    - * FROM foo | HASHJOIN | HASHJOIN
    - * }
    - */ -public interface Phased { - /** - * Return a {@link LogicalPlan} for the first "phase" of this operation. - * The result of this phase will be provided to {@link #nextPhase}. - */ - LogicalPlan firstPhase(); - - /** - * Use the results of plan provided from {@link #firstPhase} to produce the - * next phase of the query. - */ - LogicalPlan nextPhase(List schema, List firstPhaseResult); - - /** - * Find the first {@link Phased} operation and return it's {@link #firstPhase}. - * Or {@code null} if there aren't any {@linkplain Phased} operations. - */ - static LogicalPlan extractFirstPhase(LogicalPlan plan) { - if (false == plan.optimized()) { - throw new IllegalArgumentException("plan must be optimized"); - } - var firstPhase = new Holder(); - plan.forEachUp(t -> { - if (firstPhase.get() == null && t instanceof Phased phased) { - firstPhase.set(phased.firstPhase()); - } - }); - LogicalPlan firstPhasePlan = firstPhase.get(); - if (firstPhasePlan != null) { - firstPhasePlan.setAnalyzed(); - } - return firstPhasePlan; - } - - /** - * Merge the results of {@link #extractFirstPhase} into a {@link LogicalPlan} - * and produce a new {@linkplain LogicalPlan} that will execute the rest of the - * query. This plan may contain another - * {@link #firstPhase}. If it does then it will also need to be - * {@link #extractFirstPhase extracted} and the results will need to be applied - * again by calling this method. Eventually this will produce a plan which - * does not have a {@link #firstPhase} and that is the "final" - * phase of the plan. - */ - static LogicalPlan applyResultsFromFirstPhase(LogicalPlan plan, List schema, List result) { - if (false == plan.analyzed()) { - throw new IllegalArgumentException("plan must be analyzed"); - } - Holder seen = new Holder<>(false); - LogicalPlan applied = plan.transformUp(logicalPlan -> { - if (seen.get() == false && logicalPlan instanceof Phased phased) { - seen.set(true); - return phased.nextPhase(schema, result); - } - return logicalPlan; - }); - applied.setAnalyzed(); - return applied; - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java deleted file mode 100644 index c46c735e7482e..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.plan.logical; - -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.tree.Source; - -import java.util.List; - -/** - * STATS-like operations. Like {@link Aggregate} and {@link InlineStats}. - */ -public interface Stats { - /** - * The user supplied text in the query for this command. - */ - Source source(); - - /** - * Rebuild this plan with new groupings and new aggregates. - */ - Stats with(LogicalPlan child, List newGroupings, List newAggregates); - - /** - * Have all the expressions in this plan been resolved? - */ - boolean expressionsResolved(); - - /** - * The operation directly before this one in the plan. - */ - LogicalPlan child(); - - /** - * List containing both the aggregate expressions and grouping expressions. - */ - List aggregates(); - - /** - * List containing just the grouping expressions. - */ - List groupings(); - -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/SurrogateLogicalPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/SurrogateLogicalPlan.java new file mode 100644 index 0000000000000..96a64452ea762 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/SurrogateLogicalPlan.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plan.logical; + +/** + * Interface signaling to the planner that the declaring plan should be replaced with the surrogate plan. + * This usually occurs for predefined commands that get "normalized" into a more generic form. + * @see org.elasticsearch.xpack.esql.expression.SurrogateExpression + */ +public interface SurrogateLogicalPlan { + /** + * Returns the plan to be replaced with. + */ + LogicalPlan surrogate(); +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/InlineJoin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/InlineJoin.java new file mode 100644 index 0000000000000..87c9db1db4807 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/InlineJoin.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plan.logical.join; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.util.CollectionUtils; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * Specialized type of join where the source of the left and right plans are the same. The plans themselves can contain different nodes + * however at the core, both have the same source. + *

    Furthermore, this type of join indicates the right side is performing a subquery identical to the left side - meaning its result is + * required before joining with the left side. + *

    + * This helps the model since we want any transformation applied to the source to show up on both sides of the join - due the immutability + * of the tree (which uses value instead of reference semantics), even if the same node instance would be used, any transformation applied + * on one side (which would create a new source) would not be reflected on the other side (still use the old source instance). + * This dedicated instance handles that by replacing the source of the right with a StubRelation that simplifies copies the output of the + * source, making it easy to serialize/deserialize as well as traversing the plan. + */ +public class InlineJoin extends Join { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + LogicalPlan.class, + "InlineJoin", + InlineJoin::readFrom + ); + + /** + * Replaces the source of the target plan with a stub preserving the output of the source plan. + */ + public static LogicalPlan stubSource(UnaryPlan sourcePlan, LogicalPlan target) { + return sourcePlan.replaceChild(new StubRelation(sourcePlan.source(), target.output())); + } + + /** + * Replaces the stubbed source with the actual source. + */ + public static LogicalPlan replaceStub(LogicalPlan source, LogicalPlan stubbed) { + return stubbed.transformUp(StubRelation.class, stubRelation -> source); + } + + /** + * TODO: perform better planning + * Keep the join in place or replace it with a projection in case no grouping is necessary. + */ + public static LogicalPlan inlineData(InlineJoin target, LocalRelation data) { + if (target.config().matchFields().isEmpty()) { + List schema = data.output(); + Block[] blocks = data.supplier().get(); + List aliases = new ArrayList<>(schema.size()); + for (int i = 0; i < schema.size(); i++) { + Attribute attr = schema.get(i); + aliases.add(new Alias(attr.source(), attr.name(), Literal.of(attr, BlockUtils.toJavaObject(blocks[i], 0)))); + } + LogicalPlan left = target.left(); + return new Project(target.source(), left, CollectionUtils.combine(left.output(), aliases)); + } else { + return target.replaceRight(data); + } + } + + public InlineJoin(Source source, LogicalPlan left, LogicalPlan right, JoinConfig config) { + super(source, left, right, config); + } + + public InlineJoin( + Source source, + LogicalPlan left, + LogicalPlan right, + JoinType type, + List matchFields, + List leftFields, + List rightFields + ) { + super(source, left, right, type, matchFields, leftFields, rightFields); + } + + private static InlineJoin readFrom(StreamInput in) throws IOException { + PlanStreamInput planInput = (PlanStreamInput) in; + Source source = Source.readFrom(planInput); + LogicalPlan left = in.readNamedWriteable(LogicalPlan.class); + LogicalPlan right = in.readNamedWriteable(LogicalPlan.class); + JoinConfig config = new JoinConfig(in); + return new InlineJoin(source, left, replaceStub(left, right), config); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + protected NodeInfo info() { + // Do not just add the JoinConfig as a whole - this would prevent correctly registering the + // expressions and references. + JoinConfig config = config(); + return NodeInfo.create( + this, + InlineJoin::new, + left(), + right(), + config.type(), + config.matchFields(), + config.leftFields(), + config.rightFields() + ); + } + + @Override + public Join replaceChildren(LogicalPlan left, LogicalPlan right) { + return new InlineJoin(source(), left, right, config()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java index e920028f04cb9..f9be61ed2c8d7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java @@ -61,7 +61,7 @@ public Join(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - source().writeTo(out); + Source.EMPTY.writeTo(out); out.writeNamedWriteable(left()); out.writeNamedWriteable(right()); config.writeTo(out); @@ -76,11 +76,6 @@ public JoinConfig config() { return config; } - @Override - protected AttributeSet computeReferences() { - return Expressions.references(config.leftFields()).combine(Expressions.references(config.rightFields())); - } - @Override protected NodeInfo info() { // Do not just add the JoinConfig as a whole - this would prevent correctly registering the @@ -98,10 +93,6 @@ protected NodeInfo info() { } @Override - public Join replaceChildren(List newChildren) { - return new Join(source(), newChildren.get(0), newChildren.get(1), config); - } - public Join replaceChildren(LogicalPlan left, LogicalPlan right) { return new Join(source(), left, right, config); } @@ -126,7 +117,7 @@ public static List computeOutput(List leftOutput, List { // Right side becomes nullable. List fieldsAddedFromRight = removeCollisionsWithMatchFields(rightOutput, matchFieldSet, matchFieldNames); - yield mergeOutputAttributes(makeNullable(makeReference(fieldsAddedFromRight)), leftOutput); + yield mergeOutputAttributes(fieldsAddedFromRight, leftOutput); } default -> throw new UnsupportedOperationException("Other JOINs than LEFT not supported"); }; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/StubRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/StubRelation.java new file mode 100644 index 0000000000000..4f04024d61d46 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/StubRelation.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plan.logical.join; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.plan.logical.LeafPlan; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.emptyList; + +/** + * Synthetic {@link LogicalPlan} used by the planner that the child plan is referred elsewhere. + * Essentially this means + * referring to another node in the plan and acting as a relationship. + * Used for duplicating parts of the plan without having to clone the nodes. + */ +public class StubRelation extends LeafPlan { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + LogicalPlan.class, + "StubRelation", + StubRelation::new + ); + + private final List output; + + public StubRelation(Source source, List output) { + super(source); + this.output = output; + } + + public StubRelation(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), emptyList()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Source.EMPTY.writeTo(out); + } + + @Override + public List output() { + return output; + } + + @Override + public boolean expressionsResolved() { + return true; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StubRelation::new, output); + } + + @Override + public String commandName() { + return ""; + } + + @Override + public int hashCode() { + return Objects.hash(StubRelation.class, output); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + StubRelation other = (StubRelation) obj; + return Objects.equals(output, other.output()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/ImmediateLocalSupplier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/ImmediateLocalSupplier.java index 8bcf5c472b2d0..c076a23891bd8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/ImmediateLocalSupplier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/ImmediateLocalSupplier.java @@ -17,10 +17,10 @@ /** * A {@link LocalSupplier} that contains already filled {@link Block}s. */ -class ImmediateLocalSupplier implements LocalSupplier { +public class ImmediateLocalSupplier implements LocalSupplier { private final Block[] blocks; - ImmediateLocalSupplier(Block[] blocks) { + public ImmediateLocalSupplier(Block[] blocks) { this.blocks = blocks; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java new file mode 100644 index 0000000000000..6f200bad17a72 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plan.physical; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.esql.core.tree.Source; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +public abstract class BinaryExec extends PhysicalPlan { + + private final PhysicalPlan left, right; + + protected BinaryExec(Source source, PhysicalPlan left, PhysicalPlan right) { + super(source, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + @Override + public final BinaryExec replaceChildren(List newChildren) { + return replaceChildren(newChildren.get(0), newChildren.get(1)); + } + + protected abstract BinaryExec replaceChildren(PhysicalPlan newLeft, PhysicalPlan newRight); + + public PhysicalPlan left() { + return left; + } + + public PhysicalPlan right() { + return right; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Source.EMPTY.writeTo(out); + out.writeNamedWriteable(left); + out.writeNamedWriteable(right); + } + + @Override + public int hashCode() { + return Objects.hash(left, right); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryExec other = (BinaryExec) obj; + return Objects.equals(left, other.left) && Objects.equals(right, other.right); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java index 7594c971b7ffc..5b1ee14642dbe 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java @@ -111,6 +111,10 @@ public PhysicalPlan estimateRowSize(State state) { : new FragmentExec(source(), fragment, esFilter, estimatedRowSize, reducer); } + public FragmentExec withFragment(LogicalPlan fragment) { + return Objects.equals(fragment, this.fragment) ? this : new FragmentExec(source(), fragment, esFilter, estimatedRowSize, reducer); + } + public FragmentExec withFilter(QueryBuilder filter) { return Objects.equals(filter, this.esFilter) ? this : new FragmentExec(source(), fragment, filter, estimatedRowSize, reducer); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java index 5b83c4d95cabf..4574c3720f8ee 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java @@ -22,14 +22,13 @@ import java.util.Objects; import java.util.Set; -public class HashJoinExec extends UnaryExec implements EstimatesRowSize { +public class HashJoinExec extends BinaryExec implements EstimatesRowSize { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( PhysicalPlan.class, "HashJoinExec", HashJoinExec::new ); - private final LocalSourceExec joinData; private final List matchFields; private final List leftFields; private final List rightFields; @@ -38,15 +37,14 @@ public class HashJoinExec extends UnaryExec implements EstimatesRowSize { public HashJoinExec( Source source, - PhysicalPlan child, - LocalSourceExec hashData, + PhysicalPlan left, + PhysicalPlan hashData, List matchFields, List leftFields, List rightFields, List output ) { - super(source, child); - this.joinData = hashData; + super(source, left, hashData); this.matchFields = matchFields; this.leftFields = leftFields; this.rightFields = rightFields; @@ -54,8 +52,7 @@ public HashJoinExec( } private HashJoinExec(StreamInput in) throws IOException { - super(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(PhysicalPlan.class)); - this.joinData = new LocalSourceExec(in); + super(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(PhysicalPlan.class), in.readNamedWriteable(PhysicalPlan.class)); this.matchFields = in.readNamedWriteableCollectionAsList(Attribute.class); this.leftFields = in.readNamedWriteableCollectionAsList(Attribute.class); this.rightFields = in.readNamedWriteableCollectionAsList(Attribute.class); @@ -64,9 +61,7 @@ private HashJoinExec(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - source().writeTo(out); - out.writeNamedWriteable(child()); - joinData.writeTo(out); + super.writeTo(out); out.writeNamedWriteableCollection(matchFields); out.writeNamedWriteableCollection(leftFields); out.writeNamedWriteableCollection(rightFields); @@ -78,8 +73,8 @@ public String getWriteableName() { return ENTRY.name; } - public LocalSourceExec joinData() { - return joinData; + public PhysicalPlan joinData() { + return right(); } public List matchFields() { @@ -97,7 +92,7 @@ public List rightFields() { public Set addedFields() { if (lazyAddedFields == null) { lazyAddedFields = outputSet(); - lazyAddedFields.removeAll(child().output()); + lazyAddedFields.removeAll(left().output()); } return lazyAddedFields; } @@ -113,19 +108,25 @@ public List output() { return output; } + @Override + public AttributeSet inputSet() { + // TODO: this is a hack until qualifiers land since the right side is always materialized + return left().outputSet(); + } + @Override protected AttributeSet computeReferences() { return Expressions.references(leftFields); } @Override - public HashJoinExec replaceChild(PhysicalPlan newChild) { - return new HashJoinExec(source(), newChild, joinData, matchFields, leftFields, rightFields, output); + public HashJoinExec replaceChildren(PhysicalPlan left, PhysicalPlan right) { + return new HashJoinExec(source(), left, right, matchFields, leftFields, rightFields, output); } @Override protected NodeInfo info() { - return NodeInfo.create(this, HashJoinExec::new, child(), joinData, matchFields, leftFields, rightFields, output); + return NodeInfo.create(this, HashJoinExec::new, left(), right(), matchFields, leftFields, rightFields, output); } @Override @@ -140,8 +141,7 @@ public boolean equals(Object o) { return false; } HashJoinExec hash = (HashJoinExec) o; - return joinData.equals(hash.joinData) - && matchFields.equals(hash.matchFields) + return matchFields.equals(hash.matchFields) && leftFields.equals(hash.leftFields) && rightFields.equals(hash.rightFields) && output.equals(hash.output); @@ -149,6 +149,6 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(super.hashCode(), joinData, matchFields, leftFields, rightFields, output); + return Objects.hash(super.hashCode(), matchFields, leftFields, rightFields, output); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/PhysicalPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/PhysicalPlan.java index 9ddcd97218069..ecf78908d6d3e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/PhysicalPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/PhysicalPlan.java @@ -43,6 +43,7 @@ public static List getNamedWriteables() { ProjectExec.ENTRY, RowExec.ENTRY, ShowExec.ENTRY, + SubqueryExec.ENTRY, TopNExec.ENTRY ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/SubqueryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/SubqueryExec.java new file mode 100644 index 0000000000000..adc84f06a939e --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/SubqueryExec.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plan.physical; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; + +import java.io.IOException; +import java.util.Objects; + +/** + * Physical plan representing a subquery, meaning a section of the plan that needs to be executed independently. + */ +public class SubqueryExec extends UnaryExec { + + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + PhysicalPlan.class, + "SubqueryExec", + SubqueryExec::new + ); + + public SubqueryExec(Source source, PhysicalPlan child) { + super(source, child); + } + + private SubqueryExec(StreamInput in) throws IOException { + super(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(PhysicalPlan.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(child()); + } + + @Override + public SubqueryExec replaceChild(PhysicalPlan newChild) { + return new SubqueryExec(source(), newChild); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, SubqueryExec::new, child()); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; + SubqueryExec that = (SubqueryExec) o; + return Objects.equals(child(), that.child()); + } + + @Override + public int hashCode() { + return super.hashCode(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index dc732258d9fa5..0d0b8dda5fc74 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -496,18 +496,19 @@ private PhysicalOperation planEnrich(EnrichExec enrich, LocalExecutionPlannerCon } private PhysicalOperation planHashJoin(HashJoinExec join, LocalExecutionPlannerContext context) { - PhysicalOperation source = plan(join.child(), context); + PhysicalOperation source = plan(join.left(), context); int positionsChannel = source.layout.numberOfChannels(); Layout.Builder layoutBuilder = source.layout.builder(); for (Attribute f : join.output()) { - if (join.child().outputSet().contains(f)) { + if (join.left().outputSet().contains(f)) { continue; } layoutBuilder.append(f); } Layout layout = layoutBuilder.build(); - Block[] localData = join.joinData().supplier().get(); + LocalSourceExec localSourceExec = (LocalSourceExec) join.joinData(); + Block[] localData = localSourceExec.supplier().get(); RowInTableLookupOperator.Key[] keys = new RowInTableLookupOperator.Key[join.leftFields().size()]; int[] blockMapping = new int[join.leftFields().size()]; @@ -515,8 +516,9 @@ private PhysicalOperation planHashJoin(HashJoinExec join, LocalExecutionPlannerC Attribute left = join.leftFields().get(k); Attribute right = join.rightFields().get(k); Block localField = null; - for (int l = 0; l < join.joinData().output().size(); l++) { - if (join.joinData().output().get(l).name().equals((((NamedExpression) right).name()))) { + List output = join.joinData().output(); + for (int l = 0; l < output.size(); l++) { + if (output.get(l).name().equals(right.name())) { localField = localData[l]; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java deleted file mode 100644 index a8f820c8ef3fd..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java +++ /dev/null @@ -1,365 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.planner; - -import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.compute.aggregation.AggregatorMode; -import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.BinaryPlan; -import org.elasticsearch.xpack.esql.plan.logical.Dissect; -import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.Eval; -import org.elasticsearch.xpack.esql.plan.logical.Filter; -import org.elasticsearch.xpack.esql.plan.logical.Grok; -import org.elasticsearch.xpack.esql.plan.logical.Limit; -import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.MvExpand; -import org.elasticsearch.xpack.esql.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.Row; -import org.elasticsearch.xpack.esql.plan.logical.TopN; -import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; -import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; -import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; -import org.elasticsearch.xpack.esql.plan.logical.show.ShowInfo; -import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; -import org.elasticsearch.xpack.esql.plan.physical.DissectExec; -import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; -import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; -import org.elasticsearch.xpack.esql.plan.physical.EvalExec; -import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; -import org.elasticsearch.xpack.esql.plan.physical.FilterExec; -import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; -import org.elasticsearch.xpack.esql.plan.physical.GrokExec; -import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; -import org.elasticsearch.xpack.esql.plan.physical.LimitExec; -import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; -import org.elasticsearch.xpack.esql.plan.physical.MvExpandExec; -import org.elasticsearch.xpack.esql.plan.physical.OrderExec; -import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.plan.physical.ProjectExec; -import org.elasticsearch.xpack.esql.plan.physical.RowExec; -import org.elasticsearch.xpack.esql.plan.physical.ShowExec; -import org.elasticsearch.xpack.esql.plan.physical.TopNExec; -import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; - -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - *

    This class is part of the planner

    - * - *

    Translates the logical plan into a physical plan. This is where we start to decide what will be executed on the data nodes and what - * will be executed on the coordinator nodes. This step creates {@link org.elasticsearch.xpack.esql.plan.physical.FragmentExec} instances, - * which represent logical plan fragments to be sent to the data nodes and {@link org.elasticsearch.xpack.esql.plan.physical.ExchangeExec} - * instances, which represent data being sent back from the data nodes to the coordinating node.

    - */ -public class Mapper { - - private final EsqlFunctionRegistry functionRegistry; - private final boolean localMode; // non-coordinator (data node) mode - - public Mapper(EsqlFunctionRegistry functionRegistry) { - this.functionRegistry = functionRegistry; - localMode = false; - } - - public Mapper(boolean localMode) { - this.functionRegistry = null; - this.localMode = localMode; - } - - public PhysicalPlan map(LogicalPlan p) { - // - // Leaf Node - // - - // Source - if (p instanceof EsRelation esRelation) { - return localMode ? new EsSourceExec(esRelation) : new FragmentExec(p); - } - - if (p instanceof Row row) { - return new RowExec(row.source(), row.fields()); - } - - if (p instanceof LocalRelation local) { - return new LocalSourceExec(local.source(), local.output(), local.supplier()); - } - - // Commands - if (p instanceof ShowInfo showInfo) { - return new ShowExec(showInfo.source(), showInfo.output(), showInfo.values()); - } - - // - // Unary Plan - // - if (localMode == false && p instanceof Enrich enrich && enrich.mode() == Enrich.Mode.REMOTE) { - // When we have remote enrich, we want to put it under FragmentExec, so it would be executed remotely. - // We're only going to do it on the coordinator node. - // The way we're going to do it is as follows: - // 1. Locate FragmentExec in the tree. If we have no FragmentExec, we won't do anything. - // 2. Put this Enrich under it, removing everything that was below it previously. - // 3. Above FragmentExec, we should deal with pipeline breakers, since pipeline ops already are supposed to go under - // FragmentExec. - // 4. Aggregates can't appear here since the plan should have errored out if we have aggregate inside remote Enrich. - // 5. So we should be keeping: LimitExec, ExchangeExec, OrderExec, TopNExec (actually OrderExec probably can't happen anyway). - - var child = map(enrich.child()); - AtomicBoolean hasFragment = new AtomicBoolean(false); - - var childTransformed = child.transformUp((f) -> { - // Once we reached FragmentExec, we stuff our Enrich under it - if (f instanceof FragmentExec) { - hasFragment.set(true); - return new FragmentExec(p); - } - if (f instanceof EnrichExec enrichExec) { - // It can only be ANY because COORDINATOR would have errored out earlier, and REMOTE should be under FragmentExec - assert enrichExec.mode() == Enrich.Mode.ANY : "enrich must be in ANY mode here"; - return enrichExec.child(); - } - if (f instanceof UnaryExec unaryExec) { - if (f instanceof LimitExec || f instanceof ExchangeExec || f instanceof OrderExec || f instanceof TopNExec) { - return f; - } else { - return unaryExec.child(); - } - } - // Currently, it's either UnaryExec or LeafExec. Leaf will either resolve to FragmentExec or we'll ignore it. - return f; - }); - - if (hasFragment.get()) { - return childTransformed; - } - } - - if (p instanceof UnaryPlan ua) { - var child = map(ua.child()); - if (child instanceof FragmentExec) { - // COORDINATOR enrich must not be included to the fragment as it has to be executed on the coordinating node - if (p instanceof Enrich enrich && enrich.mode() == Enrich.Mode.COORDINATOR) { - assert localMode == false : "coordinator enrich must not be included to a fragment and re-planned locally"; - child = addExchangeForFragment(enrich.child(), child); - return map(enrich, child); - } - // in case of a fragment, push to it any current streaming operator - if (isPipelineBreaker(p) == false) { - return new FragmentExec(p); - } - } - return map(ua, child); - } - - if (p instanceof BinaryPlan bp) { - var left = map(bp.left()); - var right = map(bp.right()); - - if (left instanceof FragmentExec) { - if (right instanceof FragmentExec) { - throw new EsqlIllegalArgumentException("can't plan binary [" + p.nodeName() + "]"); - } - // in case of a fragment, push to it any current streaming operator - return new FragmentExec(p); - } - if (right instanceof FragmentExec) { - // in case of a fragment, push to it any current streaming operator - return new FragmentExec(p); - } - return map(bp, left, right); - } - - throw new EsqlIllegalArgumentException("unsupported logical plan node [" + p.nodeName() + "]"); - } - - static boolean isPipelineBreaker(LogicalPlan p) { - return p instanceof Aggregate || p instanceof TopN || p instanceof Limit || p instanceof OrderBy; - } - - private PhysicalPlan map(UnaryPlan p, PhysicalPlan child) { - // - // Pipeline operators - // - if (p instanceof Filter f) { - return new FilterExec(f.source(), child, f.condition()); - } - - if (p instanceof Project pj) { - return new ProjectExec(pj.source(), child, pj.projections()); - } - - if (p instanceof Eval eval) { - return new EvalExec(eval.source(), child, eval.fields()); - } - - if (p instanceof Dissect dissect) { - return new DissectExec(dissect.source(), child, dissect.input(), dissect.parser(), dissect.extractedFields()); - } - - if (p instanceof Grok grok) { - return new GrokExec(grok.source(), child, grok.input(), grok.parser(), grok.extractedFields()); - } - - if (p instanceof Enrich enrich) { - return new EnrichExec( - enrich.source(), - child, - enrich.mode(), - enrich.policy().getType(), - enrich.matchField(), - BytesRefs.toString(enrich.policyName().fold()), - enrich.policy().getMatchField(), - enrich.concreteIndices(), - enrich.enrichFields() - ); - } - - if (p instanceof MvExpand mvExpand) { - MvExpandExec result = new MvExpandExec(mvExpand.source(), map(mvExpand.child()), mvExpand.target(), mvExpand.expanded()); - if (mvExpand.limit() != null) { - // MvExpand could have an inner limit - // see PushDownAndCombineLimits rule - return new LimitExec(result.source(), result, new Literal(Source.EMPTY, mvExpand.limit(), DataType.INTEGER)); - } - return result; - } - - // - // Pipeline breakers - // - if (p instanceof Limit limit) { - return map(limit, child); - } - - if (p instanceof OrderBy o) { - return map(o, child); - } - - if (p instanceof TopN topN) { - return map(topN, child); - } - - if (p instanceof Aggregate aggregate) { - return map(aggregate, child); - } - - throw new EsqlIllegalArgumentException("unsupported logical plan node [" + p.nodeName() + "]"); - } - - private PhysicalPlan map(Aggregate aggregate, PhysicalPlan child) { - List intermediateAttributes = AbstractPhysicalOperationProviders.intermediateAttributes( - aggregate.aggregates(), - aggregate.groupings() - ); - // in local mode the only aggregate that can appear is the partial side under an exchange - if (localMode) { - child = aggExec(aggregate, child, AggregatorMode.INITIAL, intermediateAttributes); - } - // otherwise create both sides of the aggregate (for parallelism purposes), if no fragment is present - // TODO: might be easier long term to end up with just one node and split if necessary instead of doing that always at this stage - else { - child = addExchangeForFragment(aggregate, child); - // exchange was added - use the intermediates for the output - if (child instanceof ExchangeExec exchange) { - child = new ExchangeExec(child.source(), intermediateAttributes, true, exchange.child()); - } - // if no exchange was added, create the partial aggregate - else { - child = aggExec(aggregate, child, AggregatorMode.INITIAL, intermediateAttributes); - } - - // regardless, always add the final agg - child = aggExec(aggregate, child, AggregatorMode.FINAL, intermediateAttributes); - } - - return child; - } - - private static AggregateExec aggExec( - Aggregate aggregate, - PhysicalPlan child, - AggregatorMode aggMode, - List intermediateAttributes - ) { - return new AggregateExec( - aggregate.source(), - child, - aggregate.groupings(), - aggregate.aggregates(), - aggMode, - intermediateAttributes, - null - ); - } - - private PhysicalPlan map(Limit limit, PhysicalPlan child) { - child = addExchangeForFragment(limit, child); - return new LimitExec(limit.source(), child, limit.limit()); - } - - private PhysicalPlan map(OrderBy o, PhysicalPlan child) { - child = addExchangeForFragment(o, child); - return new OrderExec(o.source(), child, o.order()); - } - - private PhysicalPlan map(TopN topN, PhysicalPlan child) { - child = addExchangeForFragment(topN, child); - return new TopNExec(topN.source(), child, topN.order(), topN.limit(), null); - } - - private PhysicalPlan addExchangeForFragment(LogicalPlan logical, PhysicalPlan child) { - // in case of fragment, preserve the streaming operator (order-by, limit or topN) for local replanning - // no need to do it for an aggregate since it gets split - // and clone it as a physical node along with the exchange - if (child instanceof FragmentExec) { - child = new FragmentExec(logical); - child = new ExchangeExec(child.source(), child); - } - return child; - } - - private PhysicalPlan map(BinaryPlan p, PhysicalPlan lhs, PhysicalPlan rhs) { - if (p instanceof Join join) { - PhysicalPlan hash = tryHashJoin(join, lhs, rhs); - if (hash != null) { - return hash; - } - } - throw new EsqlIllegalArgumentException("unsupported logical plan node [" + p.nodeName() + "]"); - } - - private PhysicalPlan tryHashJoin(Join join, PhysicalPlan lhs, PhysicalPlan rhs) { - JoinConfig config = join.config(); - if (config.type() != JoinType.LEFT) { - return null; - } - if (rhs instanceof LocalSourceExec local) { - return new HashJoinExec( - join.source(), - lhs, - local, - config.matchFields(), - config.leftFields(), - config.rightFields(), - join.output() - ); - } - return null; - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index 7868984d6b6e2..1758edb386e59 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -49,6 +49,8 @@ import org.elasticsearch.xpack.esql.plan.physical.OrderExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; +import org.elasticsearch.xpack.esql.planner.mapper.LocalMapper; +import org.elasticsearch.xpack.esql.planner.mapper.Mapper; import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.stats.SearchStats; @@ -88,7 +90,7 @@ public static PhysicalPlan dataNodeReductionPlan(LogicalPlan plan, PhysicalPlan if (pipelineBreakers.isEmpty() == false) { UnaryPlan pipelineBreaker = (UnaryPlan) pipelineBreakers.get(0); if (pipelineBreaker instanceof TopN) { - Mapper mapper = new Mapper(true); + LocalMapper mapper = new LocalMapper(); var physicalPlan = EstimatesRowSize.estimateRowSize(0, mapper.map(plan)); return physicalPlan.collectFirstChildren(TopNExec.class::isInstance).get(0); } else if (pipelineBreaker instanceof Limit limit) { @@ -96,7 +98,7 @@ public static PhysicalPlan dataNodeReductionPlan(LogicalPlan plan, PhysicalPlan } else if (pipelineBreaker instanceof OrderBy order) { return new OrderExec(order.source(), unused, order.order()); } else if (pipelineBreaker instanceof Aggregate) { - Mapper mapper = new Mapper(true); + LocalMapper mapper = new LocalMapper(); var physicalPlan = EstimatesRowSize.estimateRowSize(0, mapper.map(plan)); var aggregate = (AggregateExec) physicalPlan.collectFirstChildren(AggregateExec.class::isInstance).get(0); return aggregate.withMode(AggregatorMode.INITIAL); @@ -151,13 +153,13 @@ public static PhysicalPlan localPlan( LocalLogicalPlanOptimizer logicalOptimizer, LocalPhysicalPlanOptimizer physicalOptimizer ) { - final Mapper mapper = new Mapper(true); + final LocalMapper localMapper = new LocalMapper(); var isCoordPlan = new Holder<>(Boolean.TRUE); var localPhysicalPlan = plan.transformUp(FragmentExec.class, f -> { isCoordPlan.set(Boolean.FALSE); var optimizedFragment = logicalOptimizer.localOptimize(f.fragment()); - var physicalFragment = mapper.map(optimizedFragment); + var physicalFragment = localMapper.map(optimizedFragment); var filter = f.esFilter(); if (filter != null) { physicalFragment = physicalFragment.transformUp( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/LocalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/LocalMapper.java new file mode 100644 index 0000000000000..ceffae704cff0 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/LocalMapper.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.planner.mapper; + +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.BinaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.LeafPlan; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; +import org.elasticsearch.xpack.esql.plan.logical.TopN; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.join.Join; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; +import org.elasticsearch.xpack.esql.plan.physical.LimitExec; +import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.OrderExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.plan.physical.TopNExec; + +import java.util.List; + +/** + *

    Maps a (local) logical plan into a (local) physical plan. This class is the equivalent of {@link Mapper} but for data nodes. + * + */ +public class LocalMapper { + + public PhysicalPlan map(LogicalPlan p) { + + if (p instanceof LeafPlan leaf) { + return mapLeaf(leaf); + } + + if (p instanceof UnaryPlan unary) { + return mapUnary(unary); + } + + if (p instanceof BinaryPlan binary) { + return mapBinary(binary); + } + + return MapperUtils.unsupported(p); + } + + private PhysicalPlan mapLeaf(LeafPlan leaf) { + if (leaf instanceof EsRelation esRelation) { + return new EsSourceExec(esRelation); + } + + return MapperUtils.mapLeaf(leaf); + } + + private PhysicalPlan mapUnary(UnaryPlan unary) { + PhysicalPlan mappedChild = map(unary.child()); + + // + // Pipeline breakers + // + + if (unary instanceof Aggregate aggregate) { + List intermediate = MapperUtils.intermediateAttributes(aggregate); + return MapperUtils.aggExec(aggregate, mappedChild, AggregatorMode.INITIAL, intermediate); + } + + if (unary instanceof Limit limit) { + return new LimitExec(limit.source(), mappedChild, limit.limit()); + } + + if (unary instanceof OrderBy o) { + return new OrderExec(o.source(), mappedChild, o.order()); + } + + if (unary instanceof TopN topN) { + return new TopNExec(topN.source(), mappedChild, topN.order(), topN.limit(), null); + } + + // + // Pipeline operators + // + + return MapperUtils.mapUnary(unary, mappedChild); + } + + private PhysicalPlan mapBinary(BinaryPlan binary) { + // special handling for inlinejoin - join + subquery which has to be executed first (async) and replaced by its result + if (binary instanceof Join join) { + JoinConfig config = join.config(); + if (config.type() != JoinType.LEFT) { + throw new EsqlIllegalArgumentException("unsupported join type [" + config.type() + "]"); + } + + PhysicalPlan left = map(binary.left()); + PhysicalPlan right = map(binary.right()); + + if (right instanceof LocalSourceExec == false) { + throw new EsqlIllegalArgumentException("right side of a join must be a local source"); + } + + return new HashJoinExec( + join.source(), + left, + right, + config.matchFields(), + config.leftFields(), + config.rightFields(), + join.output() + ); + } + + return MapperUtils.unsupported(binary); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/Mapper.java new file mode 100644 index 0000000000000..b717af650b7a6 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/Mapper.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.planner.mapper; + +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.util.Holder; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.BinaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.LeafPlan; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; +import org.elasticsearch.xpack.esql.plan.logical.TopN; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.join.Join; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; +import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; +import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; +import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; +import org.elasticsearch.xpack.esql.plan.physical.LimitExec; +import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.OrderExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.plan.physical.TopNExec; +import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; + +import java.util.List; + +/** + *

    This class is part of the planner

    + * + *

    Translates the logical plan into a physical plan. This is where we start to decide what will be executed on the data nodes and what + * will be executed on the coordinator nodes. This step creates {@link org.elasticsearch.xpack.esql.plan.physical.FragmentExec} instances, + * which represent logical plan fragments to be sent to the data nodes and {@link org.elasticsearch.xpack.esql.plan.physical.ExchangeExec} + * instances, which represent data being sent back from the data nodes to the coordinating node.

    + */ +public class Mapper { + + public PhysicalPlan map(LogicalPlan p) { + + if (p instanceof LeafPlan leaf) { + return mapLeaf(leaf); + } + + if (p instanceof UnaryPlan unary) { + return mapUnary(unary); + } + + if (p instanceof BinaryPlan binary) { + return mapBinary(binary); + } + + return MapperUtils.unsupported(p); + } + + private PhysicalPlan mapLeaf(LeafPlan leaf) { + if (leaf instanceof EsRelation esRelation) { + return new FragmentExec(esRelation); + } + + return MapperUtils.mapLeaf(leaf); + } + + private PhysicalPlan mapUnary(UnaryPlan unary) { + PhysicalPlan mappedChild = map(unary.child()); + + // + // TODO - this is hard to follow and needs reworking + // https://github.com/elastic/elasticsearch/issues/115897 + // + if (unary instanceof Enrich enrich && enrich.mode() == Enrich.Mode.REMOTE) { + // When we have remote enrich, we want to put it under FragmentExec, so it would be executed remotely. + // We're only going to do it on the coordinator node. + // The way we're going to do it is as follows: + // 1. Locate FragmentExec in the tree. If we have no FragmentExec, we won't do anything. + // 2. Put this Enrich under it, removing everything that was below it previously. + // 3. Above FragmentExec, we should deal with pipeline breakers, since pipeline ops already are supposed to go under + // FragmentExec. + // 4. Aggregates can't appear here since the plan should have errored out if we have aggregate inside remote Enrich. + // 5. So we should be keeping: LimitExec, ExchangeExec, OrderExec, TopNExec (actually OrderExec probably can't happen anyway). + Holder hasFragment = new Holder<>(false); + + var childTransformed = mappedChild.transformUp(f -> { + // Once we reached FragmentExec, we stuff our Enrich under it + if (f instanceof FragmentExec) { + hasFragment.set(true); + return new FragmentExec(enrich); + } + if (f instanceof EnrichExec enrichExec) { + // It can only be ANY because COORDINATOR would have errored out earlier, and REMOTE should be under FragmentExec + assert enrichExec.mode() == Enrich.Mode.ANY : "enrich must be in ANY mode here"; + return enrichExec.child(); + } + if (f instanceof UnaryExec unaryExec) { + if (f instanceof LimitExec || f instanceof ExchangeExec || f instanceof OrderExec || f instanceof TopNExec) { + return f; + } else { + return unaryExec.child(); + } + } + // Currently, it's either UnaryExec or LeafExec. Leaf will either resolve to FragmentExec or we'll ignore it. + return f; + }); + + if (hasFragment.get()) { + return childTransformed; + } + } + + if (mappedChild instanceof FragmentExec) { + // COORDINATOR enrich must not be included to the fragment as it has to be executed on the coordinating node + if (unary instanceof Enrich enrich && enrich.mode() == Enrich.Mode.COORDINATOR) { + mappedChild = addExchangeForFragment(enrich.child(), mappedChild); + return MapperUtils.mapUnary(unary, mappedChild); + } + // in case of a fragment, push to it any current streaming operator + if (isPipelineBreaker(unary) == false) { + return new FragmentExec(unary); + } + } + + // + // Pipeline breakers + // + if (unary instanceof Aggregate aggregate) { + List intermediate = MapperUtils.intermediateAttributes(aggregate); + + // create both sides of the aggregate (for parallelism purposes), if no fragment is present + // TODO: might be easier long term to end up with just one node and split if necessary instead of doing that always at this + // stage + mappedChild = addExchangeForFragment(aggregate, mappedChild); + + // exchange was added - use the intermediates for the output + if (mappedChild instanceof ExchangeExec exchange) { + mappedChild = new ExchangeExec(mappedChild.source(), intermediate, true, exchange.child()); + } + // if no exchange was added (aggregation happening on the coordinator), create the initial agg + else { + mappedChild = MapperUtils.aggExec(aggregate, mappedChild, AggregatorMode.INITIAL, intermediate); + } + + // always add the final/reduction agg + return MapperUtils.aggExec(aggregate, mappedChild, AggregatorMode.FINAL, intermediate); + } + + if (unary instanceof Limit limit) { + mappedChild = addExchangeForFragment(limit, mappedChild); + return new LimitExec(limit.source(), mappedChild, limit.limit()); + } + + if (unary instanceof OrderBy o) { + mappedChild = addExchangeForFragment(o, mappedChild); + return new OrderExec(o.source(), mappedChild, o.order()); + } + + if (unary instanceof TopN topN) { + mappedChild = addExchangeForFragment(topN, mappedChild); + return new TopNExec(topN.source(), mappedChild, topN.order(), topN.limit(), null); + } + + // + // Pipeline operators + // + return MapperUtils.mapUnary(unary, mappedChild); + } + + private PhysicalPlan mapBinary(BinaryPlan bp) { + if (bp instanceof Join join) { + JoinConfig config = join.config(); + if (config.type() != JoinType.LEFT) { + throw new EsqlIllegalArgumentException("unsupported join type [" + config.type() + "]"); + } + + PhysicalPlan left = map(bp.left()); + + // only broadcast joins supported for now - hence push down as a streaming operator + if (left instanceof FragmentExec fragment) { + return new FragmentExec(bp); + } + + PhysicalPlan right = map(bp.right()); + // no fragment means lookup + if (right instanceof LocalSourceExec localData) { + return new HashJoinExec( + join.source(), + left, + localData, + config.matchFields(), + config.leftFields(), + config.rightFields(), + join.output() + ); + } + } + + return MapperUtils.unsupported(bp); + } + + public static boolean isPipelineBreaker(LogicalPlan p) { + return p instanceof Aggregate || p instanceof TopN || p instanceof Limit || p instanceof OrderBy; + } + + private PhysicalPlan addExchangeForFragment(LogicalPlan logical, PhysicalPlan child) { + // in case of fragment, preserve the streaming operator (order-by, limit or topN) for local replanning + // no need to do it for an aggregate since it gets split + // and clone it as a physical node along with the exchange + if (child instanceof FragmentExec) { + child = new FragmentExec(logical); + child = new ExchangeExec(child.source(), child); + } + return child; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/MapperUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/MapperUtils.java new file mode 100644 index 0000000000000..213e33f3712b1 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/MapperUtils.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.planner.mapper; + +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.Dissect; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.Grok; +import org.elasticsearch.xpack.esql.plan.logical.LeafPlan; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.MvExpand; +import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.Row; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; +import org.elasticsearch.xpack.esql.plan.logical.show.ShowInfo; +import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.esql.plan.physical.DissectExec; +import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; +import org.elasticsearch.xpack.esql.plan.physical.EvalExec; +import org.elasticsearch.xpack.esql.plan.physical.FilterExec; +import org.elasticsearch.xpack.esql.plan.physical.GrokExec; +import org.elasticsearch.xpack.esql.plan.physical.LimitExec; +import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.MvExpandExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.plan.physical.ProjectExec; +import org.elasticsearch.xpack.esql.plan.physical.RowExec; +import org.elasticsearch.xpack.esql.plan.physical.ShowExec; +import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; + +import java.util.List; + +/** + * Class for sharing code across Mappers. + */ +class MapperUtils { + private MapperUtils() {} + + static PhysicalPlan mapLeaf(LeafPlan p) { + if (p instanceof Row row) { + return new RowExec(row.source(), row.fields()); + } + + if (p instanceof LocalRelation local) { + return new LocalSourceExec(local.source(), local.output(), local.supplier()); + } + + // Commands + if (p instanceof ShowInfo showInfo) { + return new ShowExec(showInfo.source(), showInfo.output(), showInfo.values()); + } + + return unsupported(p); + } + + static PhysicalPlan mapUnary(UnaryPlan p, PhysicalPlan child) { + if (p instanceof Filter f) { + return new FilterExec(f.source(), child, f.condition()); + } + + if (p instanceof Project pj) { + return new ProjectExec(pj.source(), child, pj.projections()); + } + + if (p instanceof Eval eval) { + return new EvalExec(eval.source(), child, eval.fields()); + } + + if (p instanceof Dissect dissect) { + return new DissectExec(dissect.source(), child, dissect.input(), dissect.parser(), dissect.extractedFields()); + } + + if (p instanceof Grok grok) { + return new GrokExec(grok.source(), child, grok.input(), grok.parser(), grok.extractedFields()); + } + + if (p instanceof Enrich enrich) { + return new EnrichExec( + enrich.source(), + child, + enrich.mode(), + enrich.policy().getType(), + enrich.matchField(), + BytesRefs.toString(enrich.policyName().fold()), + enrich.policy().getMatchField(), + enrich.concreteIndices(), + enrich.enrichFields() + ); + } + + if (p instanceof MvExpand mvExpand) { + MvExpandExec result = new MvExpandExec(mvExpand.source(), child, mvExpand.target(), mvExpand.expanded()); + if (mvExpand.limit() != null) { + // MvExpand could have an inner limit + // see PushDownAndCombineLimits rule + return new LimitExec(result.source(), result, new Literal(Source.EMPTY, mvExpand.limit(), DataType.INTEGER)); + } + return result; + } + + return unsupported(p); + } + + static List intermediateAttributes(Aggregate aggregate) { + List intermediateAttributes = AbstractPhysicalOperationProviders.intermediateAttributes( + aggregate.aggregates(), + aggregate.groupings() + ); + return intermediateAttributes; + } + + static AggregateExec aggExec(Aggregate aggregate, PhysicalPlan child, AggregatorMode aggMode, List intermediateAttributes) { + return new AggregateExec( + aggregate.source(), + child, + aggregate.groupings(), + aggregate.aggregates(), + aggMode, + intermediateAttributes, + null + ); + } + + static PhysicalPlan unsupported(LogicalPlan p) { + throw new EsqlIllegalArgumentException("unsupported logical plan node [" + p.nodeName() + "]"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index c12de173fa6b8..04e5fdc4b3bd2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -40,8 +40,8 @@ import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; import org.elasticsearch.xpack.esql.enrich.LookupFromIndexService; import org.elasticsearch.xpack.esql.execution.PlanExecutor; -import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.session.Configuration; +import org.elasticsearch.xpack.esql.session.EsqlSession.PlanRunner; import org.elasticsearch.xpack.esql.session.Result; import java.io.IOException; @@ -50,7 +50,6 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.Executor; -import java.util.function.BiConsumer; import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; @@ -174,10 +173,10 @@ private void innerExecute(Task task, EsqlQueryRequest request, ActionListener remoteClusterService.isSkipUnavailable(clusterAlias), request.includeCCSMetadata() ); - BiConsumer> runPhase = (physicalPlan, resultListener) -> computeService.execute( + PlanRunner planRunner = (plan, resultListener) -> computeService.execute( sessionId, (CancellableTask) task, - physicalPlan, + plan, configuration, executionInfo, resultListener @@ -189,7 +188,7 @@ private void innerExecute(Task task, EsqlQueryRequest request, ActionListener toResponse(task, request, configuration, result)) ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/CcsUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/CcsUtils.java new file mode 100644 index 0000000000000..a9314e6f65d87 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/CcsUtils.java @@ -0,0 +1,221 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.session; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; +import org.elasticsearch.xpack.esql.analysis.Analyzer; +import org.elasticsearch.xpack.esql.index.IndexResolution; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +class CcsUtils { + + private CcsUtils() {} + + /** + * ActionListener that receives LogicalPlan or error from logical planning. + * Any Exception sent to onFailure stops processing, but not all are fatal (return a 4xx or 5xx), so + * the onFailure handler determines whether to return an empty successful result or a 4xx/5xx error. + */ + abstract static class CssPartialErrorsActionListener implements ActionListener { + private final EsqlExecutionInfo executionInfo; + private final ActionListener listener; + + CssPartialErrorsActionListener(EsqlExecutionInfo executionInfo, ActionListener listener) { + this.executionInfo = executionInfo; + this.listener = listener; + } + + /** + * Whether to return an empty result (HTTP status 200) for a CCS rather than a top level 4xx/5xx error. + * + * For cases where field-caps had no indices to search and the remotes were unavailable, we + * return an empty successful response (200) if all remotes are marked with skip_unavailable=true. + * + * Note: a follow-on PR will expand this logic to handle cases where no indices could be found to match + * on any of the requested clusters. + */ + private boolean returnSuccessWithEmptyResult(Exception e) { + if (executionInfo.isCrossClusterSearch() == false) { + return false; + } + + if (e instanceof NoClustersToSearchException || ExceptionsHelper.isRemoteUnavailableException(e)) { + for (String clusterAlias : executionInfo.clusterAliases()) { + if (executionInfo.isSkipUnavailable(clusterAlias) == false + && clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) == false) { + return false; + } + } + return true; + } + return false; + } + + @Override + public void onFailure(Exception e) { + if (returnSuccessWithEmptyResult(e)) { + executionInfo.markEndQuery(); + Exception exceptionForResponse; + if (e instanceof ConnectTransportException) { + // when field-caps has no field info (since no clusters could be connected to or had matching indices) + // it just throws the first exception in its list, so this odd special handling is here is to avoid + // having one specific remote alias name in all failure lists in the metadata response + exceptionForResponse = new RemoteTransportException( + "connect_transport_exception - unable to connect to remote cluster", + null + ); + } else { + exceptionForResponse = e; + } + for (String clusterAlias : executionInfo.clusterAliases()) { + executionInfo.swapCluster(clusterAlias, (k, v) -> { + EsqlExecutionInfo.Cluster.Builder builder = new EsqlExecutionInfo.Cluster.Builder(v).setTook( + executionInfo.overallTook() + ).setTotalShards(0).setSuccessfulShards(0).setSkippedShards(0).setFailedShards(0); + if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { + // never mark local cluster as skipped + builder.setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL); + } else { + builder.setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED); + // add this exception to the failures list only if there is no failure already recorded there + if (v.getFailures() == null || v.getFailures().size() == 0) { + builder.setFailures(List.of(new ShardSearchFailure(exceptionForResponse))); + } + } + return builder.build(); + }); + } + listener.onResponse(new Result(Analyzer.NO_FIELDS, Collections.emptyList(), Collections.emptyList(), executionInfo)); + } else { + listener.onFailure(e); + } + } + } + + // visible for testing + static String createIndexExpressionFromAvailableClusters(EsqlExecutionInfo executionInfo) { + StringBuilder sb = new StringBuilder(); + for (String clusterAlias : executionInfo.clusterAliases()) { + EsqlExecutionInfo.Cluster cluster = executionInfo.getCluster(clusterAlias); + if (cluster.getStatus() != EsqlExecutionInfo.Cluster.Status.SKIPPED) { + if (cluster.getClusterAlias().equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + sb.append(executionInfo.getCluster(clusterAlias).getIndexExpression()).append(','); + } else { + String indexExpression = executionInfo.getCluster(clusterAlias).getIndexExpression(); + for (String index : indexExpression.split(",")) { + sb.append(clusterAlias).append(':').append(index).append(','); + } + } + } + } + + if (sb.length() > 0) { + return sb.substring(0, sb.length() - 1); + } else { + return ""; + } + } + + static void updateExecutionInfoWithUnavailableClusters(EsqlExecutionInfo execInfo, Map unavailable) { + for (Map.Entry entry : unavailable.entrySet()) { + String clusterAlias = entry.getKey(); + boolean skipUnavailable = execInfo.getCluster(clusterAlias).isSkipUnavailable(); + RemoteTransportException e = new RemoteTransportException( + Strings.format("Remote cluster [%s] (with setting skip_unavailable=%s) is not available", clusterAlias, skipUnavailable), + entry.getValue().getException() + ); + if (skipUnavailable) { + execInfo.swapCluster( + clusterAlias, + (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0) + .setFailures(List.of(new ShardSearchFailure(e))) + .build() + ); + } else { + throw e; + } + } + } + + // visible for testing + static void updateExecutionInfoWithClustersWithNoMatchingIndices(EsqlExecutionInfo executionInfo, IndexResolution indexResolution) { + Set clustersWithResolvedIndices = new HashSet<>(); + // determine missing clusters + for (String indexName : indexResolution.get().indexNameWithModes().keySet()) { + clustersWithResolvedIndices.add(RemoteClusterAware.parseClusterAlias(indexName)); + } + Set clustersRequested = executionInfo.clusterAliases(); + Set clustersWithNoMatchingIndices = Sets.difference(clustersRequested, clustersWithResolvedIndices); + clustersWithNoMatchingIndices.removeAll(indexResolution.getUnavailableClusters().keySet()); + /* + * These are clusters in the original request that are not present in the field-caps response. They were + * specified with an index or indices that do not exist, so the search on that cluster is done. + * Mark it as SKIPPED with 0 shards searched and took=0. + */ + for (String c : clustersWithNoMatchingIndices) { + // TODO: in a follow-on PR, throw a Verification(400 status code) for local and remotes with skip_unavailable=false if + // they were requested with one or more concrete indices + // for now we never mark the local cluster as SKIPPED + final var status = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(c) + ? EsqlExecutionInfo.Cluster.Status.SUCCESSFUL + : EsqlExecutionInfo.Cluster.Status.SKIPPED; + executionInfo.swapCluster( + c, + (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(status) + .setTook(new TimeValue(0)) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0) + .build() + ); + } + } + + // visible for testing + static void updateExecutionInfoAtEndOfPlanning(EsqlExecutionInfo execInfo) { + // TODO: this logic assumes a single phase execution model, so it may need to altered once INLINESTATS is made CCS compatible + if (execInfo.isCrossClusterSearch()) { + execInfo.markEndPlanning(); + for (String clusterAlias : execInfo.clusterAliases()) { + EsqlExecutionInfo.Cluster cluster = execInfo.getCluster(clusterAlias); + if (cluster.getStatus() == EsqlExecutionInfo.Cluster.Status.SKIPPED) { + execInfo.swapCluster( + clusterAlias, + (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setTook(execInfo.planningTookTime()) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0) + .build() + ); + } + } + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 1e78f454b7531..a4405c32ff91c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -7,16 +7,15 @@ package org.elasticsearch.xpack.esql.session; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; @@ -25,9 +24,6 @@ import org.elasticsearch.indices.IndicesExpressionGrouper; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.analysis.Analyzer; @@ -62,24 +58,24 @@ import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Keep; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.Phased; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; +import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.planner.Mapper; +import org.elasticsearch.xpack.esql.planner.mapper.Mapper; import org.elasticsearch.xpack.esql.stats.PlanningMetrics; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -91,6 +87,14 @@ public class EsqlSession { private static final Logger LOGGER = LogManager.getLogger(EsqlSession.class); + /** + * Interface for running the underlying plan. + * Abstracts away the underlying execution engine. + */ + public interface PlanRunner { + void run(PhysicalPlan plan, ActionListener listener); + } + private final String sessionId; private final Configuration configuration; private final IndexResolver indexResolver; @@ -140,158 +144,107 @@ public String sessionId() { /** * Execute an ESQL request. */ - public void execute( - EsqlQueryRequest request, - EsqlExecutionInfo executionInfo, - BiConsumer> runPhase, - ActionListener listener - ) { + public void execute(EsqlQueryRequest request, EsqlExecutionInfo executionInfo, PlanRunner planRunner, ActionListener listener) { LOGGER.debug("ESQL query:\n{}", request.query()); analyzedPlan( parse(request.query(), request.params()), executionInfo, - new LogicalPlanActionListener(request, executionInfo, runPhase, listener) - ); - } - - /** - * ActionListener that receives LogicalPlan or error from logical planning. - * Any Exception sent to onFailure stops processing, but not all are fatal (return a 4xx or 5xx), so - * the onFailure handler determines whether to return an empty successful result or a 4xx/5xx error. - */ - class LogicalPlanActionListener implements ActionListener { - private final EsqlQueryRequest request; - private final EsqlExecutionInfo executionInfo; - private final BiConsumer> runPhase; - private final ActionListener listener; - - LogicalPlanActionListener( - EsqlQueryRequest request, - EsqlExecutionInfo executionInfo, - BiConsumer> runPhase, - ActionListener listener - ) { - this.request = request; - this.executionInfo = executionInfo; - this.runPhase = runPhase; - this.listener = listener; - } - - @Override - public void onResponse(LogicalPlan analyzedPlan) { - executeOptimizedPlan(request, executionInfo, runPhase, optimizedPlan(analyzedPlan), listener); - } - - /** - * Whether to return an empty result (HTTP status 200) for a CCS rather than a top level 4xx/5xx error. - * - * For cases where field-caps had no indices to search and the remotes were unavailable, we - * return an empty successful response (200) if all remotes are marked with skip_unavailable=true. - * - * Note: a follow-on PR will expand this logic to handle cases where no indices could be found to match - * on any of the requested clusters. - */ - private boolean returnSuccessWithEmptyResult(Exception e) { - if (executionInfo.isCrossClusterSearch() == false) { - return false; - } - - if (e instanceof NoClustersToSearchException || ExceptionsHelper.isRemoteUnavailableException(e)) { - for (String clusterAlias : executionInfo.clusterAliases()) { - if (executionInfo.isSkipUnavailable(clusterAlias) == false - && clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) == false) { - return false; - } - } - return true; - } - return false; - } - - @Override - public void onFailure(Exception e) { - if (returnSuccessWithEmptyResult(e)) { - executionInfo.markEndQuery(); - Exception exceptionForResponse; - if (e instanceof ConnectTransportException) { - // when field-caps has no field info (since no clusters could be connected to or had matching indices) - // it just throws the first exception in its list, so this odd special handling is here is to avoid - // having one specific remote alias name in all failure lists in the metadata response - exceptionForResponse = new RemoteTransportException( - "connect_transport_exception - unable to connect to remote cluster", - null - ); - } else { - exceptionForResponse = e; - } - for (String clusterAlias : executionInfo.clusterAliases()) { - executionInfo.swapCluster(clusterAlias, (k, v) -> { - EsqlExecutionInfo.Cluster.Builder builder = new EsqlExecutionInfo.Cluster.Builder(v).setTook( - executionInfo.overallTook() - ).setTotalShards(0).setSuccessfulShards(0).setSkippedShards(0).setFailedShards(0); - if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { - // never mark local cluster as skipped - builder.setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL); - } else { - builder.setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED); - // add this exception to the failures list only if there is no failure already recorded there - if (v.getFailures() == null || v.getFailures().size() == 0) { - builder.setFailures(List.of(new ShardSearchFailure(exceptionForResponse))); - } - } - return builder.build(); - }); + new CcsUtils.CssPartialErrorsActionListener(executionInfo, listener) { + @Override + public void onResponse(LogicalPlan analyzedPlan) { + executeOptimizedPlan(request, executionInfo, planRunner, optimizedPlan(analyzedPlan), listener); } - listener.onResponse(new Result(Analyzer.NO_FIELDS, Collections.emptyList(), Collections.emptyList(), executionInfo)); - } else { - listener.onFailure(e); } - } + ); } /** * Execute an analyzed plan. Most code should prefer calling {@link #execute} but - * this is public for testing. See {@link Phased} for the sequence of operations. + * this is public for testing. */ public void executeOptimizedPlan( EsqlQueryRequest request, EsqlExecutionInfo executionInfo, - BiConsumer> runPhase, + PlanRunner planRunner, LogicalPlan optimizedPlan, ActionListener listener ) { - LogicalPlan firstPhase = Phased.extractFirstPhase(optimizedPlan); - updateExecutionInfoAtEndOfPlanning(executionInfo); - if (firstPhase == null) { - runPhase.accept(logicalPlanToPhysicalPlan(optimizedPlan, request), listener); + PhysicalPlan physicalPlan = logicalPlanToPhysicalPlan(optimizedPlan, request); + // TODO: this could be snuck into the underlying listener + CcsUtils.updateExecutionInfoAtEndOfPlanning(executionInfo); + // execute any potential subplans + executeSubPlans(physicalPlan, planRunner, executionInfo, request, listener); + } + + private record PlanTuple(PhysicalPlan physical, LogicalPlan logical) {}; + + private void executeSubPlans( + PhysicalPlan physicalPlan, + PlanRunner runner, + EsqlExecutionInfo executionInfo, + EsqlQueryRequest request, + ActionListener listener + ) { + List subplans = new ArrayList<>(); + + // Currently the inlinestats are limited and supported as streaming operators, thus present inside the fragment as logical plans + // Below they get collected, translated into a separate, coordinator based plan and the results 'broadcasted' as a local relation + physicalPlan.forEachUp(FragmentExec.class, f -> { + f.fragment().forEachUp(InlineJoin.class, ij -> { + // extract the right side of the plan and replace its source + LogicalPlan subplan = InlineJoin.replaceStub(ij.left(), ij.right()); + // mark the new root node as optimized + subplan.setOptimized(); + PhysicalPlan subqueryPlan = logicalPlanToPhysicalPlan(subplan, request); + subplans.add(new PlanTuple(subqueryPlan, ij.right())); + }); + }); + + Iterator iterator = subplans.iterator(); + + // TODO: merge into one method + if (subplans.size() > 0) { + // code-path to execute subplans + executeSubPlan(new ArrayList<>(), physicalPlan, iterator, executionInfo, runner, listener); } else { - executePhased(new ArrayList<>(), optimizedPlan, request, executionInfo, firstPhase, runPhase, listener); + // execute main plan + runner.run(physicalPlan, listener); } } - private void executePhased( + private void executeSubPlan( List profileAccumulator, - LogicalPlan mainPlan, - EsqlQueryRequest request, + PhysicalPlan plan, + Iterator subPlanIterator, EsqlExecutionInfo executionInfo, - LogicalPlan firstPhase, - BiConsumer> runPhase, + PlanRunner runner, ActionListener listener ) { - PhysicalPlan physicalPlan = logicalPlanToPhysicalPlan(optimizedPlan(firstPhase), request); - runPhase.accept(physicalPlan, listener.delegateFailureAndWrap((next, result) -> { + PlanTuple tuple = subPlanIterator.next(); + + runner.run(tuple.physical, listener.delegateFailureAndWrap((next, result) -> { try { profileAccumulator.addAll(result.profiles()); - LogicalPlan newMainPlan = optimizedPlan(Phased.applyResultsFromFirstPhase(mainPlan, physicalPlan.output(), result.pages())); - LogicalPlan newFirstPhase = Phased.extractFirstPhase(newMainPlan); - if (newFirstPhase == null) { - PhysicalPlan finalPhysicalPlan = logicalPlanToPhysicalPlan(newMainPlan, request); - runPhase.accept(finalPhysicalPlan, next.delegateFailureAndWrap((finalListener, finalResult) -> { + LocalRelation resultWrapper = resultToPlan(tuple.logical, result); + + // replace the original logical plan with the backing result + final PhysicalPlan newPlan = plan.transformUp(FragmentExec.class, f -> { + LogicalPlan frag = f.fragment(); + return f.withFragment( + frag.transformUp( + InlineJoin.class, + ij -> ij.right() == tuple.logical ? InlineJoin.inlineData(ij, resultWrapper) : ij + ) + ); + }); + if (subPlanIterator.hasNext() == false) { + runner.run(newPlan, next.delegateFailureAndWrap((finalListener, finalResult) -> { profileAccumulator.addAll(finalResult.profiles()); finalListener.onResponse(new Result(finalResult.schema(), finalResult.pages(), profileAccumulator, executionInfo)); })); } else { - executePhased(profileAccumulator, newMainPlan, request, executionInfo, newFirstPhase, runPhase, next); + // continue executing the subplans + executeSubPlan(profileAccumulator, newPlan, subPlanIterator, executionInfo, runner, next); } } finally { Releasables.closeExpectNoException(Releasables.wrap(Iterators.map(result.pages().iterator(), p -> p::releaseBlocks))); @@ -299,6 +252,14 @@ private void executePhased( })); } + private LocalRelation resultToPlan(LogicalPlan plan, Result result) { + List pages = result.pages(); + List schema = result.schema(); + // if (pages.size() > 1) { + Block[] blocks = SessionUtils.fromPages(schema, pages); + return new LocalRelation(plan.source(), schema, LocalSupplier.of(blocks)); + } + private LogicalPlan parse(String query, QueryParams params) { var parsed = new EsqlParser().createStatement(query, params); LOGGER.debug("Parsed logical plan:\n{}", parsed); @@ -347,8 +308,8 @@ private void preAnalyze( // TODO in follow-PR (for skip_unavailble handling of missing concrete indexes) add some tests for invalid index // resolution to updateExecutionInfo if (indexResolution.isValid()) { - updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); - updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.getUnavailableClusters()); + CcsUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + CcsUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.getUnavailableClusters()); if (executionInfo.isCrossClusterSearch() && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) == 0) { // for a CCS, if all clusters have been marked as SKIPPED, nothing to search so send a sentinel @@ -422,7 +383,7 @@ private void preAnalyzeIndices( } // if the preceding call to the enrich policy API found unavailable clusters, recreate the index expression to search // based only on available clusters (which could now be an empty list) - String indexExpressionToResolve = createIndexExpressionFromAvailableClusters(executionInfo); + String indexExpressionToResolve = CcsUtils.createIndexExpressionFromAvailableClusters(executionInfo); if (indexExpressionToResolve.isEmpty()) { // if this was a pure remote CCS request (no local indices) and all remotes are offline, return an empty IndexResolution listener.onResponse(IndexResolution.valid(new EsIndex(table.index(), Map.of(), Map.of()))); @@ -440,30 +401,6 @@ private void preAnalyzeIndices( } } - // visible for testing - static String createIndexExpressionFromAvailableClusters(EsqlExecutionInfo executionInfo) { - StringBuilder sb = new StringBuilder(); - for (String clusterAlias : executionInfo.clusterAliases()) { - EsqlExecutionInfo.Cluster cluster = executionInfo.getCluster(clusterAlias); - if (cluster.getStatus() != EsqlExecutionInfo.Cluster.Status.SKIPPED) { - if (cluster.getClusterAlias().equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - sb.append(executionInfo.getCluster(clusterAlias).getIndexExpression()).append(','); - } else { - String indexExpression = executionInfo.getCluster(clusterAlias).getIndexExpression(); - for (String index : indexExpression.split(",")) { - sb.append(clusterAlias).append(':').append(index).append(','); - } - } - } - } - - if (sb.length() > 0) { - return sb.substring(0, sb.length() - 1); - } else { - return ""; - } - } - static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchFields) { if (false == parsed.anyMatch(plan -> plan instanceof Aggregate || plan instanceof Project)) { // no explicit columns selection, for example "from employees" @@ -607,86 +544,4 @@ public PhysicalPlan optimizedPhysicalPlan(LogicalPlan optimizedPlan) { LOGGER.debug("Optimized physical plan:\n{}", plan); return plan; } - - static void updateExecutionInfoWithUnavailableClusters(EsqlExecutionInfo execInfo, Map unavailable) { - for (Map.Entry entry : unavailable.entrySet()) { - String clusterAlias = entry.getKey(); - boolean skipUnavailable = execInfo.getCluster(clusterAlias).isSkipUnavailable(); - RemoteTransportException e = new RemoteTransportException( - Strings.format("Remote cluster [%s] (with setting skip_unavailable=%s) is not available", clusterAlias, skipUnavailable), - entry.getValue().getException() - ); - if (skipUnavailable) { - execInfo.swapCluster( - clusterAlias, - (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED) - .setTotalShards(0) - .setSuccessfulShards(0) - .setSkippedShards(0) - .setFailedShards(0) - .setFailures(List.of(new ShardSearchFailure(e))) - .build() - ); - } else { - throw e; - } - } - } - - // visible for testing - static void updateExecutionInfoWithClustersWithNoMatchingIndices(EsqlExecutionInfo executionInfo, IndexResolution indexResolution) { - Set clustersWithResolvedIndices = new HashSet<>(); - // determine missing clusters - for (String indexName : indexResolution.get().indexNameWithModes().keySet()) { - clustersWithResolvedIndices.add(RemoteClusterAware.parseClusterAlias(indexName)); - } - Set clustersRequested = executionInfo.clusterAliases(); - Set clustersWithNoMatchingIndices = Sets.difference(clustersRequested, clustersWithResolvedIndices); - clustersWithNoMatchingIndices.removeAll(indexResolution.getUnavailableClusters().keySet()); - /* - * These are clusters in the original request that are not present in the field-caps response. They were - * specified with an index or indices that do not exist, so the search on that cluster is done. - * Mark it as SKIPPED with 0 shards searched and took=0. - */ - for (String c : clustersWithNoMatchingIndices) { - // TODO: in a follow-on PR, throw a Verification(400 status code) for local and remotes with skip_unavailable=false if - // they were requested with one or more concrete indices - // for now we never mark the local cluster as SKIPPED - final var status = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(c) - ? EsqlExecutionInfo.Cluster.Status.SUCCESSFUL - : EsqlExecutionInfo.Cluster.Status.SKIPPED; - executionInfo.swapCluster( - c, - (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(status) - .setTook(new TimeValue(0)) - .setTotalShards(0) - .setSuccessfulShards(0) - .setSkippedShards(0) - .setFailedShards(0) - .build() - ); - } - } - - // visible for testing - static void updateExecutionInfoAtEndOfPlanning(EsqlExecutionInfo execInfo) { - // TODO: this logic assumes a single phase execution model, so it may need to altered once INLINESTATS is made CCS compatible - if (execInfo.isCrossClusterSearch()) { - execInfo.markEndPlanning(); - for (String clusterAlias : execInfo.clusterAliases()) { - EsqlExecutionInfo.Cluster cluster = execInfo.getCluster(clusterAlias); - if (cluster.getStatus() == EsqlExecutionInfo.Cluster.Status.SKIPPED) { - execInfo.swapCluster( - clusterAlias, - (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setTook(execInfo.planningTookTime()) - .setTotalShards(0) - .setSuccessfulShards(0) - .setSkippedShards(0) - .setFailedShards(0) - .build() - ); - } - } - } - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/SessionUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/SessionUtils.java new file mode 100644 index 0000000000000..85abc635967a6 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/SessionUtils.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.session; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; + +import java.util.ArrayList; +import java.util.List; + +public class SessionUtils { + + private SessionUtils() {} + + public static Block[] fromPages(List schema, List pages) { + // Limit ourselves to 1mb of results similar to LOOKUP for now. + long bytesUsed = pages.stream().mapToLong(Page::ramBytesUsedByBlocks).sum(); + if (bytesUsed > ByteSizeValue.ofMb(1).getBytes()) { + throw new IllegalArgumentException("first phase result too large [" + ByteSizeValue.ofBytes(bytesUsed) + "] > 1mb"); + } + int positionCount = pages.stream().mapToInt(Page::getPositionCount).sum(); + Block.Builder[] builders = new Block.Builder[schema.size()]; + Block[] blocks; + try { + for (int b = 0; b < builders.length; b++) { + builders[b] = PlannerUtils.toElementType(schema.get(b).dataType()) + .newBlockBuilder(positionCount, PlannerUtils.NON_BREAKING_BLOCK_FACTORY); + } + for (Page p : pages) { + for (int b = 0; b < builders.length; b++) { + builders[b].copyFrom(p.getBlock(b), 0, p.getPositionCount()); + } + } + blocks = Block.Builder.buildAll(builders); + } finally { + Releasables.closeExpectNoException(builders); + } + return blocks; + } + + public static List fromPage(List schema, Page page) { + if (page.getPositionCount() != 1) { + throw new IllegalArgumentException("expected single row"); + } + List values = new ArrayList<>(schema.size()); + for (int i = 0; i < schema.size(); i++) { + values.add(BlockUtils.toJavaObject(page.getBlock(i), 0)); + } + return values; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 3119fd4b52153..4bf02d947c1e0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -71,18 +71,20 @@ import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; import org.elasticsearch.xpack.esql.plan.physical.OutputExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlan; -import org.elasticsearch.xpack.esql.planner.Mapper; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.planner.TestPhysicalOperationProviders; +import org.elasticsearch.xpack.esql.planner.mapper.Mapper; import org.elasticsearch.xpack.esql.plugin.EsqlFeatures; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.session.EsqlSession; +import org.elasticsearch.xpack.esql.session.EsqlSession.PlanRunner; import org.elasticsearch.xpack.esql.session.Result; import org.elasticsearch.xpack.esql.stats.DisabledSearchStats; import org.elasticsearch.xpack.esql.stats.PlanningMetrics; @@ -99,7 +101,6 @@ import java.util.TreeMap; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; -import java.util.function.BiConsumer; import static org.elasticsearch.xpack.esql.CsvSpecReader.specParser; import static org.elasticsearch.xpack.esql.CsvTestUtils.ExpectedResults; @@ -163,7 +164,7 @@ public class CsvTests extends ESTestCase { ); private final EsqlFunctionRegistry functionRegistry = new EsqlFunctionRegistry(); private final EsqlParser parser = new EsqlParser(); - private final Mapper mapper = new Mapper(functionRegistry); + private final Mapper mapper = new Mapper(); private ThreadPool threadPool; private Executor executor; @@ -438,7 +439,7 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { session.executeOptimizedPlan( new EsqlQueryRequest(), new EsqlExecutionInfo(randomBoolean()), - runPhase(bigArrays, physicalOperationProviders), + planRunner(bigArrays, physicalOperationProviders), session.optimizedPlan(analyzed), listener.delegateFailureAndWrap( // Wrap so we can capture the warnings in the calling thread @@ -477,12 +478,11 @@ private Throwable reworkException(Throwable th) { // Asserts that the serialization and deserialization of the plan creates an equivalent plan. private void opportunisticallyAssertPlanSerialization(PhysicalPlan plan) { - var tmp = plan; - do { - if (tmp instanceof LocalSourceExec) { - return; // skip plans with localSourceExec - } - } while (tmp.children().isEmpty() == false && (tmp = tmp.children().get(0)) != null); + + // skip plans with localSourceExec + if (plan.anyMatch(p -> p instanceof LocalSourceExec || p instanceof HashJoinExec)) { + return; + } SerializationTestUtils.assertSerialization(plan, configuration); } @@ -499,14 +499,11 @@ private void assertWarnings(List warnings) { EsqlTestUtils.assertWarnings(normalized, testCase.expectedWarnings(), testCase.expectedWarningsRegex()); } - BiConsumer> runPhase( - BigArrays bigArrays, - TestPhysicalOperationProviders physicalOperationProviders - ) { - return (physicalPlan, listener) -> runPhase(bigArrays, physicalOperationProviders, physicalPlan, listener); + PlanRunner planRunner(BigArrays bigArrays, TestPhysicalOperationProviders physicalOperationProviders) { + return (physicalPlan, listener) -> executeSubPlan(bigArrays, physicalOperationProviders, physicalPlan, listener); } - void runPhase( + void executeSubPlan( BigArrays bigArrays, TestPhysicalOperationProviders physicalOperationProviders, PhysicalPlan physicalPlan, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index b86935dcd03da..8674fb5f6c7c9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1954,7 +1954,7 @@ public void testLookup() { * on it and discover that it doesn't exist in the index. It doesn't! * We don't expect it to. It exists only in the lookup table. */ - .item(containsString("name{r}")) + .item(containsString("name{f}")) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 59ba8352d2aaf..b022f955fd458 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -100,7 +100,6 @@ import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; -import org.elasticsearch.xpack.esql.plan.logical.InlineStats; import org.elasticsearch.xpack.esql.plan.logical.Limit; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; @@ -109,6 +108,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Row; import org.elasticsearch.xpack.esql.plan.logical.TopN; import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; @@ -4639,10 +4639,14 @@ public void testReplaceSortByExpressionsWithStats() { /** * Expects * Limit[1000[INTEGER]] - * \_InlineStats[[emp_no % 2{r}#6],[COUNT(salary{f}#12) AS c, emp_no % 2{r}#6]] - * \_Eval[[emp_no{f}#7 % 2[INTEGER] AS emp_no % 2]] - * \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] + * \_InlineJoin[LEFT OUTER,[emp_no % 2{r}#1793],[emp_no % 2{r}#1793],[emp_no % 2{r}#1793]] + * |_Eval[[emp_no{f}#1794 % 2[INTEGER] AS emp_no % 2]] + * | \_EsRelation[test][_meta_field{f}#1800, emp_no{f}#1794, first_name{f}#..] + * \_Aggregate[STANDARD,[emp_no % 2{r}#1793],[COUNT(salary{f}#1799,true[BOOLEAN]) AS c, emp_no % 2{r}#1793]] + * \_StubRelation[[_meta_field{f}#1800, emp_no{f}#1794, first_name{f}#1795, gender{f}#1796, job{f}#1801, job.raw{f}#1802, langua + * ges{f}#1797, last_name{f}#1798, long_noidx{f}#1803, salary{f}#1799, emp_no % 2{r}#1793]] */ + @AwaitsFix(bugUrl = "Needs updating to join plan per above") public void testInlinestatsNestedExpressionsInGroups() { var query = """ FROM test @@ -4655,7 +4659,8 @@ public void testInlinestatsNestedExpressionsInGroups() { } var plan = optimizedPlan(query); var limit = as(plan, Limit.class); - var agg = as(limit.child(), InlineStats.class); + var inline = as(limit.child(), InlineJoin.class); + var agg = as(inline.left(), Aggregate.class); var groupings = agg.groupings(); var aggs = agg.aggregates(); var ref = as(groupings.get(0), ReferenceAttribute.class); @@ -5112,6 +5117,7 @@ public void testLookupSimple() { assertTrue(join.children().get(0).outputSet() + " contains " + lhs, join.children().get(0).outputSet().contains(lhs)); assertTrue(join.children().get(1).outputSet() + " contains " + rhs, join.children().get(1).outputSet().contains(rhs)); + // TODO: this needs to be fixed // Join's output looks sensible too assertMap( join.output().stream().map(Object::toString).toList(), @@ -5136,7 +5142,7 @@ public void testLookupSimple() { * on it and discover that it doesn't exist in the index. It doesn't! * We don't expect it to. It exists only in the lookup table. */ - .item(containsString("name{r}")) + .item(containsString("name")) ); } @@ -5171,9 +5177,9 @@ public void testLookupStats() { var agg = as(limit.child(), Aggregate.class); assertMap( agg.aggregates().stream().map(Object::toString).sorted().toList(), - matchesList().item(startsWith("MIN(emp_no)")).item(startsWith("name{r}")) + matchesList().item(startsWith("MIN(emp_no)")).item(startsWith("name")) ); - assertMap(agg.groupings().stream().map(Object::toString).toList(), matchesList().item(startsWith("name{r}"))); + assertMap(agg.groupings().stream().map(Object::toString).toList(), matchesList().item(startsWith("name"))); var join = as(agg.child(), Join.class); // Right is the lookup table @@ -5197,6 +5203,7 @@ public void testLookupStats() { assertThat(lhs.toString(), startsWith("int{r}")); assertThat(rhs.toString(), startsWith("int{r}")); + // TODO: fixme // Join's output looks sensible too assertMap( join.output().stream().map(Object::toString).toList(), @@ -5221,7 +5228,7 @@ public void testLookupStats() { * on it and discover that it doesn't exist in the index. It doesn't! * We don't expect it to. It exists only in the lookup table. */ - .item(containsString("name{r}")) + .item(containsString("name")) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 961c70acada7b..3b59a1d176a98 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -115,8 +115,8 @@ import org.elasticsearch.xpack.esql.plan.physical.RowExec; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; -import org.elasticsearch.xpack.esql.planner.Mapper; import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.planner.mapper.Mapper; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.querydsl.query.SpatialRelatesQuery; @@ -220,7 +220,7 @@ public void init() { logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(EsqlTestUtils.TEST_CFG)); physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(config)); EsqlFunctionRegistry functionRegistry = new EsqlFunctionRegistry(); - mapper = new Mapper(functionRegistry); + mapper = new Mapper(); var enrichResolution = setupEnrichResolution(); // Most tests used data from the test index, so we load it here, and use it in the plan() function. this.testData = makeTestDataSource("test", "mapping-basic.json", functionRegistry, enrichResolution); @@ -6300,7 +6300,7 @@ public void testLookupSimple() { .item(startsWith("last_name{f}")) .item(startsWith("long_noidx{f}")) .item(startsWith("salary{f}")) - .item(startsWith("name{r}")) + .item(startsWith("name{f}")) ); } @@ -6352,10 +6352,10 @@ public void testLookupThenProject() { .item(startsWith("last_name{f}")) .item(startsWith("long_noidx{f}")) .item(startsWith("salary{f}")) - .item(startsWith("name{r}")) + .item(startsWith("name{f}")) ); - var middleProject = as(join.child(), ProjectExec.class); + var middleProject = as(join.left(), ProjectExec.class); assertThat(middleProject.projections().stream().map(Objects::toString).toList(), not(hasItem(startsWith("name{f}")))); /* * At the moment we don't push projections past the HashJoin so we still include first_name here @@ -6402,7 +6402,7 @@ public void testLookupThenTopN() { TopN innerTopN = as(opt, TopN.class); assertMap( innerTopN.order().stream().map(o -> o.child().toString()).toList(), - matchesList().item(startsWith("name{r}")).item(startsWith("emp_no{f}")) + matchesList().item(startsWith("name{f}")).item(startsWith("emp_no{f}")) ); Join join = as(innerTopN.child(), Join.class); assertThat(join.config().type(), equalTo(JoinType.LEFT)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/TestPlannerOptimizer.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/TestPlannerOptimizer.java index 1d25146ee4e2d..595f0aaa91f0d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/TestPlannerOptimizer.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/TestPlannerOptimizer.java @@ -13,8 +13,8 @@ import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.planner.Mapper; import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.planner.mapper.Mapper; import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.stats.SearchStats; @@ -35,7 +35,7 @@ public TestPlannerOptimizer(Configuration config, Analyzer analyzer) { logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(config)); physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(config)); functionRegistry = new EsqlFunctionRegistry(); - mapper = new Mapper(functionRegistry); + mapper = new Mapper(); } public PhysicalPlan plan(String query) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 97de0caa93b5c..1e9fc5c281c45 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -392,12 +392,16 @@ public void testInlineStatsWithGroups() { assertEquals( new InlineStats( EMPTY, - PROCESSING_CMD_INPUT, - List.of(attribute("c"), attribute("d.e")), - List.of( - new Alias(EMPTY, "b", new UnresolvedFunction(EMPTY, "min", DEFAULT, List.of(attribute("a")))), - attribute("c"), - attribute("d.e") + new Aggregate( + EMPTY, + PROCESSING_CMD_INPUT, + Aggregate.AggregateType.STANDARD, + List.of(attribute("c"), attribute("d.e")), + List.of( + new Alias(EMPTY, "b", new UnresolvedFunction(EMPTY, "min", DEFAULT, List.of(attribute("a")))), + attribute("c"), + attribute("d.e") + ) ) ), processingCommand(query) @@ -414,11 +418,15 @@ public void testInlineStatsWithoutGroups() { assertEquals( new InlineStats( EMPTY, - PROCESSING_CMD_INPUT, - List.of(), - List.of( - new Alias(EMPTY, "min(a)", new UnresolvedFunction(EMPTY, "min", DEFAULT, List.of(attribute("a")))), - new Alias(EMPTY, "c", integer(1)) + new Aggregate( + EMPTY, + PROCESSING_CMD_INPUT, + Aggregate.AggregateType.STANDARD, + List.of(), + List.of( + new Alias(EMPTY, "min(a)", new UnresolvedFunction(EMPTY, "min", DEFAULT, List.of(attribute("a")))), + new Alias(EMPTY, "c", integer(1)) + ) ) ), processingCommand(query) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/InlineStatsSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/InlineStatsSerializationTests.java index 5366fca1fbd71..f91e61e41ea05 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/InlineStatsSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/InlineStatsSerializationTests.java @@ -21,14 +21,14 @@ protected InlineStats createTestInstance() { LogicalPlan child = randomChild(0); List groupings = randomFieldAttributes(0, 5, false).stream().map(a -> (Expression) a).toList(); List aggregates = randomFieldAttributes(0, 5, false).stream().map(a -> (NamedExpression) a).toList(); - return new InlineStats(source, child, groupings, aggregates); + return new InlineStats(source, new Aggregate(source, child, Aggregate.AggregateType.STANDARD, groupings, aggregates)); } @Override protected InlineStats mutateInstance(InlineStats instance) throws IOException { LogicalPlan child = instance.child(); - List groupings = instance.groupings(); - List aggregates = instance.aggregates(); + List groupings = instance.aggregate().groupings(); + List aggregates = instance.aggregate().aggregates(); switch (between(0, 2)) { case 0 -> child = randomValueOtherThan(child, () -> randomChild(0)); case 1 -> groupings = randomValueOtherThan( @@ -40,6 +40,7 @@ protected InlineStats mutateInstance(InlineStats instance) throws IOException { () -> randomFieldAttributes(0, 5, false).stream().map(a -> (NamedExpression) a).toList() ); } - return new InlineStats(instance.source(), child, groupings, aggregates); + Aggregate agg = new Aggregate(instance.source(), child, Aggregate.AggregateType.STANDARD, groupings, aggregates); + return new InlineStats(instance.source(), agg); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinSerializationTests.java index 1a7f29303e635..6b17e4efd4de7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinSerializationTests.java @@ -46,4 +46,9 @@ protected Join mutateInstance(Join instance) throws IOException { } return new Join(instance.source(), left, right, config); } + + @Override + protected boolean alwaysEmptySource() { + return true; + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java index 91f25e6f83579..dde70d85ba259 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java @@ -24,6 +24,7 @@ import java.util.Set; public class JoinTests extends ESTestCase { + @AwaitsFix(bugUrl = "Test needs updating to the new JOIN planning") public void testExpressionsAndReferences() { int numMatchFields = between(1, 10); @@ -51,7 +52,7 @@ public void testExpressionsAndReferences() { Join join = new Join(Source.EMPTY, left, right, joinConfig); // matchfields are a subset of the left and right fields, so they don't contribute to the size of the references set. - assertEquals(2 * numMatchFields, join.references().size()); + // assertEquals(2 * numMatchFields, join.references().size()); AttributeSet refs = join.references(); assertTrue(refs.containsAll(matchFields)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java deleted file mode 100644 index a4aef74d0e10a..0000000000000 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.plan.logical; - -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.AttributeSet; -import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.core.tree.NodeInfo; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.index.EsIndex; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.sameInstance; - -public class PhasedTests extends ESTestCase { - public void testZeroLayers() { - EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); - relation.setOptimized(); - assertThat(Phased.extractFirstPhase(relation), nullValue()); - } - - public void testOneLayer() { - EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); - LogicalPlan orig = new Dummy(Source.synthetic("orig"), relation); - orig.setOptimized(); - assertThat(Phased.extractFirstPhase(orig), sameInstance(relation)); - LogicalPlan finalPhase = Phased.applyResultsFromFirstPhase( - orig, - List.of(new ReferenceAttribute(Source.EMPTY, "foo", DataType.KEYWORD)), - List.of() - ); - assertThat( - finalPhase, - equalTo(new Row(orig.source(), List.of(new Alias(orig.source(), "foo", new Literal(orig.source(), "foo", DataType.KEYWORD))))) - ); - finalPhase.setOptimized(); - assertThat(Phased.extractFirstPhase(finalPhase), nullValue()); - } - - public void testTwoLayer() { - EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); - LogicalPlan inner = new Dummy(Source.synthetic("inner"), relation); - LogicalPlan orig = new Dummy(Source.synthetic("outer"), inner); - orig.setOptimized(); - assertThat( - "extractFirstPhase should call #firstPhase on the earliest child in the plan", - Phased.extractFirstPhase(orig), - sameInstance(relation) - ); - LogicalPlan secondPhase = Phased.applyResultsFromFirstPhase( - orig, - List.of(new ReferenceAttribute(Source.EMPTY, "foo", DataType.KEYWORD)), - List.of() - ); - secondPhase.setOptimized(); - assertThat( - "applyResultsFromFirstPhase should call #nextPhase one th earliest child in the plan", - secondPhase, - equalTo( - new Dummy( - Source.synthetic("outer"), - new Row(orig.source(), List.of(new Alias(orig.source(), "foo", new Literal(orig.source(), "foo", DataType.KEYWORD)))) - ) - ) - ); - - assertThat(Phased.extractFirstPhase(secondPhase), sameInstance(secondPhase.children().get(0))); - LogicalPlan finalPhase = Phased.applyResultsFromFirstPhase( - secondPhase, - List.of(new ReferenceAttribute(Source.EMPTY, "foo", DataType.KEYWORD)), - List.of() - ); - finalPhase.setOptimized(); - assertThat( - finalPhase, - equalTo(new Row(orig.source(), List.of(new Alias(orig.source(), "foo", new Literal(orig.source(), "foo", DataType.KEYWORD))))) - ); - - assertThat(Phased.extractFirstPhase(finalPhase), nullValue()); - } - - public class Dummy extends UnaryPlan implements Phased { - Dummy(Source source, LogicalPlan child) { - super(source, child); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - throw new UnsupportedOperationException("not serialized"); - } - - @Override - public String getWriteableName() { - throw new UnsupportedOperationException("not serialized"); - } - - @Override - public String commandName() { - return "DUMMY"; - } - - @Override - public boolean expressionsResolved() { - throw new UnsupportedOperationException(); - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, Dummy::new, child()); - } - - @Override - public int hashCode() { - return child().hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof Dummy == false) { - return false; - } - Dummy other = (Dummy) obj; - return child().equals(other.child()); - } - - @Override - public UnaryPlan replaceChild(LogicalPlan newChild) { - return new Dummy(source(), newChild); - } - - @Override - public List output() { - return child().output(); - } - - @Override - protected AttributeSet computeReferences() { - return AttributeSet.EMPTY; - } - - @Override - public LogicalPlan firstPhase() { - return child(); - } - - @Override - public LogicalPlan nextPhase(List schema, List firstPhaseResult) { - // Replace myself with a dummy "row" command - return new Row( - source(), - schema.stream().map(a -> new Alias(source(), a.name(), new Literal(source(), a.name(), DataType.KEYWORD))).toList() - ); - } - } -} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExecSerializationTests.java index 23f9c050c7c78..78ff1a5973ea3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExecSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExecSerializationTests.java @@ -36,8 +36,8 @@ protected HashJoinExec createTestInstance() { @Override protected HashJoinExec mutateInstance(HashJoinExec instance) throws IOException { - PhysicalPlan child = instance.child(); - LocalSourceExec joinData = instance.joinData(); + PhysicalPlan child = instance.left(); + PhysicalPlan joinData = instance.joinData(); List matchFields = randomFieldAttributes(1, 5, false); List leftFields = randomFieldAttributes(1, 5, false); List rightFields = randomFieldAttributes(1, 5, false); @@ -53,4 +53,9 @@ protected HashJoinExec mutateInstance(HashJoinExec instance) throws IOException } return new HashJoinExec(instance.source(), child, joinData, matchFields, leftFields, rightFields, output); } + + @Override + protected boolean alwaysEmptySource() { + return true; + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java index 06fe05896a57c..bb937700ef771 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.planner.mapper.Mapper; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.session.Configuration; import org.junit.BeforeClass; @@ -79,7 +80,7 @@ public static void init() { IndexResolution getIndexResult = IndexResolution.valid(test); logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(EsqlTestUtils.TEST_CFG)); physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(EsqlTestUtils.TEST_CFG)); - mapper = new Mapper(false); + mapper = new Mapper(); analyzer = new Analyzer( new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), getIndexResult, EsqlTestUtils.emptyPolicyResolution()), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java index 325e8fbb6b652..4553551c40cd3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java @@ -32,7 +32,7 @@ import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.planner.Mapper; +import org.elasticsearch.xpack.esql.planner.mapper.Mapper; import java.io.IOException; import java.util.ArrayList; @@ -274,8 +274,7 @@ static LogicalPlan parse(String query) { static PhysicalPlan mapAndMaybeOptimize(LogicalPlan logicalPlan) { var physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(TEST_CFG)); - EsqlFunctionRegistry functionRegistry = new EsqlFunctionRegistry(); - var mapper = new Mapper(functionRegistry); + var mapper = new Mapper(); var physical = mapper.map(logicalPlan); if (randomBoolean()) { physical = physicalPlanOptimizer.optimize(physical); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java index dddfa67338419..1f814b841f19d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java @@ -45,7 +45,7 @@ public void testCreateIndexExpressionFromAvailableClusters() { executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", true)); - String indexExpr = EsqlSession.createIndexExpressionFromAvailableClusters(executionInfo); + String indexExpr = CcsUtils.createIndexExpressionFromAvailableClusters(executionInfo); List list = Arrays.stream(Strings.splitStringByCommaToArray(indexExpr)).toList(); assertThat(list.size(), equalTo(5)); assertThat( @@ -69,7 +69,7 @@ public void testCreateIndexExpressionFromAvailableClusters() { ) ); - String indexExpr = EsqlSession.createIndexExpressionFromAvailableClusters(executionInfo); + String indexExpr = CcsUtils.createIndexExpressionFromAvailableClusters(executionInfo); List list = Arrays.stream(Strings.splitStringByCommaToArray(indexExpr)).toList(); assertThat(list.size(), equalTo(3)); assertThat(new HashSet<>(list), equalTo(Strings.commaDelimitedListToSet("logs*,remote1:*,remote1:foo"))); @@ -93,7 +93,7 @@ public void testCreateIndexExpressionFromAvailableClusters() { ) ); - assertThat(EsqlSession.createIndexExpressionFromAvailableClusters(executionInfo), equalTo("logs*")); + assertThat(CcsUtils.createIndexExpressionFromAvailableClusters(executionInfo), equalTo("logs*")); } // only remotes present and all marked as skipped, so in revised index expression should be empty string @@ -113,7 +113,7 @@ public void testCreateIndexExpressionFromAvailableClusters() { ) ); - assertThat(EsqlSession.createIndexExpressionFromAvailableClusters(executionInfo), equalTo("")); + assertThat(CcsUtils.createIndexExpressionFromAvailableClusters(executionInfo), equalTo("")); } } @@ -131,7 +131,7 @@ public void testUpdateExecutionInfoWithUnavailableClusters() { var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); var unvailableClusters = Map.of(remote1Alias, failure, remote2Alias, failure); - EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, unvailableClusters); + CcsUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, unvailableClusters); assertThat(executionInfo.clusterAliases(), equalTo(Set.of(localClusterAlias, remote1Alias, remote2Alias))); assertNull(executionInfo.overallTook()); @@ -159,7 +159,7 @@ public void testUpdateExecutionInfoWithUnavailableClusters() { var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); RemoteTransportException e = expectThrows( RemoteTransportException.class, - () -> EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, Map.of(remote2Alias, failure)) + () -> CcsUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, Map.of(remote2Alias, failure)) ); assertThat(e.status().getStatus(), equalTo(500)); assertThat( @@ -176,7 +176,7 @@ public void testUpdateExecutionInfoWithUnavailableClusters() { executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); - EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, Map.of()); + CcsUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, Map.of()); assertThat(executionInfo.clusterAliases(), equalTo(Set.of(localClusterAlias, remote1Alias, remote2Alias))); assertNull(executionInfo.overallTook()); @@ -224,7 +224,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { ); IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of()); - EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + CcsUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); @@ -262,7 +262,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { ); IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of()); - EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + CcsUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); @@ -298,7 +298,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of(remote1Alias, failure)); - EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + CcsUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); @@ -336,7 +336,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of(remote1Alias, failure)); - EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + CcsUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); } } @@ -358,7 +358,7 @@ public void testUpdateExecutionInfoAtEndOfPlanning() { Thread.sleep(1); } catch (InterruptedException e) {} - EsqlSession.updateExecutionInfoAtEndOfPlanning(executionInfo); + CcsUtils.updateExecutionInfoAtEndOfPlanning(executionInfo); assertThat(executionInfo.planningTookTime().millis(), greaterThanOrEqualTo(0L)); assertNull(executionInfo.overallTook()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java index 9edc85223e7b3..116df21a33ac0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; import org.elasticsearch.xpack.esql.execution.PlanExecutor; -import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.esql.session.IndexResolver; import org.elasticsearch.xpack.esql.session.Result; import org.junit.After; @@ -40,7 +40,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.BiConsumer; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.hamcrest.Matchers.instanceOf; @@ -109,7 +108,7 @@ public void testFailedMetric() { var request = new EsqlQueryRequest(); // test a failed query: xyz field doesn't exist request.query("from test | stats m = max(xyz)"); - BiConsumer> runPhase = (p, r) -> fail("this shouldn't happen"); + EsqlSession.PlanRunner runPhase = (p, r) -> fail("this shouldn't happen"); IndicesExpressionGrouper groupIndicesByCluster = (indicesOptions, indexExpressions) -> Map.of( "", new OriginalIndices(new String[] { "test" }, IndicesOptions.DEFAULT) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java index 2bee0188b9fab..b8a64be5dfd35 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java @@ -45,7 +45,6 @@ import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Grok; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.PhasedTests; import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.Stat; @@ -118,7 +117,7 @@ public class EsqlNodeSubclassTests> extends NodeS private static final Predicate CLASSNAME_FILTER = className -> { boolean esqlCore = className.startsWith("org.elasticsearch.xpack.esql.core") != false; boolean esqlProper = className.startsWith("org.elasticsearch.xpack.esql") != false; - return (esqlCore || esqlProper) && className.equals(PhasedTests.Dummy.class.getName()) == false; + return (esqlCore || esqlProper); }; /** @@ -129,7 +128,7 @@ public class EsqlNodeSubclassTests> extends NodeS @SuppressWarnings("rawtypes") public static List nodeSubclasses() throws IOException { return subclassesOf(Node.class, CLASSNAME_FILTER).stream() - .filter(c -> testClassFor(c) == null || c != PhasedTests.Dummy.class) + .filter(c -> testClassFor(c) == null) .map(c -> new Object[] { c }) .toList(); } From 130cc74d8a3b7f230c3bfb624bd2f8419a05e389 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20J=C3=B3zala?= <377355+jozala@users.noreply.github.com> Date: Thu, 31 Oct 2024 11:55:47 +0100 Subject: [PATCH 231/324] [TEST] Migrated REST tests extending JsonLogsIntegTestCase (#115188) REST tests extending JsonLogsIntegTestCase migrated to the new REST testing framework, using 'elasticsearch.internal-java-rest-test' Gradle plugin Explicit handling of test single-cluster config by checking if cluster is explicitly configured as single-node to avoid an incorrect configuration which may cause split brain. Replaced `ElasticsearchJavaPlugin` with `ElasticsearchJavaBasePlugin` in `RestTestBasePlugin` for better granularity. Additionally, updated the `DistributionDownloadPlugin` to set configurations as non-consumable and the `InternalDistributionArchiveSetupPlugin` to mark certain configurations as consumable. This ensures that configurations are correctly utilized during build setup and execution phases. --- ...nternalDistributionArchiveSetupPlugin.java | 2 + .../internal/RestrictedBuildApiService.java | 4 -- .../test/rest/RestTestBasePlugin.java | 4 +- .../gradle/DistributionDownloadPlugin.java | 2 + .../archives/integ-test-zip/build.gradle | 18 +---- .../test/rest/CreatedLocationHeaderIT.java | 10 +++ .../test/rest/JsonLogsFormatAndParseIT.java | 40 +++++------ .../test/rest/NodeRestUsageIT.java | 10 +++ .../test/rest/RequestsWithoutContentIT.java | 10 +++ .../test/rest/WaitForRefreshAndCloseIT.java | 11 +++ qa/logging-config/build.gradle | 22 +----- .../common/logging/CustomLoggingConfigIT.java | 68 ++++++++----------- .../common/logging/ESJsonLogsConfigIT.java | 48 ++++++------- .../resources}/es-v7-log4j2.properties | 0 qa/unconfigured-node-name/build.gradle | 15 +--- .../JsonLogsFormatAndParseIT.java | 42 +++++++----- .../common/logging/JsonLogsIntegTestCase.java | 41 ++++------- .../local/AbstractLocalClusterFactory.java | 7 +- .../AbstractLocalClusterSpecBuilder.java | 36 ++++++++-- .../local/DefaultSettingsProvider.java | 32 ++++++--- .../test/cluster/local/LocalClusterSpec.java | 5 +- .../cluster/local/LocalNodeSpecBuilder.java | 5 ++ .../qa/full-cluster-restart/build.gradle | 1 - .../qa/inference-service-tests/build.gradle | 1 - .../inference/qa/mixed-cluster/build.gradle | 3 - .../inference/qa/rolling-upgrade/build.gradle | 3 - .../plugin/transform/qa/common/build.gradle | 1 + 27 files changed, 229 insertions(+), 212 deletions(-) rename qa/logging-config/{ => src/javaRestTest/resources}/es-v7-log4j2.properties (100%) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java index bfa1bcd34749f..f7e2f3d0d6c30 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java @@ -76,12 +76,14 @@ private void registerAndConfigureDistributionArchivesExtension(Project project) sub.getArtifacts().add(DEFAULT_CONFIGURATION_NAME, distributionArchive.getArchiveTask()); var extractedConfiguration = sub.getConfigurations().create(EXTRACTED_CONFIGURATION_NAME); extractedConfiguration.setCanBeResolved(false); + extractedConfiguration.setCanBeConsumed(true); extractedConfiguration.getAttributes() .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); sub.getArtifacts().add(EXTRACTED_CONFIGURATION_NAME, distributionArchive.getExpandedDistTask()); // The "composite" configuration is specifically used for resolving transformed artifacts in an included build var compositeConfiguration = sub.getConfigurations().create(COMPOSITE_CONFIGURATION_NAME); compositeConfiguration.setCanBeResolved(false); + compositeConfiguration.setCanBeConsumed(true); compositeConfiguration.getAttributes() .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); compositeConfiguration.getAttributes().attribute(Attribute.of("composite", Boolean.class), true); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java index 564bc44ab5d8b..7e3e8bd458c92 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java @@ -53,8 +53,6 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":plugins:repository-hdfs"); map.put(LegacyRestTestBasePlugin.class, ":plugins:store-smb"); map.put(LegacyRestTestBasePlugin.class, ":qa:ccs-rolling-upgrade-remote-cluster"); - map.put(LegacyRestTestBasePlugin.class, ":qa:ccs-unavailable-clusters"); - map.put(LegacyRestTestBasePlugin.class, ":qa:logging-config"); map.put(LegacyRestTestBasePlugin.class, ":qa:mixed-cluster"); map.put(LegacyRestTestBasePlugin.class, ":qa:multi-cluster-search"); map.put(LegacyRestTestBasePlugin.class, ":qa:remote-clusters"); @@ -65,7 +63,6 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":qa:smoke-test-ingest-with-all-dependencies"); map.put(LegacyRestTestBasePlugin.class, ":qa:smoke-test-plugins"); map.put(LegacyRestTestBasePlugin.class, ":qa:system-indices"); - map.put(LegacyRestTestBasePlugin.class, ":qa:unconfigured-node-name"); map.put(LegacyRestTestBasePlugin.class, ":qa:verify-version-constants"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-apm-integration"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-delayed-aggs"); @@ -73,7 +70,6 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-error-query"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-latency-simulating-directory"); map.put(LegacyRestTestBasePlugin.class, ":test:yaml-rest-runner"); - map.put(LegacyRestTestBasePlugin.class, ":distribution:archives:integ-test-zip"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:core"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ent-search"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:fleet"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index fbcfcb8202104..777a6d931e50e 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -18,7 +18,7 @@ import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; -import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin; +import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin; import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.test.ErrorReportingTestListener; @@ -90,7 +90,7 @@ public RestTestBasePlugin(ProviderFactory providerFactory) { @Override public void apply(Project project) { - project.getPluginManager().apply(ElasticsearchJavaPlugin.class); + project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); project.getPluginManager().apply(InternalDistributionDownloadPlugin.class); // Register integ-test and default distributions diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java index fe4d7148e18d6..4c7290457e7df 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java @@ -89,7 +89,9 @@ public void apply(Project project) { private void setupDistributionContainer(Project project) { distributionsContainer = project.container(ElasticsearchDistribution.class, name -> { var fileConfiguration = project.getConfigurations().create(DISTRO_CONFIG_PREFIX + name); + fileConfiguration.setCanBeConsumed(false); var extractedConfiguration = project.getConfigurations().create(DISTRO_EXTRACTED_CONFIG_PREFIX + name); + extractedConfiguration.setCanBeConsumed(false); extractedConfiguration.getAttributes() .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); var distribution = new ElasticsearchDistribution( diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index 4a5e0eb0b59b3..6fbb73cf64aad 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -9,7 +9,7 @@ import org.apache.tools.ant.filters.ReplaceTokens -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' // The integ-test-distribution is published to maven apply plugin: 'elasticsearch.publish' @@ -35,19 +35,3 @@ publishing { } } } - -tasks.named("javaRestTest").configure { - dependsOn "assemble" - /* - * There are two unique things going on here: - * 1. These tests can be run against an external cluster with - * -Dtests.rest.cluster=whatever and -Dtest.cluster=whatever - * 2. *One* of these tests is incompatible with that and should be skipped - * when running against an external cluster. - */ - if (project.providers.systemProperty("tests.rest.cluster").isPresent()) { - nonInputProperties.systemProperty 'tests.logfile', testClusters.named('javaRestTest').map(c -> c.singleNode().serverLog) - } else { - systemProperty 'tests.logfile', '--external--' - } -} diff --git a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java index eea049c433c07..b28f7c020be53 100644 --- a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java +++ b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java @@ -11,6 +11,8 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.junit.ClassRule; import java.io.IOException; @@ -23,6 +25,14 @@ */ public class CreatedLocationHeaderIT extends ESRestTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public void testCreate() throws IOException { locationTestCase("PUT", "test/_doc/1"); } diff --git a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java index 47bdf1b425684..aef7a876ffadf 100644 --- a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java +++ b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java @@ -10,36 +10,36 @@ package org.elasticsearch.test.rest; import org.elasticsearch.common.logging.JsonLogsIntegTestCase; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.LogType; import org.hamcrest.Matcher; +import org.junit.ClassRule; -import java.io.BufferedReader; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedAction; +import java.io.InputStream; import static org.hamcrest.Matchers.is; public class JsonLogsFormatAndParseIT extends JsonLogsIntegTestCase { + + private static final String NODE_NAME = "test-node-0"; + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .withNode(localNodeSpecBuilder -> localNodeSpecBuilder.name(NODE_NAME)) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected Matcher nodeNameMatcher() { - return is("integTest-0"); + return is(NODE_NAME); } @Override - protected BufferedReader openReader(Path logFile) { - assumeFalse( - "Skipping test because it is being run against an external cluster.", - logFile.getFileName().toString().equals("--external--") - ); - return AccessController.doPrivileged((PrivilegedAction) () -> { - try { - return Files.newBufferedReader(logFile, StandardCharsets.UTF_8); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); + protected InputStream openLogsStream() { + return cluster.getNodeLog(0, LogType.SERVER_JSON); } } diff --git a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/NodeRestUsageIT.java b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/NodeRestUsageIT.java index 461c23e91ccd2..17bdf2a748bd7 100644 --- a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/NodeRestUsageIT.java +++ b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/NodeRestUsageIT.java @@ -15,6 +15,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.junit.ClassRule; import java.io.IOException; import java.util.Collections; @@ -28,6 +30,14 @@ public class NodeRestUsageIT extends ESRestTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @SuppressWarnings("unchecked") public void testWithRestUsage() throws IOException { // First get the current usage figures diff --git a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/RequestsWithoutContentIT.java b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/RequestsWithoutContentIT.java index 8732110bb1937..abb92ebad9d52 100644 --- a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/RequestsWithoutContentIT.java +++ b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/RequestsWithoutContentIT.java @@ -11,6 +11,8 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.junit.ClassRule; import java.io.IOException; @@ -18,6 +20,14 @@ public class RequestsWithoutContentIT extends ESRestTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public void testIndexMissingBody() throws IOException { ResponseException responseException = expectThrows( ResponseException.class, diff --git a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java index 285ba29848a2a..37efc2d7f2b43 100644 --- a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java +++ b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java @@ -17,8 +17,10 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.ResponseListener; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.junit.After; import org.junit.Before; +import org.junit.ClassRule; import java.io.IOException; import java.util.Map; @@ -32,6 +34,15 @@ * Tests that wait for refresh is fired if the index is closed. */ public class WaitForRefreshAndCloseIT extends ESRestTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Before public void setupIndex() throws IOException { Request request = new Request("PUT", "/test"); diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle index 4d65c4384afa1..255a513543c92 100644 --- a/qa/logging-config/build.gradle +++ b/qa/logging-config/build.gradle @@ -7,7 +7,7 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ apply plugin: 'elasticsearch.build' -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { testImplementation project(":libs:x-content") @@ -15,26 +15,6 @@ dependencies { } -testClusters.all { - setting 'xpack.security.enabled', 'false' -} - -testClusters.matching { it.name == "javaRestTest" }.configureEach { - /** - * Provide a custom log4j configuration where layout is an old style pattern and confirm that Elasticsearch - * can successfully startup. - */ - extraConfigFile 'log4j2.properties', file('es-v7-log4j2.properties') -} - -tasks.named("javaRestTest").configure { - nonInputProperties.systemProperty 'tests.logfile', - "${-> testClusters.javaRestTest.singleNode().getServerLog().absolutePath.replaceAll("_server.json", ".log")}" - - nonInputProperties.systemProperty 'tests.jsonLogfile', - "${-> testClusters.javaRestTest.singleNode().getServerLog()}" -} - tasks.named("test").configure { systemProperty 'tests.security.manager', 'false' } diff --git a/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/CustomLoggingConfigIT.java b/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/CustomLoggingConfigIT.java index 765020b9c65e7..085fad8bcdd7f 100644 --- a/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/CustomLoggingConfigIT.java +++ b/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/CustomLoggingConfigIT.java @@ -8,18 +8,19 @@ */ package org.elasticsearch.common.logging; -import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.LogType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matchers; +import org.junit.ClassRule; +import java.io.BufferedReader; import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; import java.io.UncheckedIOException; import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.util.List; import static org.hamcrest.Matchers.matchesRegex; @@ -34,53 +35,44 @@ public class CustomLoggingConfigIT extends ESRestTestCase { // [2020-03-20T14:51:59,989][INFO ][o.e.g.GatewayService ] [integTest-0] recovered [0] indices into cluster_state private static final String NODE_STARTED = ".*recovered.*cluster_state.*"; + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .setting("xpack.security.enabled", "false") + .configFile("log4j2.properties", Resource.fromClasspath("es-v7-log4j2.properties")) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public void testSuccessfulStartupWithCustomConfig() throws Exception { assertBusy(() -> { - List lines = readAllLines(getPlaintextLogFile()); + List lines = getPlaintextLog(); assertThat(lines, Matchers.hasItem(matchesRegex(NODE_STARTED))); }); } public void testParseAllV7JsonLines() throws Exception { assertBusy(() -> { - List lines = readAllLines(getJSONLogFile()); + List lines = getJSONLog(); assertThat(lines, Matchers.hasItem(matchesRegex(NODE_STARTED))); }); } - private List readAllLines(Path logFile) { - return AccessController.doPrivileged((PrivilegedAction>) () -> { - try { - return Files.readAllLines(logFile, StandardCharsets.UTF_8); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); - } - - @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") - private Path getJSONLogFile() { - String logFileString = System.getProperty("tests.logfile"); - if (logFileString == null) { - fail( - "tests.logfile must be set to run this test. It is automatically " - + "set by gradle. If you must set it yourself then it should be the absolute path to the " - + "log file." - ); + private List getJSONLog() { + try (InputStream nodeLog = cluster.getNodeLog(0, LogType.SERVER_JSON)) { + return new BufferedReader(new InputStreamReader(nodeLog, StandardCharsets.UTF_8)).lines().toList(); + } catch (IOException e) { + throw new UncheckedIOException(e); } - return Paths.get(logFileString); } - @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") - private Path getPlaintextLogFile() { - String logFileString = System.getProperty("tests.logfile"); - if (logFileString == null) { - fail( - "tests.logfile must be set to run this test. It is automatically " - + "set by gradle. If you must set it yourself then it should be the absolute path to the " - + "log file." - ); + private List getPlaintextLog() { + try (InputStream nodeLog = cluster.getNodeLog(0, LogType.SERVER)) { + return new BufferedReader(new InputStreamReader(nodeLog, StandardCharsets.UTF_8)).lines().toList(); + } catch (IOException e) { + throw new UncheckedIOException(e); } - return Paths.get(logFileString); } } diff --git a/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/ESJsonLogsConfigIT.java b/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/ESJsonLogsConfigIT.java index d8ff9c88a459f..fa3da02ffbf40 100644 --- a/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/ESJsonLogsConfigIT.java +++ b/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/ESJsonLogsConfigIT.java @@ -8,49 +8,45 @@ */ package org.elasticsearch.common.logging; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.LogType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.xcontent.ObjectParser; import org.hamcrest.Matcher; +import org.junit.ClassRule; -import java.io.BufferedReader; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedAction; +import java.io.InputStream; import static org.hamcrest.Matchers.is; /** * Test to verify ES JSON log format. Used in ES v7. Some users might decide to keep that format. + * Provide a custom log4j configuration where layout is an old style pattern and confirm that Elasticsearch can successfully startup. */ public class ESJsonLogsConfigIT extends JsonLogsIntegTestCase { + + private static final String NODE_NAME = "test-node-0"; + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .setting("xpack.security.enabled", "false") + .withNode(localNodeSpecBuilder -> localNodeSpecBuilder.name(NODE_NAME)) + .configFile("log4j2.properties", Resource.fromClasspath("es-v7-log4j2.properties")) + .build(); + @Override - protected Matcher nodeNameMatcher() { - return is("javaRestTest-0"); + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); } @Override - protected BufferedReader openReader(Path logFile) { - assumeFalse( - "Skipping test because it is being run against an external cluster.", - logFile.getFileName().toString().equals("--external--") - ); - - return AccessController.doPrivileged((PrivilegedAction) () -> { - try { - String temp = Files.readString(logFile); - - return Files.newBufferedReader(logFile, StandardCharsets.UTF_8); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); + protected InputStream openLogsStream() { + return cluster.getNodeLog(0, LogType.SERVER_JSON); } @Override - protected String getLogFileName() { - return System.getProperty("tests.jsonLogfile"); + protected Matcher nodeNameMatcher() { + return is(NODE_NAME); } @Override diff --git a/qa/logging-config/es-v7-log4j2.properties b/qa/logging-config/src/javaRestTest/resources/es-v7-log4j2.properties similarity index 100% rename from qa/logging-config/es-v7-log4j2.properties rename to qa/logging-config/src/javaRestTest/resources/es-v7-log4j2.properties diff --git a/qa/unconfigured-node-name/build.gradle b/qa/unconfigured-node-name/build.gradle index 07aafd8f5c269..ecc088e5972ef 100644 --- a/qa/unconfigured-node-name/build.gradle +++ b/qa/unconfigured-node-name/build.gradle @@ -7,17 +7,4 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ -apply plugin: 'elasticsearch.legacy-java-rest-test' - -testClusters.configureEach { - setting 'xpack.security.enabled', 'false' -} - -testClusters.matching { it.name == "javaRestTest" }.configureEach { - nameCustomization = { null } -} - -tasks.named("javaRestTest").configure { - nonInputProperties.systemProperty 'tests.logfile', - "${-> testClusters.javaRestTest.singleNode().getServerLog()}" -} +apply plugin: 'elasticsearch.internal-java-rest-test' diff --git a/qa/unconfigured-node-name/src/javaRestTest/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java b/qa/unconfigured-node-name/src/javaRestTest/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java index 30804adca88b3..7ceda070adadb 100644 --- a/qa/unconfigured-node-name/src/javaRestTest/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java +++ b/qa/unconfigured-node-name/src/javaRestTest/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java @@ -10,15 +10,12 @@ package org.elasticsearch.unconfigured_node_name; import org.elasticsearch.common.logging.JsonLogsIntegTestCase; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.LogType; import org.hamcrest.Matcher; +import org.junit.ClassRule; -import java.io.BufferedReader; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedAction; +import java.io.InputStream; import static org.hamcrest.Matchers.equalTo; @@ -26,9 +23,24 @@ public class JsonLogsFormatAndParseIT extends JsonLogsIntegTestCase { private static final String OS_NAME = System.getProperty("os.name"); private static final boolean WINDOWS = OS_NAME.startsWith("Windows"); - // These match the values defined in org.elasticsearch.gradle.testclusters.ElasticsearchNode - private static final String COMPUTERNAME = "WindowsComputername"; - private static final String HOSTNAME = "LinuxDarwinHostname"; + private static final String COMPUTERNAME = "WindowsTestComputername"; + private static final String HOSTNAME = "LinuxDarwinTestHostname"; + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .setting("xpack.security.enabled", "false") + .setting("discovery.type", "single-node") + .withNode( + localNodeSpecBuilder -> localNodeSpecBuilder.withoutName() + .environment("HOSTNAME", HOSTNAME) + .environment("COMPUTERNAME", COMPUTERNAME) + ) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } @Override protected Matcher nodeNameMatcher() { @@ -39,13 +51,7 @@ protected Matcher nodeNameMatcher() { } @Override - protected BufferedReader openReader(Path logFile) { - return AccessController.doPrivileged((PrivilegedAction) () -> { - try { - return Files.newBufferedReader(logFile, StandardCharsets.UTF_8); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); + protected InputStream openLogsStream() { + return cluster.getNodeLog(0, LogType.SERVER_JSON); } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java index 2643959a559fa..4ba2b983fb9cc 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -9,14 +9,14 @@ package org.elasticsearch.common.logging; -import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.ObjectParser; import java.io.BufferedReader; import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.stream.Stream; @@ -50,18 +50,21 @@ public abstract class JsonLogsIntegTestCase extends ESRestTestCase { */ protected abstract org.hamcrest.Matcher nodeNameMatcher(); + private BufferedReader openReader() { + return new BufferedReader(new InputStreamReader(openLogsStream(), StandardCharsets.UTF_8)); + } + /** - * Open the log file. This is delegated to subclasses because the test - * framework doesn't have permission to read from the log file but - * subclasses can grant themselves that permission. + * Reads the logs. + * This is delegated to subclasses because they configure the cluster and have access to the nodes logs InputStream. */ - protected abstract BufferedReader openReader(Path logFile); + protected abstract InputStream openLogsStream(); public void testElementsPresentOnAllLinesOfLog() throws IOException { JsonLogLine firstLine = findFirstLine(); assertNotNull(firstLine); - try (Stream stream = JsonLogsStream.from(openReader(getLogFile()), getParser())) { + try (Stream stream = JsonLogsStream.from(openReader(), getParser())) { stream.limit(LINES_TO_CHECK).forEach(jsonLogLine -> { assertThat(jsonLogLine.getDataset(), is(not(emptyOrNullString()))); assertThat(jsonLogLine.getTimestamp(), is(not(emptyOrNullString()))); @@ -77,13 +80,13 @@ public void testElementsPresentOnAllLinesOfLog() throws IOException { } private JsonLogLine findFirstLine() throws IOException { - try (Stream stream = JsonLogsStream.from(openReader(getLogFile()), getParser())) { + try (Stream stream = JsonLogsStream.from(openReader(), getParser())) { return stream.findFirst().orElseThrow(() -> new AssertionError("no logs at all?!")); } } public void testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException { - try (Stream stream = JsonLogsStream.from(openReader(getLogFile()), getParser())) { + try (Stream stream = JsonLogsStream.from(openReader(), getParser())) { Iterator iterator = stream.iterator(); JsonLogLine firstLine = null; @@ -91,6 +94,7 @@ public void testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException { JsonLogLine jsonLogLine = iterator.next(); if (jsonLogLine.getNodeId() != null) { firstLine = jsonLogLine; + break; } } assertNotNull(firstLine); @@ -106,23 +110,6 @@ public void testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException { } } - @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") - private Path getLogFile() { - String logFileString = getLogFileName(); - if (logFileString == null) { - fail( - "tests.logfile must be set to run this test. It is automatically " - + "set by gradle. If you must set it yourself then it should be the absolute path to the " - + "log file." - ); - } - return Paths.get(logFileString); - } - - protected String getLogFileName() { - return System.getProperty("tests.logfile"); - } - protected ObjectParser getParser() { return JsonLogLine.ECS_LOG_LINE; } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java index 6a975a2a08518..ec1bf13bd993b 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java @@ -51,6 +51,7 @@ import java.util.Map; import java.util.Properties; import java.util.Set; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; @@ -130,7 +131,7 @@ public Node( this.distributionResolver = distributionResolver; this.spec = spec; this.name = suffix == null ? spec.getName() : spec.getName() + "-" + suffix; - this.workingDir = baseWorkingDir.resolve(name); + this.workingDir = baseWorkingDir.resolve(name != null ? name : UUID.randomUUID().toString()); this.repoDir = baseWorkingDir.resolve("repo"); this.dataDir = workingDir.resolve("data"); this.logsDir = workingDir.resolve("logs"); @@ -386,7 +387,9 @@ private void writeConfiguration() { // Write settings to elasticsearch.yml Map finalSettings = new HashMap<>(); finalSettings.put("cluster.name", spec.getCluster().getName()); - finalSettings.put("node.name", name); + if (name != null) { + finalSettings.put("node.name", name); + } finalSettings.put("path.repo", repoDir.toString()); finalSettings.put("path.data", dataDir.toString()); finalSettings.put("path.logs", logsDir.toString()); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java index fd49d3ab8c4e4..7f1a384ebb43d 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java @@ -18,7 +18,9 @@ import java.util.ArrayList; import java.util.List; +import java.util.Objects; import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Supplier; @@ -141,9 +143,10 @@ protected LocalClusterSpec buildClusterSpec() { if (nodeBuilders.isEmpty()) { // No node-specific configuration so assume a single-node cluster - nodeSpecs = List.of(new DefaultLocalNodeSpecBuilder(this).build(clusterSpec)); + nodeSpecs = List.of(new DefaultLocalNodeSpecBuilder(this).build(clusterSpec, 0)); } else { - nodeSpecs = nodeBuilders.stream().map(node -> node.build(clusterSpec)).toList(); + AtomicInteger nodeIndex = new AtomicInteger(0); + nodeSpecs = nodeBuilders.stream().map(node -> node.build(clusterSpec, nodeIndex.getAndIncrement())).toList(); } clusterSpec.setNodes(nodeSpecs); @@ -154,6 +157,7 @@ protected LocalClusterSpec buildClusterSpec() { public static class DefaultLocalNodeSpecBuilder extends AbstractLocalSpecBuilder implements LocalNodeSpecBuilder { private String name; + private boolean unsetName = false; protected DefaultLocalNodeSpecBuilder(AbstractLocalSpecBuilder parent) { super(parent); @@ -161,15 +165,37 @@ protected DefaultLocalNodeSpecBuilder(AbstractLocalSpecBuilder parent) { @Override public DefaultLocalNodeSpecBuilder name(String name) { - this.name = name; + if (unsetName) { + throw new IllegalStateException("Cannot set name when 'withoutName()` has been used"); + } + this.name = Objects.requireNonNull( + name, + "Name cannot be set to null. Consider using '.withoutName()' method if you need node without explicitly set name" + ); return this; } - private LocalNodeSpec build(LocalClusterSpec cluster) { + @Override + public DefaultLocalNodeSpecBuilder withoutName() { + if (name != null) { + throw new IllegalStateException("Cannot use 'withoutName()', because name has been set for the node"); + } + this.unsetName = true; + return this; + } + + private String resolveName(LocalClusterSpec cluster, int nodeIndex) { + if (unsetName) { + return null; + } + return name == null ? cluster.getName() + "-" + nodeIndex : name; + } + + private LocalNodeSpec build(LocalClusterSpec cluster, int nodeIndex) { return new LocalNodeSpec( cluster, - name, + resolveName(cluster, nodeIndex), Optional.ofNullable(getVersion()).orElse(Version.CURRENT), getSettingsProviders(), getSettings(), diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSettingsProvider.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSettingsProvider.java index 98e6919f95d0c..67d19895ccc30 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSettingsProvider.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSettingsProvider.java @@ -15,9 +15,14 @@ import java.util.HashMap; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; public class DefaultSettingsProvider implements SettingsProvider { + + private static final String MULTI_NODE_DISCOVERY_TYPE = "multi-node"; + private static final String DISCOVERY_TYPE_SETTING = "discovery.type"; + @Override public Map get(LocalNodeSpec nodeSpec) { Map settings = new HashMap<>(); @@ -70,18 +75,29 @@ public Map get(LocalNodeSpec nodeSpec) { .stream() .filter(LocalNodeSpec::isMasterEligible) .map(LocalNodeSpec::getName) + .filter(Objects::nonNull) .collect(Collectors.joining(",")); - if (masterEligibleNodes.isEmpty()) { - throw new IllegalStateException( - "Cannot start cluster '" + nodeSpec.getCluster().getName() + "' as it configured with no master-eligible nodes." - ); - } + if (isMultiNodeCluster(nodeSpec.getCluster())) { + if (masterEligibleNodes.isEmpty()) { + throw new IllegalStateException( + "Cannot start multi-node cluster '" + nodeSpec.getCluster().getName() + "' as configured with no master-eligible nodes." + ); + } - settings.put("cluster.initial_master_nodes", "[" + masterEligibleNodes + "]"); - settings.put("discovery.seed_providers", "file"); - settings.put("discovery.seed_hosts", "[]"); + settings.put("cluster.initial_master_nodes", "[" + masterEligibleNodes + "]"); + settings.put("discovery.seed_providers", "file"); + settings.put("discovery.seed_hosts", "[]"); + } return settings; } + + private boolean isMultiNodeCluster(LocalClusterSpec cluster) { + return cluster.getNodes().size() > 1 + || cluster.getNodes() + .getFirst() + .getSetting(DISCOVERY_TYPE_SETTING, MULTI_NODE_DISCOVERY_TYPE) + .equals(MULTI_NODE_DISCOVERY_TYPE); + } } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index d89540d25c6cd..02fdb45dffa37 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -155,7 +156,7 @@ public LocalClusterSpec getCluster() { } public String getName() { - return name == null ? cluster.getName() + "-" + cluster.getNodes().indexOf(this) : name; + return name; } public Version getVersion() { @@ -345,7 +346,7 @@ private LocalNodeSpec getFilteredSpec(SettingsProvider filteredProvider, Setting newCluster.setNodes(nodeSpecs); - return nodeSpecs.stream().filter(n -> n.getName().equals(this.getName())).findFirst().get(); + return nodeSpecs.stream().filter(n -> Objects.equals(n.getName(), this.getName())).findFirst().get(); } } } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalNodeSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalNodeSpecBuilder.java index 7992c34dc153f..b01b50d24f5e7 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalNodeSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalNodeSpecBuilder.java @@ -15,4 +15,9 @@ public interface LocalNodeSpecBuilder extends LocalSpecBuilder Date: Thu, 31 Oct 2024 11:12:15 +0000 Subject: [PATCH 232/324] Remove Version from system index descriptors (#115793) Now it just uses mapping versions --- .../ingest/geoip/IngestGeoIpPlugin.java | 1 - .../AbstractFeatureMigrationIntegTest.java | 7 - .../migration/MultiFeatureMigrationIT.java | 3 - .../elasticsearch/http/SystemIndexRestIT.java | 1 - .../system/indices/SystemIndicesQA.java | 2 - .../aliases/NetNewSystemIndexAliasIT.java | 1 - .../indices/TestSystemIndexDescriptor.java | 5 - .../indices/SystemIndexDescriptor.java | 162 +----------------- .../SynonymsManagementAPIService.java | 1 - .../tasks/TaskResultsService.java | 1 - .../get/TransportGetAliasesActionTests.java | 1 - .../TransportCreateIndexActionTests.java | 1 - .../indices/resolve/ResolveIndexTests.java | 1 - ...ystemIndexMetadataUpgradeServiceTests.java | 1 - .../indices/SystemIndexDescriptorTests.java | 48 +----- .../SystemIndexMappingUpdateServiceTests.java | 4 - .../indices/SystemIndicesTests.java | 1 - .../core/async/AsyncTaskIndexService.java | 1 - .../security/test/TestRestrictedIndices.java | 3 +- .../secrets/ConnectorSecretsIndexService.java | 1 - .../rules/QueryRulesIndexService.java | 1 - .../search/SearchApplicationIndexService.java | 1 - .../org/elasticsearch/xpack/fleet/Fleet.java | 9 - .../xpack/inference/InferencePlugin.java | 3 - .../xpack/logstash/Logstash.java | 1 - .../xpack/ml/MachineLearning.java | 3 - .../SearchableSnapshots.java | 1 - .../support/SecuritySystemIndices.java | 5 - .../persistence/TransformInternalIndex.java | 1 - .../elasticsearch/xpack/watcher/Watcher.java | 2 - 30 files changed, 14 insertions(+), 259 deletions(-) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index 3107f0bed55e8..a09069dcf438f 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -275,7 +275,6 @@ public Collection getSystemIndexDescriptors(Settings sett .build() ) .setOrigin(INGEST_ORIGIN) - .setVersionMetaKey("version") .setPrimaryIndex(DATABASES_INDEX) .setNetNew() .build(); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java index 4b21db497ead7..ea1c8ade00abe 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java @@ -78,7 +78,6 @@ public abstract class AbstractFeatureMigrationIntegTest extends ESIntegTestCase .setType(SystemIndexDescriptor.Type.EXTERNAL_UNMANAGED) .setOrigin(ORIGIN) .setAllowedElasticProductOrigins(Collections.singletonList(ORIGIN)) - .setMinimumNodeVersion(NEEDS_UPGRADE_VERSION) .setPriorSystemIndexDescriptors(Collections.emptyList()) .build(); static final SystemIndexDescriptor INTERNAL_UNMANAGED = SystemIndexDescriptor.builder() @@ -86,7 +85,6 @@ public abstract class AbstractFeatureMigrationIntegTest extends ESIntegTestCase .setType(SystemIndexDescriptor.Type.INTERNAL_UNMANAGED) .setOrigin(ORIGIN) .setAllowedElasticProductOrigins(Collections.emptyList()) - .setMinimumNodeVersion(NEEDS_UPGRADE_VERSION) .setPriorSystemIndexDescriptors(Collections.emptyList()) .build(); @@ -98,9 +96,7 @@ public abstract class AbstractFeatureMigrationIntegTest extends ESIntegTestCase .setSettings(createSettings(NEEDS_UPGRADE_INDEX_VERSION, INTERNAL_MANAGED_FLAG_VALUE)) .setMappings(createMapping(true, true)) .setOrigin(ORIGIN) - .setVersionMetaKey(VERSION_META_KEY) .setAllowedElasticProductOrigins(Collections.emptyList()) - .setMinimumNodeVersion(NEEDS_UPGRADE_VERSION) .setPriorSystemIndexDescriptors(Collections.emptyList()) .build(); static final int INTERNAL_UNMANAGED_FLAG_VALUE = 2; @@ -113,9 +109,7 @@ public abstract class AbstractFeatureMigrationIntegTest extends ESIntegTestCase .setSettings(createSettings(NEEDS_UPGRADE_INDEX_VERSION, EXTERNAL_MANAGED_FLAG_VALUE)) .setMappings(createMapping(true, false)) .setOrigin(ORIGIN) - .setVersionMetaKey(VERSION_META_KEY) .setAllowedElasticProductOrigins(Collections.singletonList(ORIGIN)) - .setMinimumNodeVersion(NEEDS_UPGRADE_VERSION) .setPriorSystemIndexDescriptors(Collections.emptyList()) .build(); static final int EXTERNAL_UNMANAGED_FLAG_VALUE = 4; @@ -128,7 +122,6 @@ public abstract class AbstractFeatureMigrationIntegTest extends ESIntegTestCase .setType(SystemIndexDescriptor.Type.EXTERNAL_UNMANAGED) .setAllowedElasticProductOrigins(Collections.emptyList()) .setAllowedElasticProductOrigins(Collections.singletonList(ORIGIN)) - .setMinimumNodeVersion(NEEDS_UPGRADE_VERSION) .setPriorSystemIndexDescriptors(Collections.emptyList()) .setAllowsTemplates() .build(); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java index 4fcd061f421aa..1ee5519593569 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java @@ -10,7 +10,6 @@ package org.elasticsearch.migration; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusAction; import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusRequest; @@ -268,9 +267,7 @@ public void testMultipleFeatureMigration() throws Exception { .setSettings(createSettings(IndexVersions.MINIMUM_COMPATIBLE, 0)) .setMappings(createMapping(true, true)) .setOrigin(ORIGIN) - .setVersionMetaKey(VERSION_META_KEY) .setAllowedElasticProductOrigins(Collections.emptyList()) - .setMinimumNodeVersion(Version.CURRENT.minimumCompatibilityVersion()) .setPriorSystemIndexDescriptors(Collections.emptyList()) .build(); diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SystemIndexRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SystemIndexRestIT.java index 2e48860e613ee..e37ea125ac070 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SystemIndexRestIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SystemIndexRestIT.java @@ -196,7 +196,6 @@ public Collection getSystemIndexDescriptors(Settings sett .setPrimaryIndex(SYSTEM_INDEX_NAME) .setDescription("Test system index") .setOrigin(getClass().getName()) - .setVersionMetaKey("version") .setMappings(builder) .setSettings(SETTINGS) .setType(Type.INTERNAL_MANAGED) diff --git a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java index 57b0908508bb3..6e15e40efa69a 100644 --- a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java +++ b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java @@ -77,7 +77,6 @@ public Collection getSystemIndexDescriptors(Settings sett .build() ) .setOrigin(TASKS_ORIGIN) - .setVersionMetaKey("version") .setPrimaryIndex(".net-new-system-index-primary") .build(), SystemIndexDescriptor.builder() @@ -99,7 +98,6 @@ public Collection getSystemIndexDescriptors(Settings sett .build() ) .setOrigin(TASKS_ORIGIN) - .setVersionMetaKey("version") .setPrimaryIndex(".internal-managed-index-primary") .setAliasName(".internal-managed-alias") .build() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java index 27392fefbf4ca..cf54bc50398c4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java @@ -89,7 +89,6 @@ public Collection getSystemIndexDescriptors(Settings sett .setPrimaryIndex(SYSTEM_INDEX_NAME) .setDescription("Test system index") .setOrigin(getClass().getName()) - .setVersionMetaKey("version") .setMappings(builder) .setSettings(SETTINGS) .setType(SystemIndexDescriptor.Type.INTERNAL_MANAGED) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java index cc05bb4c82ceb..487bbf7c9a4be 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java @@ -9,7 +9,6 @@ package org.elasticsearch.indices; -import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; @@ -51,9 +50,7 @@ public class TestSystemIndexDescriptor extends SystemIndexDescriptor { SETTINGS, INDEX_NAME, 0, - "version", "stack", - null, Type.INTERNAL_MANAGED, List.of(), List.of(), @@ -72,9 +69,7 @@ public class TestSystemIndexDescriptor extends SystemIndexDescriptor { SETTINGS, name, 0, - "version", "stack", - Version.fromString(Build.current().minWireCompatVersion()), Type.INTERNAL_MANAGED, List.of(), List.of(), diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java b/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java index f3456870114f5..8b9610a52cc3d 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java @@ -13,7 +13,6 @@ import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.AutoCreateAction; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -36,7 +35,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -141,25 +139,12 @@ public class SystemIndexDescriptor implements IndexPatternMatcher, Comparable_meta in the index mappings - * that contains the index's mappings' {@link Version}. We need to read and write this field for - * backwards compatibility. - */ - private final String mappingsNodeVersionMetaKey; - /** The version meta key for the integer system index mapping version */ public static final String VERSION_META_KEY = "managed_index_mappings_version"; /** For internally-managed indices, specifies the origin to use when creating or updating the index */ private final String origin; - /** The minimum cluster node version required for this descriptor */ - private final Version minimumNodeVersion; - - /** Legacy mapping version from the descriptor */ - private final Version mappingsNodeVersion; - /** Mapping version from the descriptor */ private final MappingsVersion mappingsVersion; @@ -209,11 +194,8 @@ public class SystemIndexDescriptor implements IndexPatternMatcher, Comparable_meta where a version can be found, which indicates the - * Elasticsearch version when the index was created. * @param origin the client origin to use when creating this index. Internal system indices must not provide an origin, while external * system indices must do so. - * @param minimumNodeVersion the minimum cluster node version required for this descriptor * @param type The {@link Type} of system index * @param allowedElasticProductOrigins A list of allowed origin values that should be allowed access in the case of external system * indices @@ -229,9 +211,7 @@ protected SystemIndexDescriptor( Settings settings, String aliasName, int indexFormat, - String mappingsNodeVersionMetaKey, String origin, - @Deprecated Version minimumNodeVersion, Type type, List allowedElasticProductOrigins, List priorSystemIndexDescriptors, @@ -282,12 +262,10 @@ protected SystemIndexDescriptor( Objects.requireNonNull(settings, "Must supply settings for a managed system index"); Strings.requireNonEmpty(mappings, "Must supply mappings for a managed system index"); Strings.requireNonEmpty(primaryIndex, "Must supply primaryIndex for a managed system index"); - Strings.requireNonEmpty(mappingsNodeVersionMetaKey, "Must supply nodeVersionMetaKey for a managed system index"); Strings.requireNonEmpty(origin, "Must supply origin for a managed system index"); if (settings.getAsInt(IndexMetadata.INDEX_FORMAT_SETTING.getKey(), 0) != indexFormat) { throw new IllegalArgumentException("Descriptor index format does not match index format in managed settings"); } - this.mappingsNodeVersion = bestEffortExtractNodeVersionFromMappings(mappings, mappingsNodeVersionMetaKey); this.mappingsVersion = extractVersionFromMappings(mappings); assert mappingsVersion.version >= 0 : "The mappings version must not be negative"; @@ -295,8 +273,6 @@ protected SystemIndexDescriptor( assert Objects.isNull(settings) : "Unmanaged index descriptors should not have settings"; assert Objects.isNull(mappings) : "Unmanaged index descriptors should not have mappings"; assert Objects.isNull(primaryIndex) : "Unmanaged index descriptors should not have a primary index"; - assert Objects.isNull(mappingsNodeVersionMetaKey) : "Unmanaged index descriptors should not have a version meta key"; - this.mappingsNodeVersion = null; this.mappingsVersion = null; } @@ -310,8 +286,8 @@ protected SystemIndexDescriptor( Objects.requireNonNull(priorSystemIndexDescriptors, "priorSystemIndexDescriptors must not be null"); if (priorSystemIndexDescriptors.isEmpty() == false) { // the rules for prior system index descriptors - // 1. No values with the same minimum node version - // 2. All prior system index descriptors must have a minimumNodeVersion before this one + // 1. No values with the same minimum mappings version + // 2. All prior system index descriptors must have a lower mappings version // 3. Prior system index descriptors may not have other prior system index descriptors // to avoid multiple branches that need followed // 4. Must have same indexPattern, primaryIndex, and alias @@ -383,9 +359,7 @@ protected SystemIndexDescriptor( throw new IllegalArgumentException("System indices must have " + IndexMetadata.SETTING_INDEX_HIDDEN + " set to true."); } this.indexFormat = indexFormat; - this.mappingsNodeVersionMetaKey = mappingsNodeVersionMetaKey; this.origin = origin; - this.minimumNodeVersion = minimumNodeVersion; this.type = type; this.allowedElasticProductOrigins = allowedElasticProductOrigins; this.hasDynamicMappings = this.mappings != null @@ -478,16 +452,6 @@ public int getIndexFormat() { return this.indexFormat; } - public String getMappingsNodeVersionMetaKey() { - assert isAutomaticallyManaged() : "Do not request version meta keys for unmanaged system indices"; - return this.mappingsNodeVersionMetaKey; - } - - public Version getMinimumNodeVersion() { - assert isAutomaticallyManaged() : "Do not request version minimum node version for unmanaged system indices"; - return minimumNodeVersion; - } - public boolean isAutomaticallyManaged() { return type.isManaged(); } @@ -534,18 +498,6 @@ public boolean allowsTemplates() { return allowsTemplates; } - /** - * Use of the mappings {@link Version} should be replaced with the value returned from {@link #getMappingsVersion()} - * @return Elasticsearch version associated with this descriptor's mappings. - */ - @Deprecated - public Version getMappingsNodeVersion() { - if (isAutomaticallyManaged() == false) { - throw new IllegalStateException(this + " is not managed so there are no mappings or version"); - } - return mappingsNodeVersion; - } - public MappingsVersion getMappingsVersion() { if (isAutomaticallyManaged() == false) { throw new IllegalStateException(this + " is not managed so there are no mappings or version"); @@ -554,7 +506,7 @@ public MappingsVersion getMappingsVersion() { } /** - * Gets a standardized message when the node contains a data or master node whose version is less + * Gets a standardized message when the node contains a data or master node whose mappings version is less * than that of the minimum supported version of this descriptor and its prior descriptors. * * @param cause the action being attempted that triggered the check. Used in the error message. @@ -573,55 +525,12 @@ public String getMinimumMappingsVersionMessage(String cause) { ); } - /** - * Gets a standardized message when the node contains a data or master node whose version is less - * than that of the minimum supported version of this descriptor and its prior descriptors. - * - * @param cause the action being attempted that triggered the check. Used in the error message. - * @return the standardized error message - */ - @Deprecated - public String getMinimumNodeVersionMessage(String cause) { - Objects.requireNonNull(cause); - final Version actualMinimumVersion = priorSystemIndexDescriptors.isEmpty() - ? minimumNodeVersion - : priorSystemIndexDescriptors.get(priorSystemIndexDescriptors.size() - 1).minimumNodeVersion; - return String.format( - Locale.ROOT, - "[%s] failed - system index [%s] requires all data and master nodes to be at least version [%s]", - cause, - this.getPrimaryIndex(), - actualMinimumVersion - ); - } - /** * Finds the descriptor that can be used within this cluster, by comparing the supplied minimum - * node version to this descriptor's minimum version and the prior descriptors minimum version. + * mappings version to this descriptor's minimum version and the prior descriptors minimum version. * - * @param version the lower node version in the cluster - * @return null if the lowest node version is lower than the minimum version in this descriptor, - * or the appropriate descriptor if the supplied version is acceptable. - */ - @Deprecated - public SystemIndexDescriptor getDescriptorCompatibleWith(Version version) { - if (minimumNodeVersion.onOrBefore(version)) { - return this; - } - for (SystemIndexDescriptor prior : priorSystemIndexDescriptors) { - if (version.onOrAfter(prior.minimumNodeVersion)) { - return prior; - } - } - return null; - } - - /** - * Finds the descriptor that can be used within this cluster, by comparing the supplied minimum - * node version to this descriptor's minimum version and the prior descriptors minimum version. - * - * @param version the lower node version in the cluster - * @return null if the lowest node version is lower than the minimum version in this descriptor, + * @param version the lower mappings version in the cluster + * @return null if the lowest mappings version is lower than the minimum version in this descriptor, * or the appropriate descriptor if the supplied version is acceptable. */ public SystemIndexDescriptor getDescriptorCompatibleWith(MappingsVersion version) { @@ -637,8 +546,7 @@ public SystemIndexDescriptor getDescriptorCompatibleWith(MappingsVersion version } /** - * @return The names of thread pools that should be used for operations on this - * system index. + * @return The names of thread pools that should be used for operations on this system index. */ public ExecutorNames getThreadPoolNames() { return this.executorNames; @@ -650,7 +558,7 @@ public static Builder builder() { @Override public int compareTo(SystemIndexDescriptor other) { - return minimumNodeVersion.compareTo(other.minimumNodeVersion) * -1; + return mappingsVersion.compareTo(other.mappingsVersion) * -1; } /** @@ -745,9 +653,7 @@ public static class Builder { private Settings settings = null; private String aliasName = null; private int indexFormat = 0; - private String versionMetaKey = null; private String origin = null; - private Version minimumNodeVersion = Version.CURRENT.minimumCompatibilityVersion(); private Type type = Type.INTERNAL_MANAGED; private List allowedElasticProductOrigins = List.of(); private List priorSystemIndexDescriptors = List.of(); @@ -797,11 +703,6 @@ public Builder setIndexFormat(int indexFormat) { return this; } - public Builder setVersionMetaKey(String versionMetaKey) { - this.versionMetaKey = versionMetaKey; - return this; - } - /** * Sometimes a system operation will need to dispatch sub-actions. A product origin string will tell the system which component * generated the sub-action. Internal system indices must not provide an origin, since they are supposed to reject access from @@ -815,11 +716,6 @@ public Builder setOrigin(String origin) { return this; } - public Builder setMinimumNodeVersion(Version version) { - this.minimumNodeVersion = version; - return this; - } - public Builder setType(Type type) { this.type = type; return this; @@ -863,9 +759,7 @@ public SystemIndexDescriptor build() { settings, aliasName, indexFormat, - versionMetaKey, origin, - minimumNodeVersion, type, allowedElasticProductOrigins, priorSystemIndexDescriptors, @@ -962,44 +856,4 @@ private static MappingsVersion extractVersionFromMappings(String mappings) { } return new MappingsVersion(value, Objects.hash(properties)); } - - /** - * An accurate node version is no longer required in system index mappings metadata. - * because the mappings version should be used to determine if an upgrade is required, - * not the node version. However, some parts of the code are still relying on - * mappingsNodeVersion. This method allows sections of the code to stop - * accurately setting node version in their mappings while other sections continue to - * use it. Once all uses of mappingsNodeVersion are removed this method - * can be removed too. - */ - @Deprecated - private static Version bestEffortExtractNodeVersionFromMappings(String mappings, String versionMetaKey) { - try { - return extractNodeVersionFromMappings(mappings, versionMetaKey); - } catch (Exception e) { - return null; - } - } - - @Deprecated - @SuppressWarnings("unchecked") - private static Version extractNodeVersionFromMappings(String mappings, String versionMetaKey) { - final Map mappingsMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), mappings, false); - final Map doc = (Map) mappingsMap.get("_doc"); - final Map meta; - if (doc == null) { - meta = (Map) mappingsMap.get("_meta"); - } else { - meta = (Map) doc.get("_meta"); - } - if (meta == null) { - throw new IllegalStateException("mappings do not have _meta field"); - } - final String value = (String) meta.get(versionMetaKey); - if (value == null) { - throw new IllegalArgumentException("mappings do not have a version in _meta." + versionMetaKey); - } - return Version.fromString(value); - } - } diff --git a/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java b/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java index c760e8043e262..72d0a1330a249 100644 --- a/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java +++ b/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java @@ -106,7 +106,6 @@ public class SynonymsManagementAPIService { .setIndexFormat(SYNONYMS_INDEX_FORMAT) .setMappings(mappings()) .setSettings(settings()) - .setVersionMetaKey("version") .setOrigin(SYNONYMS_ORIGIN) .build(); diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java index 0c9833fad7640..cafad960c168f 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java @@ -59,7 +59,6 @@ public class TaskResultsService { .setDescription("Task Result Index") .setSettings(getTaskResultIndexSettings()) .setMappings(getTaskResultIndexMappings()) - .setVersionMetaKey(TASK_RESULT_MAPPING_VERSION_META_FIELD) .setOrigin(TASKS_ORIGIN) .build(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesActionTests.java index b40078ca7f1a9..ec5f0b489d6ef 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesActionTests.java @@ -277,7 +277,6 @@ public void testNetNewSystemIndicesDontErrorWhenNotRequested() { .setDescription(this.getTestName()) .setMappings("{\"_meta\": {\"version\": \"1.0.0\", \"" + SystemIndexDescriptor.VERSION_META_KEY + "\": 0}}") .setSettings(Settings.EMPTY) - .setVersionMetaKey("version") .setOrigin(this.getTestName()) .setNetNew() .build(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java index 7e590dc2cdeec..9f8b4da357ee5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java @@ -104,7 +104,6 @@ public class TransportCreateIndexActionTests extends ESTestCase { } }" """, SystemIndexDescriptor.VERSION_META_KEY)) - .setVersionMetaKey("version") .setOrigin("origin") .build() ) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java index 834bacd9e6a04..423de4b43088f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java @@ -525,7 +525,6 @@ private SystemIndices addSystemIndex(Metadata.Builder mdBuilder) { } """, SystemIndexDescriptor.VERSION_META_KEY)) .setPrimaryIndex(".test-net-new-system-1") - .setVersionMetaKey("version") .setOrigin("system") .build() ) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeServiceTests.java index ac9d5021329ab..0dcea706e7f94 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeServiceTests.java @@ -46,7 +46,6 @@ public class SystemIndexMetadataUpgradeServiceTests extends ESTestCase { .setAliasName(SYSTEM_ALIAS_NAME) .setSettings(getSettingsBuilder().build()) .setMappings(MAPPINGS) - .setVersionMetaKey("version") .setOrigin("FAKE_ORIGIN") .build(); diff --git a/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java b/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java index 59d5cf4359bb1..eedd7c2594691 100644 --- a/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java +++ b/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java @@ -10,14 +10,12 @@ package org.elasticsearch.indices; import org.apache.lucene.util.automaton.CharacterRunAutomaton; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.indices.SystemIndexDescriptor.Type; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xcontent.json.JsonXContent; import java.util.List; @@ -38,7 +36,6 @@ public class SystemIndexDescriptorTests extends ESTestCase { { "_doc": { "_meta": { - "version": "7.4.0", "%s": %d } } @@ -211,7 +208,6 @@ public void testPriorSystemIndexDescriptorValidation() { .setType(Type.INTERNAL_MANAGED) .setSettings(Settings.EMPTY) .setMappings(getVersionedMappings(TEST_MAPPINGS_VERSION + 1)) - .setVersionMetaKey("version") .setOrigin("system") .setPriorSystemIndexDescriptors(List.of(prior)) .build() @@ -272,9 +268,7 @@ public void testGetDescriptorCompatibleWith() { .setType(Type.INTERNAL_MANAGED) .setSettings(Settings.EMPTY) .setMappings(getVersionedMappings(TEST_MAPPINGS_PRIOR_VERSION)) - .setVersionMetaKey("version") .setOrigin("system") - .setMinimumNodeVersion(Version.V_7_0_0) .build(); final SystemIndexDescriptor descriptor = SystemIndexDescriptor.builder() .setIndexPattern(".system*") @@ -284,27 +278,15 @@ public void testGetDescriptorCompatibleWith() { .setType(Type.INTERNAL_MANAGED) .setSettings(Settings.EMPTY) .setMappings(MAPPINGS) - .setVersionMetaKey("version") .setOrigin("system") .setPriorSystemIndexDescriptors(List.of(prior)) .build(); - SystemIndexDescriptor compat = descriptor.getDescriptorCompatibleWith(Version.CURRENT); + SystemIndexDescriptor compat = descriptor.getDescriptorCompatibleWith(descriptor.getMappingsVersion()); assertSame(descriptor, compat); - compat = descriptor.getDescriptorCompatibleWith(descriptor.getMappingsVersion()); - assertSame(descriptor, compat); - - assertNull(descriptor.getDescriptorCompatibleWith(Version.fromString("6.8.0"))); assertNull(descriptor.getDescriptorCompatibleWith(new SystemIndexDescriptor.MappingsVersion(TEST_MAPPINGS_NONEXISTENT_VERSION, 1))); - compat = descriptor.getDescriptorCompatibleWith(Version.CURRENT.minimumCompatibilityVersion()); - assertSame(descriptor, compat); - - Version priorToMin = VersionUtils.getPreviousVersion(descriptor.getMinimumNodeVersion()); - compat = descriptor.getDescriptorCompatibleWith(priorToMin); - assertSame(prior, compat); - SystemIndexDescriptor.MappingsVersion priorToMinMappingsVersion = new SystemIndexDescriptor.MappingsVersion( TEST_MAPPINGS_PRIOR_VERSION, 1 @@ -312,11 +294,6 @@ public void testGetDescriptorCompatibleWith() { compat = descriptor.getDescriptorCompatibleWith(priorToMinMappingsVersion); assertSame(prior, compat); - compat = descriptor.getDescriptorCompatibleWith( - VersionUtils.randomVersionBetween(random(), prior.getMinimumNodeVersion(), priorToMin) - ); - assertSame(prior, compat); - compat = descriptor.getDescriptorCompatibleWith( new SystemIndexDescriptor.MappingsVersion(randomIntBetween(TEST_MAPPINGS_PRIOR_VERSION, TEST_MAPPINGS_VERSION - 1), 1) ); @@ -331,7 +308,6 @@ public void testSystemIndicesMustBeHidden() { .setAliasName(".system") .setType(Type.INTERNAL_MANAGED) .setMappings(MAPPINGS) - .setVersionMetaKey("version") .setOrigin("system"); builder.setSettings(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, false).build()); @@ -371,7 +347,6 @@ public void testManagedSystemIndexMustHaveMatchingIndexFormat() { .setMappings(MAPPINGS) .setSettings(Settings.builder().put("index.format", 5).build()) .setIndexFormat(0) - .setVersionMetaKey("version") .setOrigin("system"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); @@ -394,16 +369,7 @@ public void testUnmanagedIndexMappingsVersion() { // test mapping versions can't be negative public void testNegativeMappingsVersion() { int negativeVersion = randomIntBetween(Integer.MIN_VALUE, -1); - String mappings = Strings.format(""" - { - "_doc": { - "_meta": { - "version": "7.4.0", - "%s": %d - } - } - } - """, SystemIndexDescriptor.VERSION_META_KEY, negativeVersion); + String mappings = Strings.format(MAPPINGS_FORMAT_STRING, SystemIndexDescriptor.VERSION_META_KEY, negativeVersion); SystemIndexDescriptor.Builder builder = priorSystemIndexDescriptorBuilder().setMappings(mappings); @@ -429,7 +395,6 @@ public void testHashesIgnoreMappingMetadata() { { "_doc": { "_meta": { - "version": "%s", "%s": %d } }, @@ -441,15 +406,14 @@ public void testHashesIgnoreMappingMetadata() { } """; - String mappings1 = Strings.format(mappingFormatString, "8.9.0", SystemIndexDescriptor.VERSION_META_KEY, randomIntBetween(1, 10)); - String mappings2 = Strings.format(mappingFormatString, "8.10.0", SystemIndexDescriptor.VERSION_META_KEY, randomIntBetween(11, 20)); + String mappings1 = Strings.format(mappingFormatString, SystemIndexDescriptor.VERSION_META_KEY, randomIntBetween(1, 10)); + String mappings2 = Strings.format(mappingFormatString, SystemIndexDescriptor.VERSION_META_KEY, randomIntBetween(11, 20)); SystemIndexDescriptor descriptor1 = priorSystemIndexDescriptorBuilder().setMappings(mappings1).build(); SystemIndexDescriptor descriptor2 = priorSystemIndexDescriptorBuilder().setMappings(mappings2).build(); assertThat(descriptor1.getMappingsVersion().hash(), equalTo(descriptor2.getMappingsVersion().hash())); assertThat(descriptor1.getMappingsVersion().version(), not(equalTo(descriptor2.getMappingsVersion().version()))); - assertThat(descriptor1.getMappingsNodeVersion(), not(equalTo(descriptor2.getMappingsNodeVersion()))); } private SystemIndexDescriptor.Builder priorSystemIndexDescriptorBuilder() { @@ -461,8 +425,6 @@ private SystemIndexDescriptor.Builder priorSystemIndexDescriptorBuilder() { .setType(Type.INTERNAL_MANAGED) .setSettings(Settings.EMPTY) .setMappings(MAPPINGS) - .setVersionMetaKey("version") - .setOrigin("system") - .setMinimumNodeVersion(Version.V_7_0_0); + .setOrigin("system"); } } diff --git a/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java b/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java index 14536e76f3c04..795c3782ac47c 100644 --- a/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java @@ -74,7 +74,6 @@ public class SystemIndexMappingUpdateServiceTests extends ESTestCase { .setIndexFormat(6) .setSettings(getSettings()) .setMappings(getMappings()) - .setVersionMetaKey("version") .setOrigin("FAKE_ORIGIN") .build(); @@ -103,7 +102,6 @@ public void testManagerSkipsDescriptorsThatAreNotManaged() { .setMappings(getMappings()) .setSettings(getSettings()) .setIndexFormat(6) - .setVersionMetaKey("version") .setOrigin("FAKE_ORIGIN") .build(); @@ -137,7 +135,6 @@ public void testManagerSkipsDescriptorsForIndicesThatDoNotExist() { .setMappings(getMappings()) .setSettings(getSettings()) .setIndexFormat(6) - .setVersionMetaKey("version") .setOrigin("FAKE_ORIGIN") .build(); SystemIndexDescriptor d2 = SystemIndexDescriptor.builder() @@ -146,7 +143,6 @@ public void testManagerSkipsDescriptorsForIndicesThatDoNotExist() { .setMappings(getMappings()) .setSettings(getSettings()) .setIndexFormat(6) - .setVersionMetaKey("version") .setOrigin("FAKE_ORIGIN") .build(); diff --git a/server/src/test/java/org/elasticsearch/indices/SystemIndicesTests.java b/server/src/test/java/org/elasticsearch/indices/SystemIndicesTests.java index 77f3d75015d0d..dfcb1c0f1db9b 100644 --- a/server/src/test/java/org/elasticsearch/indices/SystemIndicesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/SystemIndicesTests.java @@ -263,7 +263,6 @@ public void testMappingsVersions() { SystemIndexDescriptor managed = SystemIndexDescriptor.builder() .setIndexPattern(".managed-*") .setPrimaryIndex(".managed-primary") - .setVersionMetaKey("version") .setOrigin("system") .setSettings(Settings.EMPTY) .setMappings(""" diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java index e44af60a45e08..fe970bef87145 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java @@ -144,7 +144,6 @@ public static SystemIndexDescriptor getSystemIndexDescriptor() { .setPrimaryIndex(XPackPlugin.ASYNC_RESULTS_INDEX) .setMappings(mappings()) .setSettings(settings()) - .setVersionMetaKey("version") .setOrigin(ASYNC_SEARCH_ORIGIN) .build(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java index 9e26444040b03..3848d785275d4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java @@ -179,8 +179,7 @@ public class TestRestrictedIndices { private static SystemIndexDescriptor.Builder getInitializedDescriptorBuilder(int indexFormat) { return SystemIndexDescriptor.builder() .setMappings(mockMappings()) - .setSettings(Settings.builder().put(IndexMetadata.INDEX_FORMAT_SETTING.getKey(), indexFormat).build()) - .setVersionMetaKey("version"); + .setSettings(Settings.builder().put(IndexMetadata.INDEX_FORMAT_SETTING.getKey(), indexFormat).build()); } private static SystemIndexDescriptor getMainSecurityDescriptor() { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/ConnectorSecretsIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/ConnectorSecretsIndexService.java index ba9a3e78281dd..7c26c2975d8ee 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/ConnectorSecretsIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/ConnectorSecretsIndexService.java @@ -70,7 +70,6 @@ public static SystemIndexDescriptor getSystemIndexDescriptor() { .setMappings(request.mappings()) .setSettings(request.settings()) .setAliasName(CONNECTOR_SECRETS_INDEX_NAME) - .setVersionMetaKey("version") .setOrigin(CONNECTORS_ORIGIN) .setType(SystemIndexDescriptor.Type.INTERNAL_MANAGED) .build(); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java index 8bf4bbd5716b7..3ce51ae5d832d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java @@ -92,7 +92,6 @@ public static SystemIndexDescriptor getSystemIndexDescriptor() { .setSettings(getIndexSettings()) .setAliasName(QUERY_RULES_ALIAS_NAME) .setIndexFormat(QueryRulesIndexMappingVersion.latest().id) - .setVersionMetaKey("version") .setOrigin(ENT_SEARCH_ORIGIN) .setThreadPools(ExecutorNames.DEFAULT_SYSTEM_INDEX_THREAD_POOLS); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java index 30d533aeb9ae5..2219a2bebbd25 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java @@ -130,7 +130,6 @@ public static SystemIndexDescriptor getSystemIndexDescriptor() { .setMappings(getIndexMappings()) .setSettings(getIndexSettings()) .setAliasName(SEARCH_APPLICATION_ALIAS_NAME) - .setVersionMetaKey("version") .setOrigin(ENT_SEARCH_ORIGIN) .setThreadPools(ExecutorNames.DEFAULT_SYSTEM_INDEX_THREAD_POOLS) .build(); diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java index 40e41cb18bdf8..138a07833e584 100644 --- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java +++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java @@ -77,7 +77,6 @@ public class Fleet extends Plugin implements SystemIndexPlugin { public static final String FLEET_SECRETS_INDEX_NAME = ".fleet-secrets"; private static final int CURRENT_INDEX_VERSION = 7; - private static final String VERSION_KEY = "version"; private static final String MAPPING_VERSION_VARIABLE = "fleet.version"; private static final List ALLOWED_PRODUCTS = List.of("kibana", "fleet"); private static final int FLEET_ACTIONS_MAPPINGS_VERSION = 1; @@ -140,7 +139,6 @@ private static SystemIndexDescriptor fleetActionsSystemIndexDescriptor() { .setType(Type.EXTERNAL_MANAGED) .setAllowedElasticProductOrigins(ALLOWED_PRODUCTS) .setOrigin(FLEET_ORIGIN) - .setVersionMetaKey(VERSION_KEY) .setMappings(request.mappings()) .setSettings(request.settings()) .setPrimaryIndex(".fleet-actions-" + CURRENT_INDEX_VERSION) @@ -158,7 +156,6 @@ private static SystemIndexDescriptor fleetAgentsSystemIndexDescriptor() { .setType(Type.EXTERNAL_MANAGED) .setAllowedElasticProductOrigins(ALLOWED_PRODUCTS) .setOrigin(FLEET_ORIGIN) - .setVersionMetaKey(VERSION_KEY) .setMappings(request.mappings()) .setSettings(request.settings()) .setPrimaryIndex(".fleet-agents-" + CURRENT_INDEX_VERSION) @@ -179,7 +176,6 @@ private static SystemIndexDescriptor fleetEnrollmentApiKeysSystemIndexDescriptor .setType(Type.EXTERNAL_MANAGED) .setAllowedElasticProductOrigins(ALLOWED_PRODUCTS) .setOrigin(FLEET_ORIGIN) - .setVersionMetaKey(VERSION_KEY) .setMappings(request.mappings()) .setSettings(request.settings()) .setPrimaryIndex(".fleet-enrollment-api-keys-" + CURRENT_INDEX_VERSION) @@ -195,7 +191,6 @@ private static SystemIndexDescriptor fleetSecretsSystemIndexDescriptor() { return SystemIndexDescriptor.builder() .setType(Type.INTERNAL_MANAGED) .setOrigin(FLEET_ORIGIN) - .setVersionMetaKey(VERSION_KEY) .setMappings(request.mappings()) .setSettings(request.settings()) .setPrimaryIndex(FLEET_SECRETS_INDEX_NAME + "-" + CURRENT_INDEX_VERSION) @@ -213,7 +208,6 @@ private static SystemIndexDescriptor fleetPoliciesSystemIndexDescriptor() { .setType(Type.EXTERNAL_MANAGED) .setAllowedElasticProductOrigins(ALLOWED_PRODUCTS) .setOrigin(FLEET_ORIGIN) - .setVersionMetaKey(VERSION_KEY) .setMappings(request.mappings()) .setSettings(request.settings()) .setPrimaryIndex(".fleet-policies-" + CURRENT_INDEX_VERSION) @@ -231,7 +225,6 @@ private static SystemIndexDescriptor fleetPoliciesLeaderSystemIndexDescriptor() .setType(Type.EXTERNAL_MANAGED) .setAllowedElasticProductOrigins(ALLOWED_PRODUCTS) .setOrigin(FLEET_ORIGIN) - .setVersionMetaKey(VERSION_KEY) .setMappings(request.mappings()) .setSettings(request.settings()) .setPrimaryIndex(".fleet-policies-leader-" + CURRENT_INDEX_VERSION) @@ -249,7 +242,6 @@ private static SystemIndexDescriptor fleetServersSystemIndexDescriptors() { .setType(Type.EXTERNAL_MANAGED) .setAllowedElasticProductOrigins(ALLOWED_PRODUCTS) .setOrigin(FLEET_ORIGIN) - .setVersionMetaKey(VERSION_KEY) .setMappings(request.mappings()) .setSettings(request.settings()) .setPrimaryIndex(".fleet-servers-" + CURRENT_INDEX_VERSION) @@ -267,7 +259,6 @@ private static SystemIndexDescriptor fleetArtifactsSystemIndexDescriptors() { .setType(Type.EXTERNAL_MANAGED) .setAllowedElasticProductOrigins(ALLOWED_PRODUCTS) .setOrigin(FLEET_ORIGIN) - .setVersionMetaKey(VERSION_KEY) .setMappings(request.mappings()) .setSettings(request.settings()) .setPrimaryIndex(".fleet-artifacts-" + CURRENT_INDEX_VERSION) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 0450400e5ca8b..3ad1e626481bc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -288,7 +288,6 @@ public Collection getSystemIndexDescriptors(Settings sett .setDescription("Contains inference service and model configuration") .setMappings(InferenceIndex.mappingsV1()) .setSettings(InferenceIndex.settings()) - .setVersionMetaKey("version") .setOrigin(ClientHelper.INFERENCE_ORIGIN) .build(); @@ -301,7 +300,6 @@ public Collection getSystemIndexDescriptors(Settings sett .setDescription("Contains inference service and model configuration") .setMappings(InferenceIndex.mappings()) .setSettings(InferenceIndex.settings()) - .setVersionMetaKey("version") .setOrigin(ClientHelper.INFERENCE_ORIGIN) .setPriorSystemIndexDescriptors(List.of(inferenceIndexV1Descriptor)) .build(), @@ -312,7 +310,6 @@ public Collection getSystemIndexDescriptors(Settings sett .setDescription("Contains inference service secrets") .setMappings(InferenceSecretsIndex.mappings()) .setSettings(InferenceSecretsIndex.settings()) - .setVersionMetaKey("version") .setOrigin(ClientHelper.INFERENCE_ORIGIN) .setNetNew() .build() diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java index 819f41781a307..15582e2b74768 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java @@ -102,7 +102,6 @@ public Collection getSystemIndexDescriptors(Settings sett .setDescription("Contains data for Logstash Central Management") .setMappings(getIndexMappings()) .setSettings(getIndexSettings()) - .setVersionMetaKey("logstash-version") .setOrigin(LOGSTASH_MANAGEMENT_ORIGIN) .build() ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 6d21654f9e161..1feb95661f33a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -1980,7 +1980,6 @@ public Collection getSystemIndexDescriptors(Settings unus .setDescription("Contains scheduling and anomaly tracking metadata") .setMappings(MlMetaIndex.mapping()) .setSettings(MlMetaIndex.settings()) - .setVersionMetaKey("version") .setOrigin(ML_ORIGIN) .build(), SystemIndexDescriptor.builder() @@ -1989,7 +1988,6 @@ public Collection getSystemIndexDescriptors(Settings unus .setDescription("Contains ML configuration data") .setMappings(MlConfigIndex.mapping()) .setSettings(MlConfigIndex.settings()) - .setVersionMetaKey("version") .setOrigin(ML_ORIGIN) .build(), getInferenceIndexSystemIndexDescriptor() @@ -2003,7 +2001,6 @@ public static SystemIndexDescriptor getInferenceIndexSystemIndexDescriptor() { .setDescription("Contains ML model configuration and statistics") .setMappings(InferenceIndexConstants.mapping()) .setSettings(InferenceIndexConstants.settings()) - .setVersionMetaKey("version") .setOrigin(ML_ORIGIN) .build(); } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index 5ac8cdb43aa33..eabdf7c9bf46c 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -397,7 +397,6 @@ public Collection getSystemIndexDescriptors(Settings unus .setMappings(getIndexMappings()) .setSettings(getIndexSettings()) .setOrigin(SEARCHABLE_SNAPSHOTS_ORIGIN) - .setVersionMetaKey("version") .build() ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java index 77c7d19e94a9b..609e6696bcb0f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java @@ -143,7 +143,6 @@ private SystemIndexDescriptor getSecurityMainIndexDescriptor() { .setSettings(getMainIndexSettings()) .setAliasName(SECURITY_MAIN_ALIAS) .setIndexFormat(INTERNAL_MAIN_INDEX_FORMAT) - .setVersionMetaKey(SECURITY_VERSION_STRING) .setOrigin(SECURITY_ORIGIN) .setThreadPools(ExecutorNames.CRITICAL_SYSTEM_INDEX_THREAD_POOLS); @@ -695,7 +694,6 @@ private static SystemIndexDescriptor getSecurityTokenIndexDescriptor() { .setSettings(getTokenIndexSettings()) .setAliasName(SECURITY_TOKENS_ALIAS) .setIndexFormat(INTERNAL_TOKENS_INDEX_FORMAT) - .setVersionMetaKey(SECURITY_VERSION_STRING) .setOrigin(SECURITY_ORIGIN) .setThreadPools(ExecutorNames.CRITICAL_SYSTEM_INDEX_THREAD_POOLS) .build(); @@ -879,10 +877,8 @@ private SystemIndexDescriptor getSecurityProfileIndexDescriptor(Settings setting .setSettings(getProfileIndexSettings(settings)) .setAliasName(SECURITY_PROFILE_ALIAS) .setIndexFormat(INTERNAL_PROFILE_INDEX_FORMAT) - .setVersionMetaKey(SECURITY_VERSION_STRING) .setOrigin(SECURITY_PROFILE_ORIGIN) // new origin since 8.3 .setThreadPools(ExecutorNames.CRITICAL_SYSTEM_INDEX_THREAD_POOLS) - .setMinimumNodeVersion(VERSION_SECURITY_PROFILE_ORIGIN) .setPriorSystemIndexDescriptors( List.of( SystemIndexDescriptor.builder() @@ -893,7 +889,6 @@ private SystemIndexDescriptor getSecurityProfileIndexDescriptor(Settings setting .setSettings(getProfileIndexSettings(settings)) .setAliasName(SECURITY_PROFILE_ALIAS) .setIndexFormat(INTERNAL_PROFILE_INDEX_FORMAT) - .setVersionMetaKey(SECURITY_VERSION_STRING) .setOrigin(SECURITY_ORIGIN) .setThreadPools(ExecutorNames.CRITICAL_SYSTEM_INDEX_THREAD_POOLS) .build() diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java index e2f9af2c676ce..14e9292d10fd1 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java @@ -111,7 +111,6 @@ public static SystemIndexDescriptor getSystemIndexDescriptor(Settings transformI .setDescription("Contains Transform configuration data") .setMappings(mappings()) .setSettings(settings(transformInternalIndexAdditionalSettings)) - .setVersionMetaKey("version") .setOrigin(TRANSFORM_ORIGIN) .build(); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index f6f8d3f336e21..cd965bb677526 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -802,7 +802,6 @@ public Collection getSystemIndexDescriptors(Settings sett .setDescription("Contains Watch definitions") .setMappings(getWatchesIndexMappings()) .setSettings(getWatchesIndexSettings()) - .setVersionMetaKey("version") .setOrigin(WATCHER_ORIGIN) .setIndexFormat(6) .build(), @@ -812,7 +811,6 @@ public Collection getSystemIndexDescriptors(Settings sett .setDescription("Used to track current and queued Watch execution") .setMappings(getTriggeredWatchesIndexMappings()) .setSettings(getTriggeredWatchesIndexSettings()) - .setVersionMetaKey("version") .setOrigin(WATCHER_ORIGIN) .setIndexFormat(6) .build() From 4d2a59833b43905c38829f9b6b108077a1747991 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Thu, 31 Oct 2024 07:29:44 -0400 Subject: [PATCH 233/324] Unmute SearchStatesIT::testCanMatch (#115960) The original failure reported in #108991 seems to be from a temporary connection failure: java.net.ConnectException: Connection refused so we umuting the test. Closes #108991 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 68ab8eb37a600..a97164586848c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -1,7 +1,4 @@ tests: -- class: "org.elasticsearch.upgrades.SearchStatesIT" - issue: "https://github.com/elastic/elasticsearch/issues/108991" - method: "testCanMatch" - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/esql/esql-async-query-api/line_17} issue: https://github.com/elastic/elasticsearch/issues/109260 From 681f5096e24967fc99699e682906776c1a75d39e Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Thu, 31 Oct 2024 13:16:01 +0100 Subject: [PATCH 234/324] ES|QL: Mute test for #116003 (#116005) --- .../xpack/esql/action/TelemetryIT.java | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java index 325e8500295ea..25603acece3cb 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java @@ -128,20 +128,22 @@ public static Iterable parameters() { : Collections.emptyMap(), Build.current().isSnapshot() ? Map.ofEntries(Map.entry("MAX", 1)) : Collections.emptyMap(), Build.current().isSnapshot() - ) }, - new Object[] { - new Test( - """ - FROM idx - | EVAL ip = to_ip(host), x = to_string(host), y = to_string(host) - | INLINESTATS max(id) - """, - Build.current().isSnapshot() ? Map.of("FROM", 1, "EVAL", 1, "INLINESTATS", 1, "STATS", 1) : Collections.emptyMap(), - Build.current().isSnapshot() - ? Map.ofEntries(Map.entry("MAX", 1), Map.entry("TO_IP", 1), Map.entry("TO_STRING", 2)) - : Collections.emptyMap(), - Build.current().isSnapshot() ) } + // awaits fix for https://github.com/elastic/elasticsearch/issues/116003 + // , + // new Object[] { + // new Test( + // """ + // FROM idx + // | EVAL ip = to_ip(host), x = to_string(host), y = to_string(host) + // | INLINESTATS max(id) + // """, + // Build.current().isSnapshot() ? Map.of("FROM", 1, "EVAL", 1, "INLINESTATS", 1, "STATS", 1) : Collections.emptyMap(), + // Build.current().isSnapshot() + // ? Map.ofEntries(Map.entry("MAX", 1), Map.entry("TO_IP", 1), Map.entry("TO_STRING", 2)) + // : Collections.emptyMap(), + // Build.current().isSnapshot() + // ) } ); } From 1c644cc78702cca6ac7fd17d58b001c0581b1259 Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Thu, 31 Oct 2024 08:42:36 -0400 Subject: [PATCH 235/324] Unmute esql-across-clusters yaml docs test (#115880) --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index a97164586848c..dd26d342df79b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -228,9 +228,6 @@ tests: - class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT method: testDeploymentSurvivesRestart {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/115528 -- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT - method: test {yaml=reference/esql/esql-across-clusters/line_197} - issue: https://github.com/elastic/elasticsearch/issues/115575 - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 From 37a4ee3102a070c29c7d414bfaeb809a3a59e76b Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 31 Oct 2024 15:28:30 +0200 Subject: [PATCH 236/324] Check index setting for source mode in SourceOnlySnapshotRepository (#116002) * Check index setting for source mode in SourceOnlySnapshotRepository * update * Revert "update" This reverts commit 9bbf0490f7d21b8c08489e05da563e3a76815847. --- .../index/mapper/SourceFieldMapper.java | 6 ++- .../SourceOnlySnapshotRepository.java | 6 ++- .../SourceOnlySnapshotShardTests.java | 50 +++++++++++++++++++ 3 files changed, 59 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 372e0bbdfecf4..1162734c0dc81 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -242,7 +242,7 @@ public SourceFieldMapper build() { } private Mode resolveSourceMode() { - // If the `index.mapper.source.mode` exists it takes precedence to determine the source mode for `_source` + // If the `index.mapping.source.mode` exists it takes precedence to determine the source mode for `_source` // otherwise the mode is determined according to `_source.mode`. if (INDEX_MAPPER_SOURCE_MODE_SETTING.exists(settings)) { return INDEX_MAPPER_SOURCE_MODE_SETTING.get(settings); @@ -439,6 +439,10 @@ public static boolean isSynthetic(IndexSettings indexSettings) { return INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexSettings.getSettings()) == SourceFieldMapper.Mode.SYNTHETIC; } + public static boolean isStored(IndexSettings indexSettings) { + return INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexSettings.getSettings()) == Mode.STORED; + } + public boolean isDisabled() { return mode == Mode.DISABLED; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java index 698193d5c1c28..579e3fb43b194 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.ReadOnlyEngine; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.repositories.FilterRepository; @@ -134,8 +135,9 @@ private static Metadata metadataToSnapshot(Collection indices, Metadata @Override public void snapshotShard(SnapshotShardContext context) { final MapperService mapperService = context.mapperService(); - if (mapperService.documentMapper() != null // if there is no mapping this is null - && mapperService.documentMapper().sourceMapper().isComplete() == false) { + if ((mapperService.documentMapper() != null // if there is no mapping this is null + && mapperService.documentMapper().sourceMapper().isComplete() == false) + || (mapperService.documentMapper() == null && SourceFieldMapper.isStored(mapperService.getIndexSettings()) == false)) { context.onFailure( new IllegalStateException( "Can't snapshot _source only on an index that has incomplete source ie. has _source disabled or filters the source" diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java index 54390365c62af..81d194ce84131 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java @@ -53,6 +53,7 @@ import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.fieldvisitor.FieldsVisitor; import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.seqno.SeqNoStats; @@ -150,6 +151,55 @@ public void testSourceIncomplete() throws IOException { closeShards(shard); } + public void testSourceIncompleteSyntheticSourceNoDoc() throws IOException { + ShardRouting shardRouting = shardRoutingBuilder( + new ShardId("index", "_na_", 0), + randomAlphaOfLength(10), + true, + ShardRoutingState.INITIALIZING + ).withRecoverySource(RecoverySource.EmptyStoreRecoverySource.INSTANCE).build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "synthetic") + .build(); + IndexMetadata metadata = IndexMetadata.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, primaryTerm).build(); + IndexShard shard = newShard(shardRouting, metadata, null, new InternalEngineFactory()); + recoverShardFromStore(shard); + SnapshotId snapshotId = new SnapshotId("test", "test"); + IndexId indexId = new IndexId(shard.shardId().getIndexName(), shard.shardId().getIndex().getUUID()); + SourceOnlySnapshotRepository repository = new SourceOnlySnapshotRepository(createRepository()); + repository.start(); + try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { + IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(new ShardGeneration(-1L)); + final PlainActionFuture future = new PlainActionFuture<>(); + runAsSnapshot( + shard.getThreadPool(), + () -> repository.snapshotShard( + new SnapshotShardContext( + shard.store(), + shard.mapperService(), + snapshotId, + indexId, + new SnapshotIndexCommit(snapshotRef), + null, + indexShardSnapshotStatus, + IndexVersion.current(), + randomMillisUpToYear9999(), + future + ) + ) + ); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, future::actionGet); + assertEquals( + "Can't snapshot _source only on an index that has incomplete source ie. has _source disabled or filters the source", + illegalStateException.getMessage() + ); + } + closeShards(shard); + } + public void testIncrementalSnapshot() throws IOException { IndexShard shard = newStartedShard(); for (int i = 0; i < 10; i++) { From 70516c795cc8a9b1318fa3f46599f09c524b3746 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 1 Nov 2024 00:37:24 +1100 Subject: [PATCH 237/324] Mute org.elasticsearch.search.query.SearchQueryIT testAllDocsQueryString #115728 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index dd26d342df79b..c781a7d30a597 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -284,6 +284,9 @@ tests: - class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT method: testReindexWithShutdown issue: https://github.com/elastic/elasticsearch/issues/115996 +- class: org.elasticsearch.search.query.SearchQueryIT + method: testAllDocsQueryString + issue: https://github.com/elastic/elasticsearch/issues/115728 # Examples: # From c77fb33070077e50c09def22c121ecc05e0be092 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 31 Oct 2024 14:49:11 +0100 Subject: [PATCH 238/324] Adds hands-on learning for Search link to the landing page (#116007) Co-authored-by: Liam Thompson <32779855+leemthompo@users.noreply.github.com> --- docs/reference/landing-page.asciidoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/reference/landing-page.asciidoc b/docs/reference/landing-page.asciidoc index 1f2145a3aae82..6449a799ffd16 100644 --- a/docs/reference/landing-page.asciidoc +++ b/docs/reference/landing-page.asciidoc @@ -79,6 +79,11 @@

    Get to know Elasticsearch

    +

    +Demos: + Hands-on learning for Search +

    +

    New webinar: Architect search apps with Google Cloud From d9f6451b2f67ec4d471718ceb2e80c79df5aeca6 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 31 Oct 2024 13:56:58 +0000 Subject: [PATCH 239/324] Remove BuildVersion.id() (#115795) BuildVersion should be entirely opaque, it should be only possible to do equality comparisons --- .../org/elasticsearch/env/BuildVersion.java | 40 ++++++++++++++++--- .../env/DefaultBuildVersion.java | 24 +++++++++-- .../org/elasticsearch/env/NodeMetadata.java | 2 +- .../gateway/PersistedClusterStateService.java | 6 +-- .../internal/BuildExtension.java | 15 +++++++ .../service/ReservedStateVersion.java | 6 +-- .../elasticsearch/env/BuildVersionTests.java | 4 ++ .../ReservedClusterStateServiceTests.java | 5 ++- 8 files changed, 85 insertions(+), 17 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index 5536b06d4d587..7a6b27eab2330 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -11,9 +11,13 @@ import org.elasticsearch.Build; import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.internal.BuildExtension; import org.elasticsearch.plugins.ExtensionLoader; +import org.elasticsearch.xcontent.ToXContentFragment; +import java.io.IOException; import java.util.ServiceLoader; /** @@ -31,7 +35,7 @@ * provide versions that accommodate different release models or versioning * schemes.

    */ -public abstract class BuildVersion { +public abstract class BuildVersion implements ToXContentFragment, Writeable { /** * Check whether this version is on or after a minimum threshold. @@ -58,6 +62,11 @@ public abstract class BuildVersion { */ public abstract boolean isFutureVersion(); + /** + * Returns this build version in a form suitable for storing in node metadata + */ + public abstract String toNodeMetadata(); + /** * Create a {@link BuildVersion} from a version ID number. * @@ -72,6 +81,16 @@ public static BuildVersion fromVersionId(int versionId) { return CurrentExtensionHolder.BUILD_EXTENSION.fromVersionId(versionId); } + /** + * Create a {@link BuildVersion} from a version in node metadata + * + * @param version The string stored in node metadata + * @return a version representing a build or release of Elasticsearch + */ + public static BuildVersion fromNodeMetadata(String version) { + return CurrentExtensionHolder.BUILD_EXTENSION.fromNodeMetadata(version); + } + /** * Create a {@link BuildVersion} from a version string. * @@ -82,6 +101,16 @@ public static BuildVersion fromString(String version) { return CurrentExtensionHolder.BUILD_EXTENSION.fromString(version); } + /** + * Read a {@link BuildVersion} from an input stream + * + * @param input The stream to read + * @return a version representing a build or release of Elasticsearch + */ + public static BuildVersion fromStream(StreamInput input) throws IOException { + return CurrentExtensionHolder.BUILD_EXTENSION.fromStream(input); + } + /** * Get the current build version. * @@ -94,9 +123,6 @@ public static BuildVersion current() { return CurrentExtensionHolder.BUILD_EXTENSION.currentBuildVersion(); } - // only exists for NodeMetadata#toXContent - public abstract int id(); - private static class CurrentExtensionHolder { private static final BuildExtension BUILD_EXTENSION = findExtension(); @@ -125,6 +151,10 @@ public BuildVersion fromVersionId(int versionId) { public BuildVersion fromString(String version) { return new DefaultBuildVersion(version); } - } + @Override + public BuildVersion fromStream(StreamInput in) throws IOException { + return new DefaultBuildVersion(in); + } + } } diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index 9cf0d60719653..a7e1a4fee341d 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -10,7 +10,11 @@ package org.elasticsearch.env; import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.XContentBuilder; +import java.io.IOException; import java.util.Objects; /** @@ -28,7 +32,7 @@ final class DefaultBuildVersion extends BuildVersion { public static BuildVersion CURRENT = new DefaultBuildVersion(Version.CURRENT.id()); - private final Version version; + final Version version; DefaultBuildVersion(int versionId) { assert versionId >= 0 : "Release version IDs must be non-negative integers"; @@ -39,6 +43,10 @@ final class DefaultBuildVersion extends BuildVersion { this.version = Version.fromString(Objects.requireNonNull(version)); } + DefaultBuildVersion(StreamInput in) throws IOException { + this(in.readVInt()); + } + @Override public boolean onOrAfterMinimumCompatible() { return Version.CURRENT.minimumCompatibilityVersion().onOrBefore(version); @@ -50,8 +58,18 @@ public boolean isFutureVersion() { } @Override - public int id() { - return version.id(); + public String toNodeMetadata() { + return Integer.toString(version.id()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(version.id()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(version.id()); } @Override diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java index 5b2ee39c1b622..c71a3798be1f7 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java @@ -204,7 +204,7 @@ protected XContentBuilder newXContentBuilder(XContentType type, OutputStream str @Override public void toXContent(XContentBuilder builder, NodeMetadata nodeMetadata) throws IOException { builder.field(NODE_ID_KEY, nodeMetadata.nodeId); - builder.field(NODE_VERSION_KEY, nodeMetadata.nodeVersion.id()); + builder.field(NODE_VERSION_KEY, nodeMetadata.nodeVersion); builder.field(OLDEST_INDEX_VERSION_KEY, nodeMetadata.oldestIndexVersion.id()); } diff --git a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java index 92b8686700a05..44be3a3812fd1 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java @@ -364,7 +364,7 @@ public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { ); } else if (nodeId == null) { nodeId = thisNodeId; - version = BuildVersion.fromVersionId(Integer.parseInt(userData.get(NODE_VERSION_KEY))); + version = BuildVersion.fromNodeMetadata(userData.get(NODE_VERSION_KEY)); if (userData.containsKey(OLDEST_INDEX_VERSION_KEY)) { oldestIndexVersion = IndexVersion.fromId(Integer.parseInt(userData.get(OLDEST_INDEX_VERSION_KEY))); } else { @@ -395,7 +395,7 @@ public static void overrideVersion(BuildVersion newVersion, Path... dataPaths) t try (IndexWriter indexWriter = createIndexWriter(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)), true)) { final Map commitData = new HashMap<>(userData); - commitData.put(NODE_VERSION_KEY, Integer.toString(newVersion.id())); + commitData.put(NODE_VERSION_KEY, newVersion.toNodeMetadata()); commitData.put(OVERRIDDEN_NODE_VERSION_KEY, Boolean.toString(true)); indexWriter.setLiveCommitData(commitData.entrySet()); indexWriter.commit(); @@ -852,7 +852,7 @@ void prepareCommit( final Map commitData = Maps.newMapWithExpectedSize(COMMIT_DATA_SIZE); commitData.put(CURRENT_TERM_KEY, Long.toString(currentTerm)); commitData.put(LAST_ACCEPTED_VERSION_KEY, Long.toString(lastAcceptedVersion)); - commitData.put(NODE_VERSION_KEY, Integer.toString(BuildVersion.current().id())); + commitData.put(NODE_VERSION_KEY, BuildVersion.current().toNodeMetadata()); commitData.put(OLDEST_INDEX_VERSION_KEY, Integer.toString(oldestIndexVersion.id())); commitData.put(NODE_ID_KEY, nodeId); commitData.put(CLUSTER_UUID_KEY, clusterUUID); diff --git a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java index 427e186bc40cf..7b4f5a7fadb4c 100644 --- a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java +++ b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java @@ -10,8 +10,11 @@ package org.elasticsearch.internal; import org.elasticsearch.Build; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.env.BuildVersion; +import java.io.IOException; + /** * Allows plugging in current build info. */ @@ -39,8 +42,20 @@ default boolean hasReleaseVersioning() { */ BuildVersion fromVersionId(int versionId); + /** + * Returns the {@link BuildVersion} as read from node metadata + */ + default BuildVersion fromNodeMetadata(String version) { + return fromVersionId(Integer.parseInt(version)); + } + /** * Returns the {@link BuildVersion} for a given version string. */ BuildVersion fromString(String version); + + /** + * Reads a {@link BuildVersion} from the given stream + */ + BuildVersion fromStream(StreamInput in) throws IOException; } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java index 116d470755e1c..ed10df2552b6b 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java @@ -48,12 +48,12 @@ public static ReservedStateVersion parse(XContentParser parser) { } public static ReservedStateVersion readFrom(StreamInput input) throws IOException { - return new ReservedStateVersion(input.readLong(), BuildVersion.fromVersionId(input.readVInt())); + return new ReservedStateVersion(input.readLong(), BuildVersion.fromStream(input)); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeLong(version()); - out.writeVInt(buildVersion().id()); + out.writeLong(version); + buildVersion.writeTo(out); } } diff --git a/server/src/test/java/org/elasticsearch/env/BuildVersionTests.java b/server/src/test/java/org/elasticsearch/env/BuildVersionTests.java index dab0adc2446ce..9fd889426fd2d 100644 --- a/server/src/test/java/org/elasticsearch/env/BuildVersionTests.java +++ b/server/src/test/java/org/elasticsearch/env/BuildVersionTests.java @@ -42,4 +42,8 @@ public void testIsFutureVersion() { assertFalse(afterMinCompat.isFutureVersion()); assertTrue(futureVersion.isFutureVersion()); } + + public static BuildVersion increment(BuildVersion version) { + return BuildVersion.fromVersionId(((DefaultBuildVersion) version).version.id() + 1); + } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index 5c7dd6cb346b9..efe3566064170 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; import org.elasticsearch.env.BuildVersion; +import org.elasticsearch.env.BuildVersionTests; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -519,7 +520,7 @@ public void testCheckMetadataVersion() { task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersion.fromVersionId(BuildVersion.current().id() + 1))), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersionTests.increment(BuildVersion.current()))), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -529,7 +530,7 @@ public void testCheckMetadataVersion() { assertThat("Cluster state should not be modified", task.execute(state), sameInstance(state)); task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersion.fromVersionId(BuildVersion.current().id() + 1))), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersionTests.increment(BuildVersion.current()))), ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION, Map.of(), List.of(), From 6a3a447f1885418164ca62294226b00ba2965089 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Thu, 31 Oct 2024 15:25:20 +0100 Subject: [PATCH 240/324] Remove double "the" from median absolute deviation description (#115826) --- .../esql/functions/examples/median_absolute_deviation.asciidoc | 2 +- .../expression/function/aggregate/MedianAbsoluteDeviation.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc index 9084c008e890a..cfd3d0a9159aa 100644 --- a/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc +++ b/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc @@ -10,7 +10,7 @@ include::{esql-specs}/median_absolute_deviation.csv-spec[tag=median-absolute-dev |=== include::{esql-specs}/median_absolute_deviation.csv-spec[tag=median-absolute-deviation-result] |=== -The expression can use inline functions. For example, to calculate the the median absolute deviation of the maximum values of a multivalued column, first use `MV_MAX` to get the maximum value per row, and use the result with the `MEDIAN_ABSOLUTE_DEVIATION` function +The expression can use inline functions. For example, to calculate the median absolute deviation of the maximum values of a multivalued column, first use `MV_MAX` to get the maximum value per row, and use the result with the `MEDIAN_ABSOLUTE_DEVIATION` function [source.merge.styled,esql] ---- include::{esql-specs}/median_absolute_deviation.csv-spec[tag=docsStatsMADNestedExpression] diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java index dfcbd6d22abae..42960cafdfd3a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java @@ -58,7 +58,7 @@ public class MedianAbsoluteDeviation extends NumericAggregate implements Surroga examples = { @Example(file = "median_absolute_deviation", tag = "median-absolute-deviation"), @Example( - description = "The expression can use inline functions. For example, to calculate the the " + description = "The expression can use inline functions. For example, to calculate the " + "median absolute deviation of the maximum values of a multivalued column, first " + "use `MV_MAX` to get the maximum value per row, and use the result with the " + "`MEDIAN_ABSOLUTE_DEVIATION` function", From aaf7a3ec72b051605924b6f40deb7c7ec8a80cd1 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Thu, 31 Oct 2024 09:46:19 -0500 Subject: [PATCH 241/324] Prep docs for v9 (#115375) This commit prepares the documentation for version 9. Some of the automation generates docs that are not correct for version 9. The content has been commented out with a reference to an internal issue for us to address before this documentation is used. --- docs/reference/migration/index.asciidoc | 38 +- docs/reference/migration/migrate_8_0.asciidoc | 94 -- .../cluster-node-setting-changes.asciidoc | 931 -------------- .../command-line-tool-changes.asciidoc | 20 - .../index-setting-changes.asciidoc | 122 -- .../migrate_8_0/java-api-changes.asciidoc | 50 - .../migrate_8_0/jvm-option-changes.asciidoc | 54 - .../migrate_8_0/logging-changes.asciidoc | 53 - .../migrate_8_0/mapping-changes.asciidoc | 133 -- .../migrate_8_0/migrate_to_java_time.asciidoc | 314 ----- .../migrate_8_0/packaging-changes.asciidoc | 60 - .../migrate_8_0/painless-changes.asciidoc | 42 - .../migrate_8_0/plugin-changes.asciidoc | 64 - .../migrate_8_0/rest-api-changes.asciidoc | 1138 ----------------- .../migrate_8_0/sql-jdbc-changes.asciidoc | 22 - .../migrate_8_0/system-req-changes.asciidoc | 59 - .../migration/migrate_8_0/transform.asciidoc | 16 - docs/reference/migration/migrate_8_1.asciidoc | 109 -- .../reference/migration/migrate_8_10.asciidoc | 89 -- .../reference/migration/migrate_8_11.asciidoc | 69 - .../reference/migration/migrate_8_12.asciidoc | 74 -- .../reference/migration/migrate_8_13.asciidoc | 137 -- .../reference/migration/migrate_8_14.asciidoc | 90 -- .../reference/migration/migrate_8_15.asciidoc | 140 -- .../reference/migration/migrate_8_16.asciidoc | 37 - .../reference/migration/migrate_8_17.asciidoc | 20 - docs/reference/migration/migrate_8_2.asciidoc | 16 - docs/reference/migration/migrate_8_3.asciidoc | 61 - docs/reference/migration/migrate_8_4.asciidoc | 46 - docs/reference/migration/migrate_8_5.asciidoc | 101 -- docs/reference/migration/migrate_8_6.asciidoc | 92 -- docs/reference/migration/migrate_8_7.asciidoc | 43 - docs/reference/migration/migrate_8_8.asciidoc | 47 - docs/reference/migration/migrate_8_9.asciidoc | 35 - docs/reference/migration/migrate_9_0.asciidoc | 319 +++++ .../migrate_9_0/rest-api-changes.asciidoc | 5 + ...ransient-settings-migration-guide.asciidoc | 0 docs/reference/release-notes.asciidoc | 130 +- .../release-notes/8.0.0-alpha1.asciidoc | 473 ------- .../release-notes/8.0.0-alpha2.asciidoc | 77 -- .../release-notes/8.0.0-beta1.asciidoc | 238 ---- .../release-notes/8.0.0-rc1.asciidoc | 112 -- .../release-notes/8.0.0-rc2.asciidoc | 131 -- docs/reference/release-notes/8.0.0.asciidoc | 717 ----------- docs/reference/release-notes/8.0.1.asciidoc | 81 -- docs/reference/release-notes/8.1.0.asciidoc | 348 ----- docs/reference/release-notes/8.1.1.asciidoc | 65 - docs/reference/release-notes/8.1.2.asciidoc | 44 - docs/reference/release-notes/8.1.3.asciidoc | 33 - docs/reference/release-notes/8.10.0.asciidoc | 289 ----- docs/reference/release-notes/8.10.1.asciidoc | 30 - docs/reference/release-notes/8.10.2.asciidoc | 12 - docs/reference/release-notes/8.10.3.asciidoc | 87 -- docs/reference/release-notes/8.10.4.asciidoc | 45 - docs/reference/release-notes/8.11.0.asciidoc | 342 ----- docs/reference/release-notes/8.11.1.asciidoc | 43 - docs/reference/release-notes/8.11.2.asciidoc | 89 -- docs/reference/release-notes/8.11.3.asciidoc | 28 - docs/reference/release-notes/8.11.4.asciidoc | 31 - docs/reference/release-notes/8.12.0.asciidoc | 435 ------- docs/reference/release-notes/8.12.1.asciidoc | 83 -- docs/reference/release-notes/8.12.2.asciidoc | 68 - docs/reference/release-notes/8.13.0.asciidoc | 472 ------- docs/reference/release-notes/8.13.1.asciidoc | 53 - docs/reference/release-notes/8.13.2.asciidoc | 54 - docs/reference/release-notes/8.13.3.asciidoc | 60 - docs/reference/release-notes/8.13.4.asciidoc | 36 - docs/reference/release-notes/8.14.0.asciidoc | 364 ------ docs/reference/release-notes/8.14.1.asciidoc | 50 - docs/reference/release-notes/8.14.2.asciidoc | 52 - docs/reference/release-notes/8.14.3.asciidoc | 32 - docs/reference/release-notes/8.15.0.asciidoc | 558 -------- docs/reference/release-notes/8.15.1.asciidoc | 103 -- docs/reference/release-notes/8.15.2.asciidoc | 42 - docs/reference/release-notes/8.16.0.asciidoc | 8 - docs/reference/release-notes/8.17.0.asciidoc | 8 - docs/reference/release-notes/8.2.0.asciidoc | 350 ----- docs/reference/release-notes/8.2.1.asciidoc | 73 -- docs/reference/release-notes/8.2.2.asciidoc | 35 - docs/reference/release-notes/8.2.3.asciidoc | 39 - docs/reference/release-notes/8.3.0.asciidoc | 367 ------ docs/reference/release-notes/8.3.1.asciidoc | 43 - docs/reference/release-notes/8.3.2.asciidoc | 26 - docs/reference/release-notes/8.3.3.asciidoc | 46 - docs/reference/release-notes/8.4.0.asciidoc | 356 ------ docs/reference/release-notes/8.4.1.asciidoc | 27 - docs/reference/release-notes/8.4.2.asciidoc | 92 -- docs/reference/release-notes/8.4.3.asciidoc | 37 - docs/reference/release-notes/8.5.0.asciidoc | 340 ----- docs/reference/release-notes/8.5.1.asciidoc | 76 -- docs/reference/release-notes/8.5.2.asciidoc | 51 - docs/reference/release-notes/8.5.3.asciidoc | 55 - docs/reference/release-notes/8.6.0.asciidoc | 295 ----- docs/reference/release-notes/8.6.1.asciidoc | 43 - docs/reference/release-notes/8.6.2.asciidoc | 33 - docs/reference/release-notes/8.7.0.asciidoc | 402 ------ docs/reference/release-notes/8.7.1.asciidoc | 80 -- docs/reference/release-notes/8.8.0.asciidoc | 310 ----- docs/reference/release-notes/8.8.1.asciidoc | 34 - docs/reference/release-notes/8.8.2.asciidoc | 46 - docs/reference/release-notes/8.9.0.asciidoc | 286 ----- docs/reference/release-notes/8.9.1.asciidoc | 56 - docs/reference/release-notes/8.9.2.asciidoc | 43 - docs/reference/release-notes/9.0.0.asciidoc | 557 ++++++++ .../release-notes/highlights.asciidoc | 255 ++-- .../rest-api/rest-api-compatibility.asciidoc | 36 +- 106 files changed, 1063 insertions(+), 14089 deletions(-) delete mode 100644 docs/reference/migration/migrate_8_0.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/command-line-tool-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/index-setting-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/java-api-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/jvm-option-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/logging-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/mapping-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/migrate_to_java_time.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/packaging-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/painless-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/plugin-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/rest-api-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/sql-jdbc-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/system-req-changes.asciidoc delete mode 100644 docs/reference/migration/migrate_8_0/transform.asciidoc delete mode 100644 docs/reference/migration/migrate_8_1.asciidoc delete mode 100644 docs/reference/migration/migrate_8_10.asciidoc delete mode 100644 docs/reference/migration/migrate_8_11.asciidoc delete mode 100644 docs/reference/migration/migrate_8_12.asciidoc delete mode 100644 docs/reference/migration/migrate_8_13.asciidoc delete mode 100644 docs/reference/migration/migrate_8_14.asciidoc delete mode 100644 docs/reference/migration/migrate_8_15.asciidoc delete mode 100644 docs/reference/migration/migrate_8_16.asciidoc delete mode 100644 docs/reference/migration/migrate_8_17.asciidoc delete mode 100644 docs/reference/migration/migrate_8_2.asciidoc delete mode 100644 docs/reference/migration/migrate_8_3.asciidoc delete mode 100644 docs/reference/migration/migrate_8_4.asciidoc delete mode 100644 docs/reference/migration/migrate_8_5.asciidoc delete mode 100644 docs/reference/migration/migrate_8_6.asciidoc delete mode 100644 docs/reference/migration/migrate_8_7.asciidoc delete mode 100644 docs/reference/migration/migrate_8_8.asciidoc delete mode 100644 docs/reference/migration/migrate_8_9.asciidoc create mode 100644 docs/reference/migration/migrate_9_0.asciidoc create mode 100644 docs/reference/migration/migrate_9_0/rest-api-changes.asciidoc rename docs/reference/migration/{ => migrate_9_0}/transient-settings-migration-guide.asciidoc (100%) delete mode 100644 docs/reference/release-notes/8.0.0-alpha1.asciidoc delete mode 100644 docs/reference/release-notes/8.0.0-alpha2.asciidoc delete mode 100644 docs/reference/release-notes/8.0.0-beta1.asciidoc delete mode 100644 docs/reference/release-notes/8.0.0-rc1.asciidoc delete mode 100644 docs/reference/release-notes/8.0.0-rc2.asciidoc delete mode 100644 docs/reference/release-notes/8.0.0.asciidoc delete mode 100644 docs/reference/release-notes/8.0.1.asciidoc delete mode 100644 docs/reference/release-notes/8.1.0.asciidoc delete mode 100644 docs/reference/release-notes/8.1.1.asciidoc delete mode 100644 docs/reference/release-notes/8.1.2.asciidoc delete mode 100644 docs/reference/release-notes/8.1.3.asciidoc delete mode 100644 docs/reference/release-notes/8.10.0.asciidoc delete mode 100644 docs/reference/release-notes/8.10.1.asciidoc delete mode 100644 docs/reference/release-notes/8.10.2.asciidoc delete mode 100644 docs/reference/release-notes/8.10.3.asciidoc delete mode 100644 docs/reference/release-notes/8.10.4.asciidoc delete mode 100644 docs/reference/release-notes/8.11.0.asciidoc delete mode 100644 docs/reference/release-notes/8.11.1.asciidoc delete mode 100644 docs/reference/release-notes/8.11.2.asciidoc delete mode 100644 docs/reference/release-notes/8.11.3.asciidoc delete mode 100644 docs/reference/release-notes/8.11.4.asciidoc delete mode 100644 docs/reference/release-notes/8.12.0.asciidoc delete mode 100644 docs/reference/release-notes/8.12.1.asciidoc delete mode 100644 docs/reference/release-notes/8.12.2.asciidoc delete mode 100644 docs/reference/release-notes/8.13.0.asciidoc delete mode 100644 docs/reference/release-notes/8.13.1.asciidoc delete mode 100644 docs/reference/release-notes/8.13.2.asciidoc delete mode 100644 docs/reference/release-notes/8.13.3.asciidoc delete mode 100644 docs/reference/release-notes/8.13.4.asciidoc delete mode 100644 docs/reference/release-notes/8.14.0.asciidoc delete mode 100644 docs/reference/release-notes/8.14.1.asciidoc delete mode 100644 docs/reference/release-notes/8.14.2.asciidoc delete mode 100644 docs/reference/release-notes/8.14.3.asciidoc delete mode 100644 docs/reference/release-notes/8.15.0.asciidoc delete mode 100644 docs/reference/release-notes/8.15.1.asciidoc delete mode 100644 docs/reference/release-notes/8.15.2.asciidoc delete mode 100644 docs/reference/release-notes/8.16.0.asciidoc delete mode 100644 docs/reference/release-notes/8.17.0.asciidoc delete mode 100644 docs/reference/release-notes/8.2.0.asciidoc delete mode 100644 docs/reference/release-notes/8.2.1.asciidoc delete mode 100644 docs/reference/release-notes/8.2.2.asciidoc delete mode 100644 docs/reference/release-notes/8.2.3.asciidoc delete mode 100644 docs/reference/release-notes/8.3.0.asciidoc delete mode 100644 docs/reference/release-notes/8.3.1.asciidoc delete mode 100644 docs/reference/release-notes/8.3.2.asciidoc delete mode 100644 docs/reference/release-notes/8.3.3.asciidoc delete mode 100644 docs/reference/release-notes/8.4.0.asciidoc delete mode 100644 docs/reference/release-notes/8.4.1.asciidoc delete mode 100644 docs/reference/release-notes/8.4.2.asciidoc delete mode 100644 docs/reference/release-notes/8.4.3.asciidoc delete mode 100644 docs/reference/release-notes/8.5.0.asciidoc delete mode 100644 docs/reference/release-notes/8.5.1.asciidoc delete mode 100644 docs/reference/release-notes/8.5.2.asciidoc delete mode 100644 docs/reference/release-notes/8.5.3.asciidoc delete mode 100644 docs/reference/release-notes/8.6.0.asciidoc delete mode 100644 docs/reference/release-notes/8.6.1.asciidoc delete mode 100644 docs/reference/release-notes/8.6.2.asciidoc delete mode 100644 docs/reference/release-notes/8.7.0.asciidoc delete mode 100644 docs/reference/release-notes/8.7.1.asciidoc delete mode 100644 docs/reference/release-notes/8.8.0.asciidoc delete mode 100644 docs/reference/release-notes/8.8.1.asciidoc delete mode 100644 docs/reference/release-notes/8.8.2.asciidoc delete mode 100644 docs/reference/release-notes/8.9.0.asciidoc delete mode 100644 docs/reference/release-notes/8.9.1.asciidoc delete mode 100644 docs/reference/release-notes/8.9.2.asciidoc create mode 100644 docs/reference/release-notes/9.0.0.asciidoc diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index 719588cb4b0d0..11aca45b003fa 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -1,40 +1,6 @@ include::migration_intro.asciidoc[] -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> +* <> -include::migrate_8_17.asciidoc[] -include::migrate_8_16.asciidoc[] -include::migrate_8_15.asciidoc[] -include::migrate_8_14.asciidoc[] -include::migrate_8_13.asciidoc[] -include::migrate_8_12.asciidoc[] -include::migrate_8_11.asciidoc[] -include::migrate_8_10.asciidoc[] -include::migrate_8_9.asciidoc[] -include::migrate_8_8.asciidoc[] -include::migrate_8_7.asciidoc[] -include::migrate_8_6.asciidoc[] -include::migrate_8_5.asciidoc[] -include::migrate_8_4.asciidoc[] -include::migrate_8_3.asciidoc[] -include::migrate_8_2.asciidoc[] -include::migrate_8_1.asciidoc[] -include::migrate_8_0.asciidoc[] +include::migrate_9_0.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc deleted file mode 100644 index 09433904f2ea8..0000000000000 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ /dev/null @@ -1,94 +0,0 @@ -[[migrating-8.0]] -== Migrating to 8.0 -++++ -8.0 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.0. - -See also <> and <>. - -[discrete] -[[breaking-changes-8.0]] -=== Breaking changes - -The following changes in {es} 8.0 might affect your applications -and prevent them from operating normally. -Before upgrading to 8.0, review these changes and take the described steps -to mitigate the impact. - -include::migrate_8_0/cluster-node-setting-changes.asciidoc[] -include::migrate_8_0/command-line-tool-changes.asciidoc[] -include::migrate_8_0/index-setting-changes.asciidoc[] -include::migrate_8_0/java-api-changes.asciidoc[] -include::migrate_8_0/jvm-option-changes.asciidoc[] -include::migrate_8_0/logging-changes.asciidoc[] -include::migrate_8_0/mapping-changes.asciidoc[] -include::migrate_8_0/packaging-changes.asciidoc[] -include::migrate_8_0/painless-changes.asciidoc[] -include::migrate_8_0/plugin-changes.asciidoc[] -include::migrate_8_0/rest-api-changes.asciidoc[] -include::migrate_8_0/sql-jdbc-changes.asciidoc[] -include::migrate_8_0/system-req-changes.asciidoc[] -include::migrate_8_0/transform.asciidoc[] - -[discrete] -[[deprecated-8.0]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.0 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you take the described steps to update your code -after upgrading to 8.0. - -To find out if you are using any deprecated functionality, -enable <>. - -[discrete] -[[breaking_80_cluster_node_setting_deprecations]] -==== Cluster and node setting deprecations - -[[deprecate-transient-cluster-settings]] -.We no longer recommend using transient cluster settings. -[%collapsible] -==== -*Details* + -We no longer recommend using transient cluster settings. Use persistent cluster -settings instead. If a cluster becomes unstable, transient settings can clear -unexpectedly, resulting in an undesired cluster configuration. - -*Impact* + -Transient cluster settings are not yet deprecated, but we plan to deprecate them -in a future release. For migration steps, see the -{ref}/transient-settings-migration-guide.html[Transient settings migration -guide]. -==== - -[discrete] -[[breaking_80_command_line_tool_deprecations]] -==== Command line tool deprecations - -TIP: {ess-skip-section} - -[[deprecate-elasticsearch-setup-passwords]] -.The `elasticsearch-setup-passwords` tool is deprecated. -[%collapsible] -==== -*Details* + -The `elasticsearch-setup-passwords` tool is deprecated in 8.0. To -manually reset the password for built-in users (including the `elastic` user), use -the {ref}/reset-password.html[`elasticsearch-reset-password`] tool, the {es} -{ref}/security-api-change-password.html[change passwords API], or the -User Management features in {kib}. -`elasticsearch-setup-passwords` will be removed in a future release. - -*Impact* + -Passwords are generated automatically for the `elastic` user when you start {es} for the first time. If you run `elasticsearch-setup-passwords` after -starting {es}, it will fail because the `elastic` -user password is already configured. -==== - -include::migrate_8_0/migrate_to_java_time.asciidoc[] -include::transient-settings-migration-guide.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc b/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc deleted file mode 100644 index bad4ab93676bb..0000000000000 --- a/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc +++ /dev/null @@ -1,931 +0,0 @@ -[discrete] -[[breaking_80_cluster_node_setting_changes]] -==== Cluster and node setting changes - -TIP: {ess-setting-change} - -.`action.destructive_requires_name` now defaults to `true`. {ess-icon} -[%collapsible] -==== -*Details* + -The default for the `action.destructive_requires_name` setting changes from `false` -to `true` in {es} 8.0.0. - -Previously, defaulting to `false` allowed users to use wildcard -patterns to delete, close, or change index blocks on indices. -To prevent the accidental deletion of indices that happen to match a -wildcard pattern, we now default to requiring that destructive -operations explicitly name the indices to be modified. - -*Impact* + -To use wildcard patterns for destructive actions, set -`action.destructive_requires_name` to `false` using the -{ref}/cluster-update-settings.html[] cluster settings API]. -==== - -.You can no longer set `xpack.searchable.snapshot.shared_cache.size` on non-frozen nodes. -[%collapsible] -==== -*Details* + -You can no longer set -{ref}/searchable-snapshots.html#searchable-snapshots-shared-cache[`xpack.searchable.snapshot.shared_cache.size`] -on a node that doesn't have the `data_frozen` node role. This setting reserves -disk space for the shared cache of partially mounted indices. {es} only -allocates partially mounted indices to nodes with the `data_frozen` role. - -*Impact* + -Remove `xpack.searchable.snapshot.shared_cache.size` from `elasticsearch.yml` -for nodes that don't have the `data_frozen` role. Specifying the setting on a -non-frozen node will result in an error on startup. -==== - -[[max_clause_count_change]] -.`indices.query.bool.max_clause_count` is deprecated and has no effect. -[%collapsible] -==== -*Details* + -Elasticsearch will now dynamically set the maximum number of allowed clauses -in a query, using a heuristic based on the size of the search thread pool and -the size of the heap allocated to the JVM. This limit has a minimum value of -1024 and will in most cases be larger (for example, a node with 30Gb RAM and -48 CPUs will have a maximum clause count of around 27,000). Larger heaps lead -to higher values, and larger thread pools result in lower values. - -*Impact* + -Queries with many clauses should be avoided whenever possible. -If you previously bumped this setting to accommodate heavy queries, -you might need to increase the amount of memory available to Elasticsearch, -or to reduce the size of your search thread pool so that more memory is -available to each concurrent search. - -In previous versions of Lucene you could get around this limit by nesting -boolean queries within each other, but the limit is now based on the total -number of leaf queries within the query as a whole and this workaround will -no longer help. - -Specifying `indices.query.bool.max_clause_count` will have no effect -but will generate deprecation warnings. To avoid these warnings, remove the -setting from `elasticsearch.yml` during an upgrade or node restart. -==== - -[[ilm-poll-interval-limit]] -.`indices.lifecycle.poll_interval` must be greater than `1s`. -[%collapsible] -==== -*Details* + -Setting `indices.lifecycle.poll_interval` too low can cause -excessive load on a cluster. The poll interval must now be at least `1s` (one second). - -*Impact* + -Set `indices.lifecycle.poll_interval` setting to `1s` or -greater in `elasticsearch.yml` or through the -{ref}/cluster-update-settings.html[cluster update settings API]. - -Setting `indices.lifecycle.poll_interval` to less than `1s` in -`elasticsearch.yml` will result in an error on startup. -{ref}/cluster-update-settings.html[Cluster update settings API] requests that -set `indices.lifecycle.poll_interval` to less than `1s` will return an error. -==== - -.The file and native realms are now enabled unless explicitly disabled. -[%collapsible] -==== -*Details* + -The file and native realms are now enabled unless explicitly disabled. If -explicitly disabled, the file and native realms remain disabled at all times. - -Previously, the file and native realms had the following implicit behaviors: - -* If the file and native realms were not configured, they were implicitly disabled -if any other realm was configured. - -* If no other realm was available because realms were either not configured, -not permitted by license, or explicitly disabled, the file and native realms -were enabled, even if explicitly disabled. - -*Impact* + -To explicitly disable the file or native realm, set the respective -`file..enabled` or `native..enabled` setting to `false` -under the `xpack.security.authc.realms` namespace in `elasticsearch.yml`. - -The following configuration example disables the native realm and the file realm. - -[source,yaml] ----- -xpack.security.authc.realms: - - native.realm1.enabled: false - file.realm2.enabled: false - - ... ----- -==== - -.The realm `order` setting is now required. -[%collapsible] -==== -*Details* + -The `xpack.security.authc.realms.{type}.{name}.order` setting is now required and must be -specified for each explicitly configured realm. Each value must be unique. - -*Impact* + -The cluster will fail to start if the requirements are not met. - -For example, the following configuration is invalid: -[source,yaml] --------------------------------------------------- -xpack.security.authc.realms.kerberos.kerb1: - keytab.path: es.keytab - remove_realm_name: false --------------------------------------------------- - -And must be configured as: -[source,yaml] --------------------------------------------------- -xpack.security.authc.realms.kerberos.kerb1: - order: 0 - keytab.path: es.keytab - remove_realm_name: false --------------------------------------------------- -==== - -[[breaking_80_allocation_change_include_relocations_removed]] -.`cluster.routing.allocation.disk.include_relocations` has been removed. -[%collapsible] -==== -*Details* + -{es} now always accounts for the sizes of relocating shards when making -allocation decisions based on the disk usage of the nodes in the cluster. In -earlier versions, you could disable this by setting `cluster.routing.allocation.disk.include_relocations` to `false`. -That could result in poor allocation decisions that could overshoot watermarks and require significant -extra work to correct. The `cluster.routing.allocation.disk.include_relocations` setting has been removed. - -*Impact* + -Remove the `cluster.routing.allocation.disk.include_relocations` -setting. Specifying this setting in `elasticsearch.yml` will result in an error -on startup. -==== - -.`cluster.join.timeout` has been removed. -[%collapsible] -==== -*Details* + -The `cluster.join.timeout` setting has been removed. Join attempts no longer -time out. - -*Impact* + -Remove `cluster.join.timeout` from `elasticsearch.yml`. -==== - -.`discovery.zen` settings have been removed. -[%collapsible] -==== -*Details* + -All settings under the `discovery.zen` namespace are no longer supported. They existed only only for BWC reasons in 7.x. This includes: - -- `discovery.zen.minimum_master_nodes` -- `discovery.zen.no_master_block` -- `discovery.zen.hosts_provider` -- `discovery.zen.publish_timeout` -- `discovery.zen.commit_timeout` -- `discovery.zen.publish_diff.enable` -- `discovery.zen.ping.unicast.concurrent_connects` -- `discovery.zen.ping.unicast.hosts.resolve_timeout` -- `discovery.zen.ping.unicast.hosts` -- `discovery.zen.ping_timeout` -- `discovery.zen.unsafe_rolling_upgrades_enabled` -- `discovery.zen.fd.connect_on_network_disconnect` -- `discovery.zen.fd.ping_interval` -- `discovery.zen.fd.ping_timeout` -- `discovery.zen.fd.ping_retries` -- `discovery.zen.fd.register_connection_listener` -- `discovery.zen.join_retry_attempts` -- `discovery.zen.join_retry_delay` -- `discovery.zen.join_timeout` -- `discovery.zen.max_pings_from_another_master` -- `discovery.zen.send_leave_request` -- `discovery.zen.master_election.wait_for_joins_timeout` -- `discovery.zen.master_election.ignore_non_master_pings` -- `discovery.zen.publish.max_pending_cluster_states` -- `discovery.zen.bwc_ping_timeout` - -*Impact* + -Remove the `discovery.zen` settings from `elasticsearch.yml`. Specifying these settings will result in an error on startup. -==== - -.`http.content_type.required` has been removed. -[%collapsible] -==== -*Details* + -The `http.content_type.required` setting was deprecated in Elasticsearch 6.0 -and has been removed in Elasticsearch 8.0. The setting was introduced in -Elasticsearch 5.3 to prepare users for Elasticsearch 6.0, where content type -auto detection was removed for HTTP requests. - -*Impact* + -Remove the `http.content_type.required` setting from `elasticsearch.yml`. Specifying this setting will result in an error on startup. -==== - -.`http.tcp_no_delay` has been removed. -[%collapsible] -==== -*Details* + -The `http.tcp_no_delay` setting was deprecated in 7.x and has been removed in 8.0. Use `http.tcp.no_delay` instead. - -*Impact* + -Replace the `http.tcp_no_delay` setting with `http.tcp.no_delay`. -Specifying `http.tcp_no_delay` in `elasticsearch.yml` will -result in an error on startup. -==== - -.`network.tcp.connect_timeout` has been removed. -[%collapsible] -==== -*Details* + -The `network.tcp.connect_timeout` setting was deprecated in 7.x and has been removed in 8.0. This setting -was a fallback setting for `transport.connect_timeout`. - -*Impact* + -Remove the `network.tcp.connect_timeout` setting. -Use the `transport.connect_timeout` setting to change the default connection -timeout for client connections. Specifying -`network.tcp.connect_timeout` in `elasticsearch.yml` will result in an -error on startup. -==== - -.`node.max_local_storage_nodes` has been removed. -[%collapsible] -==== -*Details* + -The `node.max_local_storage_nodes` setting was deprecated in 7.x and -has been removed in 8.0. Nodes should be run on separate data paths -to ensure that each node is consistently assigned to the same data path. - -*Impact* + -Remove the `node.max_local_storage_nodes` setting. Specifying this -setting in `elasticsearch.yml` will result in an error on startup. -==== - -[[accept-default-password-removed]] -.The `accept_default_password` setting has been removed. -[%collapsible] -==== -*Details* + -The `xpack.security.authc.accept_default_password` setting has not had any affect -since the 6.0 release of {es} and is no longer allowed. - -*Impact* + -Remove the `xpack.security.authc.accept_default_password` setting from `elasticsearch.yml`. -Specifying this setting will result in an error on startup. -==== - -[[roles-index-cache-removed]] -.The `roles.index.cache.*` settings have been removed. -[%collapsible] -==== -*Details* + -The `xpack.security.authz.store.roles.index.cache.max_size` and -`xpack.security.authz.store.roles.index.cache.ttl` settings have -been removed. These settings have been redundant and deprecated -since the 5.2 release of {es}. - -*Impact* + -Remove the `xpack.security.authz.store.roles.index.cache.max_size` -and `xpack.security.authz.store.roles.index.cache.ttl` settings from `elasticsearch.yml` . -Specifying these settings will result in an error on startup. -==== - -[[separating-node-and-client-traffic]] -.The `transport.profiles.*.xpack.security.type` setting has been removed. -[%collapsible] -==== -*Details* + -The `transport.profiles.*.xpack.security.type` setting is no longer supported. -The Transport Client has been removed and all client traffic now uses -the HTTP transport. Transport profiles using this setting should be removed. - -*Impact* + -Remove the `transport.profiles.*.xpack.security.type` setting from `elasticsearch.yml`. -Specifying this setting in a transport profile will result in an error on startup. -==== - -[discrete] -[[saml-realm-nameid-changes]] -.The `nameid_format` SAML realm setting no longer has a default value. -[%collapsible] -==== -*Details* + -In SAML, Identity Providers (IdPs) can either be explicitly configured to -release a `NameID` with a specific format, or configured to attempt to conform -with the requirements of a Service Provider (SP). The SP declares its -requirements in the `NameIDPolicy` element of a SAML Authentication Request. -In {es}, the `nameid_format` SAML realm setting controls the `NameIDPolicy` -value. - -Previously, the default value for `nameid_format` was -`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`. This setting created -authentication requests that required the IdP to release `NameID` with a -`transient` format. - -The default value has been removed, which means that {es} will create SAML Authentication Requests by default that don't put this requirement on the -IdP. If you want to retain the previous behavior, set `nameid_format` to -`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`. - -*Impact* + -If you currently don't configure `nameid_format` explicitly, it's possible -that your IdP will reject authentication requests from {es} because the requests -do not specify a `NameID` format (and your IdP is configured to expect one). -This mismatch can result in a broken SAML configuration. If you're unsure whether -your IdP is explicitly configured to use a certain `NameID` format and you want to retain current behavior -, try setting `nameid_format` to `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` explicitly. -==== - -.The `xpack.security.transport.ssl.enabled` setting is now required to configure `xpack.security.transport.ssl` settings. -[%collapsible] -==== -*Details* + -It is now an error to configure any SSL settings for -`xpack.security.transport.ssl` without also configuring -`xpack.security.transport.ssl.enabled`. - -*Impact* + -If using other `xpack.security.transport.ssl` settings, you must explicitly -specify the `xpack.security.transport.ssl.enabled` setting. - -If you do not want to enable SSL and are currently using other -`xpack.security.transport.ssl` settings, do one of the following: - -* Explicitly specify `xpack.security.transport.ssl.enabled` as `false` -* Discontinue use of other `xpack.security.transport.ssl` settings - -If you want to enable SSL, follow the instructions in -{ref}/configuring-tls.html#tls-transport[Encrypting communications between nodes -in a cluster]. As part of this configuration, explicitly specify -`xpack.security.transport.ssl.enabled` as `true`. - -For example, the following configuration is invalid: -[source,yaml] --------------------------------------------------- -xpack.security.transport.ssl.keystore.path: elastic-certificates.p12 -xpack.security.transport.ssl.truststore.path: elastic-certificates.p12 --------------------------------------------------- - -And must be configured as: -[source,yaml] --------------------------------------------------- -xpack.security.transport.ssl.enabled: true <1> -xpack.security.transport.ssl.keystore.path: elastic-certificates.p12 -xpack.security.transport.ssl.truststore.path: elastic-certificates.p12 --------------------------------------------------- -<1> or `false`. -==== - -.The `xpack.security.http.ssl.enabled` setting is now required to configure `xpack.security.http.ssl` settings. -[%collapsible] -==== -*Details* + -It is now an error to configure any SSL settings for -`xpack.security.http.ssl` without also configuring -`xpack.security.http.ssl.enabled`. - -*Impact* + -If using other `xpack.security.http.ssl` settings, you must explicitly -specify the `xpack.security.http.ssl.enabled` setting. - -If you do not want to enable SSL and are currently using other -`xpack.security.http.ssl` settings, do one of the following: - -* Explicitly specify `xpack.security.http.ssl.enabled` as `false` -* Discontinue use of other `xpack.security.http.ssl` settings - -If you want to enable SSL, follow the instructions in -{ref}/security-basic-setup-https.html#encrypt-http-communication[Encrypting HTTP client communications]. As part -of this configuration, explicitly specify `xpack.security.http.ssl.enabled` -as `true`. - -For example, the following configuration is invalid: -[source,yaml] --------------------------------------------------- -xpack.security.http.ssl.certificate: elasticsearch.crt -xpack.security.http.ssl.key: elasticsearch.key -xpack.security.http.ssl.certificate_authorities: [ "corporate-ca.crt" ] --------------------------------------------------- - -And must be configured as either: -[source,yaml] --------------------------------------------------- -xpack.security.http.ssl.enabled: true <1> -xpack.security.http.ssl.certificate: elasticsearch.crt -xpack.security.http.ssl.key: elasticsearch.key -xpack.security.http.ssl.certificate_authorities: [ "corporate-ca.crt" ] --------------------------------------------------- -<1> or `false`. -==== - -.A `xpack.security.transport.ssl` certificate and key are now required to enable SSL for the transport interface. -[%collapsible] -==== -*Details* + -It is now an error to enable SSL for the transport interface without also configuring -a certificate and key through use of the `xpack.security.transport.ssl.keystore.path` -setting or the `xpack.security.transport.ssl.certificate` and -`xpack.security.transport.ssl.key` settings. - -*Impact* + -If `xpack.security.transport.ssl.enabled` is set to `true`, provide a -certificate and key using the `xpack.security.transport.ssl.keystore.path` -setting or the `xpack.security.transport.ssl.certificate` and -`xpack.security.transport.ssl.key` settings. If a certificate and key is not -provided, {es} will return in an error on startup. -==== - -.A `xpack.security.http.ssl` certificate and key are now required to enable SSL for the HTTP server. -[%collapsible] -==== -*Details* + -It is now an error to enable SSL for the HTTP (Rest) server without also configuring -a certificate and key through use of the `xpack.security.http.ssl.keystore.path` -setting or the `xpack.security.http.ssl.certificate` and -`xpack.security.http.ssl.key` settings. - -*Impact* + -If `xpack.security.http.ssl.enabled` is set to `true`, provide a certificate and -key using the `xpack.security.http.ssl.keystore.path` setting or the -`xpack.security.http.ssl.certificate` and `xpack.security.http.ssl.key` -settings. If certificate and key is not provided, {es} will return in an error -on startup. -==== - -.PKCS#11 keystores and trustores cannot be configured in `elasticsearch.yml` -[%collapsible] -==== -*Details* + -The settings `*.ssl.keystore.type` and `*.ssl.truststore.type` no longer accept "PKCS11" as a valid type. -This applies to all SSL settings in Elasticsearch, including - -- `xpack.security.http.keystore.type` -- `xpack.security.transport.keystore.type` -- `xpack.security.http.truststore.type` -- `xpack.security.transport.truststore.type` - -As well as SSL settings for security realms, watcher and monitoring. - -Use of a PKCS#11 keystore or truststore as the JRE's default store is not affected. - -*Impact* + -If you have a PKCS#11 keystore configured within your `elasticsearch.yml` file, you must remove that -configuration and switch to a supported keystore type, or configure your PKCS#11 keystore as the -JRE default store. -==== - -.The `kibana` user has been replaced by `kibana_system`. -[%collapsible] -==== -*Details* + -The `kibana` user was historically used to authenticate {kib} to {es}. -The name of this user was confusing, and was often mistakenly used to login to {kib}. -This has been renamed to `kibana_system` in order to reduce confusion, and to better -align with other built-in system accounts. - -*Impact* + -Replace any use of the `kibana` user with the `kibana_system` user. Specifying -the `kibana` user in `kibana.yml` will result in an error on startup. - -If your `kibana.yml` used to contain: -[source,yaml] --------------------------------------------------- -elasticsearch.username: kibana --------------------------------------------------- - -then you should update to use the new `kibana_system` user instead: -[source,yaml] --------------------------------------------------- -elasticsearch.username: kibana_system --------------------------------------------------- - -IMPORTANT: The new `kibana_system` user does not preserve the previous `kibana` -user password. You must explicitly set a password for the `kibana_system` user. -==== - -[[search-remote-settings-removed]] -.The `search.remote.*` settings have been removed. -[%collapsible] -==== -*Details* + -In 6.5 these settings were deprecated in favor of `cluster.remote`. In 7.x we -provided automatic upgrading of these settings to their `cluster.remote` -counterparts. In 8.0.0, these settings have been removed. Elasticsearch will -refuse to start if you have these settings in your configuration or cluster -state. - -*Impact* + -Use the replacement `cluster.remote` settings. Discontinue use of the -`search.remote.*` settings. Specifying these settings in `elasticsearch.yml` -will result in an error on startup. -==== - -[[remove-pidfile]] -.The `pidfile` setting has been replaced by `node.pidfile`. -[%collapsible] -==== -*Details* + -To ensure that all settings are in a proper namespace, the `pidfile` setting was -previously deprecated in version 7.4.0 of Elasticsearch, and is removed in -version 8.0.0. Instead, use `node.pidfile`. - -*Impact* + -Use the `node.pidfile` setting. Discontinue use of the `pidfile` setting. -Specifying the `pidfile` setting in `elasticsearch.yml` will result in an error -on startup. -==== - -[[remove-processors]] -.The `processors` setting has been replaced by `node.processors`. -[%collapsible] -==== -*Details* + -To ensure that all settings are in a proper namespace, the `processors` setting -was previously deprecated in version 7.4.0 of Elasticsearch, and is removed in -version 8.0.0. Instead, use `node.processors`. - -*Impact* + -Use the `node.processors` setting. Discontinue use of the `processors` setting. -Specifying the `processors` setting in `elasticsearch.yml` will result in an -error on startup. -==== - -.The `node.processors` setting can no longer exceed the available number of processors. -[%collapsible] -==== -*Details* + -Previously it was possible to set the number of processors used to set the -default sizes for the thread pools to be more than the number of available -processors. As this leads to more context switches and more threads but without -an increase in the number of physical CPUs on which to schedule these additional -threads, the `node.processors` setting is now bounded by the number of available -processors. - -*Impact* + -If specified, ensure the value of `node.processors` setting does not exceed the -number of available processors. Setting the `node.processors` value greater than -the number of available processors in `elasticsearch.yml` will result in an -error on startup. -==== - -.The `cluster.remote.connect` setting has been removed. -[%collapsible] -==== -*Details* + -In Elasticsearch 7.7.0, the setting `cluster.remote.connect` was deprecated in -favor of setting `node.remote_cluster_client`. In Elasticsearch 8.0.0, the -setting `cluster.remote.connect` is removed. - -*Impact* + -Use the `node.remote_cluster_client` setting. Discontinue use of the -`cluster.remote.connect` setting. Specifying the `cluster.remote.connect` -setting in `elasticsearch.yml` will result in an error on startup. -==== - -.The `node.local_storage` setting has been removed. -[%collapsible] -==== -*Details* + -In Elasticsearch 7.8.0, the setting `node.local_storage` was deprecated and -beginning in Elasticsearch 8.0.0 all nodes will require local storage. Therefore, -the `node.local_storage` setting has been removed. - -*Impact* + -Discontinue use of the `node.local_storage` setting. Specifying this setting in -`elasticsearch.yml` will result in an error on startup. -==== - -.The `auth.password` setting for HTTP monitoring has been removed. -[%collapsible] -==== -*Details* + -In Elasticsearch 7.7.0, the setting `xpack.monitoring.exporters..auth.password` -was deprecated in favor of setting `xpack.monitoring.exporters..auth.secure_password`. -In Elasticsearch 8.0.0, the setting `xpack.monitoring.exporters..auth.password` is -removed. - -*Impact* + -Use the `xpack.monitoring.exporters..auth.secure_password` -setting. Discontinue use of the -`xpack.monitoring.exporters..auth.password` setting. Specifying -the `xpack.monitoring.exporters..auth.password` setting in -`elasticsearch.yml` will result in an error on startup. -==== - -.Settings used to disable basic license features have been removed. -[%collapsible] -==== -*Details* + -The following settings were deprecated in {es} 7.8.0 and have been removed -in {es} 8.0.0: - -* `xpack.enrich.enabled` -* `xpack.flattened.enabled` -* `xpack.ilm.enabled` -* `xpack.monitoring.enabled` -* `xpack.rollup.enabled` -* `xpack.slm.enabled` -* `xpack.sql.enabled` -* `xpack.transform.enabled` -* `xpack.vectors.enabled` - -These basic license features are now always enabled. - -If you have disabled ILM so that you can use another tool to manage Watcher -indices, the newly introduced `xpack.watcher.use_ilm_index_management` setting -may be set to false. - -*Impact* + -Discontinue use of the removed settings. Specifying these settings in -`elasticsearch.yml` will result in an error on startup. -==== - -.Settings used to defer cluster recovery pending a certain number of master nodes have been removed. -[%collapsible] -==== -*Details* + -The following cluster settings have been removed: - -* `gateway.expected_nodes` -* `gateway.expected_master_nodes` -* `gateway.recover_after_nodes` -* `gateway.recover_after_master_nodes` - -It is safe to recover the cluster as soon as a majority of master-eligible -nodes have joined so there is no benefit in waiting for any additional -master-eligible nodes to start. - -*Impact* + -Discontinue use of the removed settings. If needed, use -`gateway.expected_data_nodes` or `gateway.recover_after_data_nodes` to defer -cluster recovery pending a certain number of data nodes. -==== - -.Legacy role settings have been removed. -[%collapsible] -==== -*Details* + -The legacy role settings: - -* `node.data` -* `node.ingest` -* `node.master` -* `node.ml` -* `node.remote_cluster_client` -* `node.transform` -* `node.voting_only` - -have been removed. Instead, use the `node.roles` setting. If you were previously -using the legacy role settings on a 7.13 or later cluster, you will have a -deprecation log message on each of your nodes indicating the exact replacement -value for `node.roles`. - -*Impact* + -Discontinue use of the removed settings. Specifying these settings in -`elasticsearch.yml` will result in an error on startup. -==== - -[[system-call-filter-setting]] -.The system call filter setting has been removed. -[%collapsible] -==== -*Details* + -Elasticsearch uses system call filters to remove its ability to fork another -process. This is useful to mitigate remote code exploits. These system call -filters are enabled by default, and were previously controlled via the setting -`bootstrap.system_call_filter`. Starting in Elasticsearch 8.0, system call -filters will be required. As such, the setting `bootstrap.system_call_filter` -was deprecated in Elasticsearch 7.13.0, and is removed as of Elasticsearch -8.0.0. - -*Impact* + -Discontinue use of the removed setting. Specifying this setting in Elasticsearch -configuration will result in an error on startup. -==== - -[[tier-filter-setting]] -.Tier filtering settings have been removed. -[%collapsible] -==== -*Details* + -The cluster and index level settings ending in `._tier` used for filtering the allocation of a shard -to a particular set of nodes have been removed. Instead, the -{ref}/data-tier-shard-filtering.html#tier-preference-allocation-filter[tier -preference setting], `index.routing.allocation.include._tier_preference` should -be used. The removed settings are: - -Cluster level settings: - -- `cluster.routing.allocation.include._tier` -- `cluster.routing.allocation.exclude._tier` -- `cluster.routing.allocation.require._tier` - -Index settings: - -- `index.routing.allocation.include._tier` -- `index.routing.allocation.exclude._tier` -- `index.routing.allocation.require._tier` - -*Impact* + -Discontinue use of the removed settings. Specifying any of these cluster settings in Elasticsearch -configuration will result in an error on startup. Any indices using these settings will have the -settings archived (and they will have no effect) when the index metadata is loaded. -==== - -[[shared-data-path-setting]] -.Shared data path and per index data path settings are deprecated. -[%collapsible] -==== -*Details* + -Elasticsearch uses the shared data path as the base path of per index data -paths. This feature was previously used with shared replicas. Starting in -7.13.0, these settings are deprecated. Starting in 8.0 only existing -indices created in 7.x will be capable of using the shared data path and -per index data path settings. - -*Impact* + -Discontinue use of the deprecated settings. -==== - -[[single-data-node-watermark-setting]] -.The single data node watermark setting is deprecated and now only accepts `true`. -[%collapsible] -==== -*Details* + -In 7.14, setting `cluster.routing.allocation.disk.watermark.enable_for_single_data_node` -to false was deprecated. Starting in 8.0, the only legal value will be -true. In a future release, the setting will be removed completely, with same -behavior as if the setting was `true`. - -If the old behavior is desired for a single data node cluster, disk based -allocation can be disabled by setting -`cluster.routing.allocation.disk.threshold_enabled: false` - -*Impact* + -Discontinue use of the deprecated setting. -==== - -[[auto-import-dangling-indices-removed]] -.The `gateway.auto_import_dangling_indices` setting has been removed. -[%collapsible] -==== -*Details* + -The `gateway.auto_import_dangling_indices` cluster setting has been removed. -Previously, you could use this setting to automatically import -{ref}/modules-gateway.html#dangling-indices[dangling indices]. However, -automatically importing dangling indices is unsafe. Use the -{ref}/indices.html#dangling-indices-api[dangling indices APIs] to manage and -import dangling indices instead. - -*Impact* + -Discontinue use of the removed setting. Specifying the setting in -`elasticsearch.yml` will result in an error on startup. -==== - -.The `listener` thread pool has been removed. -[%collapsible] -==== -*Details* + -Previously, the transport client used the thread pool to ensure listeners aren't -called back on network threads. The transport client has been removed -in 8.0, and the thread pool is no longer needed. - -*Impact* + -Remove `listener` thread pool settings from `elasticsearch.yml` for any nodes. -Specifying `listener` thread pool settings in `elasticsearch.yml` will result in -an error on startup. -==== - -.The `fixed_auto_queue_size` thread pool type has been removed. -[%collapsible] -==== -*Details* + -The `fixed_auto_queue_size` thread pool type, previously marked as an -experimental feature, was deprecated in 7.x and has been removed in 8.0. -The `search` and `search_throttled` thread pools have the `fixed` type now. - -*Impact* + -No action needed. -==== - -.Several `transport` settings have been replaced. -[%collapsible] -==== -*Details* + -The following settings have been deprecated in 7.x and removed in 8.0. Each setting has a replacement -setting that was introduced in 6.7. - -- `transport.tcp.port` replaced by `transport.port` -- `transport.tcp.compress` replaced by `transport.compress` -- `transport.tcp.connect_timeout` replaced by `transport.connect_timeout` -- `transport.tcp_no_delay` replaced by `transport.tcp.no_delay` -- `transport.profiles.profile_name.tcp_no_delay` replaced by `transport.profiles.profile_name.tcp.no_delay` -- `transport.profiles.profile_name.tcp_keep_alive` replaced by `transport.profiles.profile_name.tcp.keep_alive` -- `transport.profiles.profile_name.reuse_address` replaced by `transport.profiles.profile_name.tcp.reuse_address` -- `transport.profiles.profile_name.send_buffer_size` replaced by `transport.profiles.profile_name.tcp.send_buffer_size` -- `transport.profiles.profile_name.receive_buffer_size` replaced by `transport.profiles.profile_name.tcp.receive_buffer_size` - -*Impact* + -Use the replacement settings. Discontinue use of the removed settings. -Specifying the removed settings in `elasticsearch.yml` will result in an error -on startup. -==== - -.Selective transport compression has been enabled by default. -[%collapsible] -==== -*Details* + -Prior to 8.0, transport compression was disabled by default. Starting in 8.0, -`transport.compress` defaults to `indexing_data`. This configuration means that -the propagation of raw indexing data will be compressed between nodes. - -*Impact* + -Inter-node transit will get reduced along the indexing path. In some scenarios, -CPU usage could increase. -==== - -.Transport compression defaults to lz4. -[%collapsible] -==== -*Details* + -Prior to 8.0, the `transport.compression_scheme` setting defaulted to `deflate`. Starting in -8.0, `transport.compress_scheme` defaults to `lz4`. - -Prior to 8.0, the `cluster.remote..transport.compression_scheme` -setting defaulted to `deflate` when `cluster.remote..transport.compress` -was explicitly configured. Starting in 8.0, -`cluster.remote..transport.compression_scheme` will fallback to -`transport.compression_scheme` by default. - -*Impact* + -This configuration means that transport compression will produce somewhat lower -compression ratios in exchange for lower CPU load. -==== - -.The `repositories.fs.compress` node-level setting has been removed. -[%collapsible] -==== -*Details* + -For shared file system repositories (`"type": "fs"`), the node level setting `repositories.fs.compress` could -previously be used to enable compression for all shared file system repositories where `compress` was not specified. -The `repositories.fs.compress` setting has been removed. - -*Impact* + -Discontinue use of the `repositories.fs.compress` node-level setting. Use the -repository-specific `compress` setting to enable compression instead. Refer to -{ref}/snapshots-filesystem-repository.html#filesystem-repository-settings[Shared -file system repository settings]. -==== - -// This change is not notable because it should not have any impact on upgrades -// However we document it here out of an abundance of caution -[[fips-default-hash-changed]] -.When FIPS mode is enabled the default password hash is now PBKDF2_STRETCH -[%collapsible] -==== -*Details* + -If `xpack.security.fips_mode.enabled` is true (see <>), -the value of `xpack.security.authc.password_hashing.algorithm` now defaults to -`pbkdf2_stretch`. - -In earlier versions this setting would always default to `bcrypt` and a runtime -check would prevent a node from starting unless the value was explicitly set to -a "pbkdf2" variant. - -There is no change for clusters that do not enable FIPS 140 mode. - -*Impact* + -This change should not have any impact on upgraded nodes. -Any node with an explicitly configured value for the password hashing algorithm -will continue to use that configured value. -Any node that did not have an explicitly configured password hashing algorithm in -{es} 6.x or {es} 7.x would have failed to start. -==== - -.The `xpack.monitoring.history.duration` will not delete indices created by metricbeat or elastic agent -[%collapsible] -==== -*Details* + - -Prior to 8.0, Elasticsearch would internally handle removal of all monitoring indices according to the -`xpack.monitoring.history.duration` setting. - -When using metricbeat or elastic agent >= 8.0 to collect monitoring data, indices are managed via an ILM policy. If the setting is present, the policy will be created using the `xpack.monitoring.history.duration` as an initial retention period. - -If you need to customize retention settings for monitoring data collected with metricbeat, please update the `.monitoring-8-ilm-policy` ILM policy directly. - -The `xpack.monitoring.history.duration` setting will only apply to monitoring indices written using (legacy) internal -collection, not indices created by metricbeat or agent. - -*Impact* + -After upgrading, insure that the `.monitoring-8-ilm-policy` ILM policy aligns with your desired retention settings. - -If you only use -metricbeat or agent to collect monitoring data, you can also remove any custom `xpack.monitoring.history.duration` -settings. - -==== diff --git a/docs/reference/migration/migrate_8_0/command-line-tool-changes.asciidoc b/docs/reference/migration/migrate_8_0/command-line-tool-changes.asciidoc deleted file mode 100644 index 7af28a1ae95cc..0000000000000 --- a/docs/reference/migration/migrate_8_0/command-line-tool-changes.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -[discrete] -[[breaking_80_command_line_tool_changes]] -==== Command line tool changes - -TIP: {ess-skip-section} - -[[migrate-tool-removed]] -.The `elasticsearch-migrate` tool has been removed. -[%collapsible] -==== -*Details* + -The `elasticsearch-migrate` tool provided a way to convert file -realm users and roles into the native realm. It has been deprecated -since {es} 7.2.0. Users and roles should now be created in the native -realm directly. - -*Impact* + -Discontinue use of the `elasticsearch-migrate` tool. Attempts to use the -`elasticsearch-migrate` tool will result in an error. -==== diff --git a/docs/reference/migration/migrate_8_0/index-setting-changes.asciidoc b/docs/reference/migration/migrate_8_0/index-setting-changes.asciidoc deleted file mode 100644 index 60e5588a187a7..0000000000000 --- a/docs/reference/migration/migrate_8_0/index-setting-changes.asciidoc +++ /dev/null @@ -1,122 +0,0 @@ -[discrete] -[[breaking_80_index_setting_changes]] -==== Index setting changes - -[[deprecation-system-indices]] -.Direct access to system indices is deprecated. -[%collapsible] -==== -*Details* + -Directly accessing system indices is deprecated, and may be prevented in a -future version. If you must access a system index, create a security role with -an index permission that targets the specific index and set the -`allow_restricted_indices` permission to `true`. Refer to -{ref}/defining-roles.html#roles-indices-priv[indices privileges] for -information on adding this permission to an index privilege. - -*Impact* + -Accessing system indices directly results in warnings in the header of API -responses. If available, use {kib} or the associated feature's {es} APIs to -manage the data that you want to access. -==== - -[[deprecate-max-merge-at-once-explicit-setting]] -.`index.merge.policy.max_merge_at_once_explicit` is deprecated and has no effect. -[%collapsible] -==== -*Details* + -The `index.merge.policy.max_merge_at_once_explicit` index setting is deprecated -and has no effect. - -Previously, you could specify `index.merge.policy.max_merge_at_once_explicit` to -set the maximum number of segments to merge at the same time during a force -merge or when expunging deleted documents. In 8.0, this number is unlimited, -regardless of the setting. - -*Impact* + -Specifying `index.merge.policy.max_merge_at_once_explicit` will have no effect -but will generate deprecation warnings. - -To avoid these deprecation warnings, discontinue use of the setting. Don't -specify the setting when creating new indices, and remove the setting from -index and component templates. - -To remove the setting from an existing data stream or index, specify the -setting's value as `null` using the update index settings API. - -[source,console] ----- -PUT my-index-000001/_settings -{ - "index.merge.policy.max_merge_at_once_explicit": null -} ----- -// TEST[setup:my_index] - -==== - -[[index-max-adjacency-matrix-filters-removed]] -.The `index.max_adjacency_matrix_filters` index setting has been removed. -[%collapsible] -==== -*Details* + -The `index.max_adjacency_matrix_filters` index setting has been removed. -Previously, you could use this setting to configure the maximum number of -filters for the -{ref}/search-aggregations-bucket-adjacency-matrix-aggregation.html[adjacency -matrix aggregation]. The `indices.query.bool.max_clause_count` index setting now -determines the maximum number of filters for the aggregation. - -*Impact* + -Discontinue use of the `index.max_adjacency_matrix_filters` index setting. - -Requests that include the index setting will return an error. If you upgrade a -cluster with a 7.x index that already contains the setting, {es} will -{ref}/archived-settings.html#archived-index-settings[archive the setting]. - -Remove the index setting from index and component templates. Attempts to use a -template that contains the setting will fail and return an error. This includes -automated operations, such the {ilm-init} rollover action. -==== - -.The `index.force_memory_term_dictionary` setting has been removed. -[%collapsible] -==== -*Details* + -The `index.force_memory_term_dictionary` setting was introduced in 7.0 as a -temporary measure to allow users to opt-out of the optimization that leaves the -term dictionary on disk when appropriate. This optimization is now mandatory -and the setting is removed. - -*Impact* + -Discontinue use of the `index.force_memory_term_dictionary` index setting. -Requests that include this setting will return an error. -==== - -.The `index.soft_deletes.enabled` setting has been removed. -[%collapsible] -==== -*Details* + -Creating indices with soft deletes disabled was deprecated in 7.6 and -is no longer supported in 8.0. The `index.soft_deletes.enabled` setting -can no longer be set to `false`. - -*Impact* + -Discontinue use of the `index.soft_deletes.enabled` index setting. Requests that -set `index.soft_deletes.enabled` to `false` will return an error. -==== - -.The `index.translog.retention.age` and `index.translog.retention.size` settings have been removed. -[%collapsible] -==== -*Details* + -Translog retention settings `index.translog.retention.age` and -`index.translog.retention.size` were effectively ignored in 7.4, deprecated in -7.7, and removed in 8.0 in favor of -{ref}/index-modules-history-retention.html[soft deletes]. - -*Impact* + -Discontinue use of the `index.translog.retention.age` and -`index.translog.retention.size` index settings. Requests that -include these settings will return an error. -==== diff --git a/docs/reference/migration/migrate_8_0/java-api-changes.asciidoc b/docs/reference/migration/migrate_8_0/java-api-changes.asciidoc deleted file mode 100644 index 22e1caf1bf5e4..0000000000000 --- a/docs/reference/migration/migrate_8_0/java-api-changes.asciidoc +++ /dev/null @@ -1,50 +0,0 @@ -[discrete] -[[breaking_80_java_api_changes]] -==== Java API changes - -[[ilm-hlrc-rename]] -.The `indexlifecycle` package has been renamed `ilm` in the Java High Level REST Client. -[%collapsible] -==== -*Details* + -In the high level REST client, the `indexlifecycle` package has been -renamed to `ilm` to match the package rename inside the {es} code. - -*Impact* + -Update your workflow and applications to use the `ilm` package in place of -`indexlifecycle`. -==== - -.Changes to `Fuzziness`. -[%collapsible] -==== -*Details* + -To create `Fuzziness` instances, use the `fromString` and `fromEdits` method -instead of the `build` method that used to accept both Strings and numeric -values. Several fuzziness setters on query builders (e.g. -MatchQueryBuilder#fuzziness) now accept only a `Fuzziness` instance instead of -an Object. - -Fuzziness used to be lenient when it comes to parsing arbitrary numeric values -while silently truncating them to one of the three allowed edit distances 0, 1 -or 2. This leniency is now removed and the class will throw errors when trying -to construct an instance with another value (e.g. floats like 1.3 used to get -accepted but truncated to 1). - -*Impact* + -Use the available constants (e.g. `Fuzziness.ONE`, `Fuzziness.AUTO`) or build -your own instance using the above mentioned factory methods. Use only allowed -`Fuzziness` values. -==== - -.Changes to `Repository`. -[%collapsible] -==== -*Details* + -Repository has no dependency on IndexShard anymore. The contract of restoreShard -and snapshotShard has been reduced to Store and MappingService in order to improve -testability. - -*Impact* + -No action needed. -==== diff --git a/docs/reference/migration/migrate_8_0/jvm-option-changes.asciidoc b/docs/reference/migration/migrate_8_0/jvm-option-changes.asciidoc deleted file mode 100644 index bdcffe4667ca4..0000000000000 --- a/docs/reference/migration/migrate_8_0/jvm-option-changes.asciidoc +++ /dev/null @@ -1,54 +0,0 @@ -[discrete] -[[breaking_80_jvm_option_changes]] -==== JVM option changes - -TIP: {ess-skip-section} - -[[breaking_80_allocation_change_flood_stage_block_always_removed]] -.`es.disk.auto_release_flood_stage_block` has been removed. -[%collapsible] -==== -*Details* + -If a node exceeds the flood-stage disk watermark then we add a block to all of -its indices to prevent further writes as a last-ditch attempt to prevent the -node completely exhausting its disk space. By default, from 7.4 onwards the -block is automatically removed when a node drops below the high watermark -again, but this behaviour could be disabled by setting the system property -`es.disk.auto_release_flood_stage_block` to `false`. This behaviour is no -longer optional, and this system property must now not be set. - -*Impact* + -Discontinue use of the `es.disk.auto_release_flood_stage_block` system property. -Setting this system property will result in an error on startup. -==== - -.`es.rest.url_plus_as_space` has been removed. -[%collapsible] -==== -*Details* + -Starting in version 7.4, a `+` in a URL will be encoded as `%2B` by all REST API functionality. Prior versions handled a `+` as a single space. -In these previous versions, if your application required handling `+` as a single space, you could return to the old behaviour by setting the system property -`es.rest.url_plus_as_space` to `true`. Note that this behaviour is deprecated and setting this system property to `true` will cease -to be supported in version 8. - -*Impact* + -Update your application or workflow to assume `+` in a URL is encoded as `%2B`. -==== - -.`es.unsafely_permit_handshake_from_incompatible_builds` has been removed. -[%collapsible] -==== -*Details* + -{es} has a check that verifies that communicating pairs of nodes of the same -version are running exactly the same build and therefore using the same wire -format as each other. In previous versions this check can be bypassed by -setting the system property -`es.unsafely_permit_handshake_from_incompatible_builds` to `true`. The use of -this system property is now forbidden. - -*Impact* + -Discontinue use of the `es.unsafely_permit_handshake_from_incompatible_builds` -system property, and ensure that all nodes of the same version are running -exactly the same build. Setting this system property will result in an error -on startup. -==== diff --git a/docs/reference/migration/migrate_8_0/logging-changes.asciidoc b/docs/reference/migration/migrate_8_0/logging-changes.asciidoc deleted file mode 100644 index 63c025746a64c..0000000000000 --- a/docs/reference/migration/migrate_8_0/logging-changes.asciidoc +++ /dev/null @@ -1,53 +0,0 @@ -[discrete] -[[breaking_80_logging_changes]] -==== Logging changes - -.{es} JSON logs now comply with ECS. -[%collapsible] -==== -*Details* + -{es}'s {ref}/logging.html[JSON logs] now comply with the -{ecs-ref}/index.html[Elastic Common Schema (ECS)]. Previously, {es}'s JSON logs -used a custom schema. - -*Impact* + -If your application parses {es}'s JSON logs, update it to support the new ECS -format. -==== - -.{es} no longer emits deprecation logs or slow logs in plaintext. -[%collapsible] -==== -*Details* + -{es} no longer emits a plaintext version of the following logs: - -* Deprecation logs -* Indexing slow logs -* Search slow logs - -These logs are now only available in JSON. - -Server logs are still available in both a JSON and plaintext format. - -*Impact* + -If your application parses {es}'s plaintext logs, update it to use the new ECS -JSON logs. -==== - -[[audit-logs-are-rolled-over-and-archived-by-size]] -.Audit logs are rolled-over and archived by size. -[%collapsible] -==== -*Details* + -In addition to the existing daily rollover, the security audit logs are -now rolled-over by disk size limit as well. Moreover, the rolled-over logs -are also gzip compressed. - -*Impact* + -The names of rolled over audit log files (but not the name of the current log) -have changed. -If you've set up automated tools to consume these files, you must configure them -to use the new names and to possibly account for `gzip` archives instead of -plain text. The Docker build of {es} is not affected because it logs on `stdout`, -where rollover is not performed. -==== diff --git a/docs/reference/migration/migrate_8_0/mapping-changes.asciidoc b/docs/reference/migration/migrate_8_0/mapping-changes.asciidoc deleted file mode 100644 index 7b3922cf0a5dd..0000000000000 --- a/docs/reference/migration/migrate_8_0/mapping-changes.asciidoc +++ /dev/null @@ -1,133 +0,0 @@ -[discrete] -[[breaking_80_mapping_changes]] -==== Mapping changes - -.Indices created in {es} 6.x and earlier versions are not supported. -[%collapsible] -==== -*Details* + -Elasticsearch 8.0 can read indices created in version 7.0 or above. An -Elasticsearch 8.0 node will not start in the presence of indices created in a -version of Elasticsearch before 7.0. - -*Impact* + -Reindex indices created in {es} 6.x or before with {es} 7.x if they need to be carried forward to {es} 8.x. -==== - -.Closed indices created in {es} 6.x and earlier versions are not supported. -[%collapsible] -==== -*Details* + -In earlier versions a node would start up even if it had data from indices -created in a version before the previous major version, as long as those -indices were closed. {es} now ensures that it is compatible with every index, -open or closed, at startup time. - -*Impact* + -Reindex closed indices created in {es} 6.x or before with {es} 7.x if they need -to be carried forward to {es} 8.x. -==== - -.The maximum number of completion contexts per field is now 10. -[%collapsible] -==== -*Details* + -The number of completion contexts within a single completion field -has been limited to 10. - -*Impact* + -Use a maximum of 10 completion contexts in a completion field. Specifying more -than 10 completion contexts will return an error. -==== - -.Multi-fields within multi-fields is no longer supported. -[%collapsible] -==== -*Details* + -Previously, it was possible to define a multi-field within a multi-field. -Defining chained multi-fields was deprecated in 7.3 and is now no longer -supported. - -*Impact* + -To migrate mappings, all instances of `fields` that occur within -a `fields` block should be removed, either by flattening the chained `fields` -blocks into a single level, or by switching to `copy_to` if appropriate. -==== - -[[fieldnames-enabling]] -.The `_field_names` metadata field's `enabled` parameter has been removed. -[%collapsible] -==== -*Details* + -The setting has been deprecated with 7.5 and is no longer supported on new indices. -Mappings for older indices will continue to work but emit a deprecation warning. - -*Impact* + -The `enabled` setting for `_field_names` should be removed from templates and mappings. -Disabling _field_names is not necessary because it no longer carries a large index overhead. -==== - -[[mapping-boosts]] -.The `boost` parameter on field mappings has been removed. -[%collapsible] -==== -*Details* + -Index-time boosts have been deprecated since the 5x line, but it was still possible -to declare field-specific boosts in the mappings. This is now removed completely. -Indexes built in 7x that contain mapping boosts will emit warnings, and the boosts -will have no effect in 8.0. New indexes will not permit boosts to be set in their -mappings at all. - -*Impact* + -The `boost` setting should be removed from templates and mappings. Use boosts -directly on queries instead. -==== - -.Java-time date formats replace joda-time formats. -[%collapsible] -==== -*Details* + -In 7.0, {es} switched from joda time to java time for date-related parsing, -formatting, and calculations. Indices created in 7.0 and later versions are -already required to use mappings with java-time date formats. However, -earlier indices using joda-time formats must be reindexed to use -mappings with java-time formats. - -*Impact* + -For a detailed migration guide, see the {ref}/migrate-to-java-time.html[Java -time migration guide]. -==== - -[[geo-shape-strategy]] -.Several `geo_shape` mapping parameters have been removed. -[%collapsible] -==== -*Details* + -The following `geo_shape` mapping parameters were deprecated in 6.6: - -* `tree` -* `tree_levels` -* `strategy` -* `distance_error_pct` - -These parameters have been removed in 8.0.0. - -*Impact* + -In 8.0, you can no longer create mappings that include these parameters. -However, 7.x indices that use these mapping parameters will continue to work. -==== - -.The `sparse_vector` field data type has been removed. -[%collapsible] -==== -*Details* + -The `sparse_vector` field type was deprecated in 7.6 and is now removed in -8.0. We have not seen much interest in this experimental field type, and don't -see a clear use case as it's currently designed. If you have feedback or -suggestions around sparse vector functionality, please let us know through -GitHub or the 'discuss' forums. - -*Impact* + -Discontinue use of the `sparse_vector` field data type. Requests containing -a mapping for this field data type will return an error. -==== diff --git a/docs/reference/migration/migrate_8_0/migrate_to_java_time.asciidoc b/docs/reference/migration/migrate_8_0/migrate_to_java_time.asciidoc deleted file mode 100644 index c86eddc04c013..0000000000000 --- a/docs/reference/migration/migrate_8_0/migrate_to_java_time.asciidoc +++ /dev/null @@ -1,314 +0,0 @@ -[[migrate-to-java-time]] -=== Java time migration guide - -With 7.0, {es} switched from joda time to java time for date-related parsing, -formatting, and calculations. This guide is designed to help you determine -if your cluster is impacted and, if so, prepare for the upgrade. - - -[discrete] -[[java-time-convert-date-formats]] -==== Convert date formats - -To upgrade to {es} 8, you'll need to convert any joda-time date formats -to their java-time equivalents. - -[discrete] -[[java-time-migration-impacted-features]] -=== Impacted features -The switch to java time only impacts custom <> and -<> formats. - -These formats are commonly used in: - -* <> -* <> -* <> - -If you don't use custom date formats, you can skip the rest of this guide. -Most custom date formats are compatible. However, several require -an update. - -To see if your date format is impacted, use the <> -or the {kibana-ref-all}/{prev-major-last}/upgrade-assistant.html[Kibana Upgrade Assistant]. - -[discrete] -[[java-time-migration-incompatible-date-formats]] -=== Incompatible date formats -Custom date formats containing the following joda-time literals should be -migrated. - -`Y` (Year of era):: -+ --- -Replace with `y`. - -*Example:* -`YYYY-MM-dd` should become `yyyy-MM-dd`. - -In java time, `Y` is used for -https://docs.oracle.com/javase/8/docs/api/java/time/temporal/WeekFields.html[week-based year]. -Using `Y` in place of `y` could result in off-by-one errors in year calculation. - -For pattern `YYYY-ww` and date `2019-01-01T00:00:00.000Z` will give `2019-01` -For pattern `YYYY-ww` and date `2018-12-31T00:00:00.000Z` will give `2019-01` (counter-intuitive) because there is >4 days of that week in 2019 --- - -`y` (Year):: -+ --- -Replace with `u`. - -*Example:* -`yyyy-MM-dd` should become `uuuu-MM-dd`. - -In java time, `y` is used for year of era. `u` can contain non-positive -values while `y` cannot. `y` can also be associated with an era field. --- - - -`C` (Century of era):: -+ --- -Century of era is not supported in java time. -There is no replacement. Instead, we recommend you preprocess your input. --- - -`x` (Week year):: -+ --- -Replace with `Y`. - -In java time, `x` means https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html[zone-offset]. - -[WARNING] -==== -Failure to properly convert `x` (Week year) to `Y` could result in data loss. -==== --- - -`Z` (Zone offset/id):: -+ --- -Replace with multiple `X`'s. - -`Z` has a similar meaning in java time. However, java time expects different -numbers of literals to parse different forms. - -Consider migrating to `X`, which gives you more control over how time is parsed. -For example, the joda-time format `YYYY-MM-dd'T'hh:mm:ssZZ` accepts the following dates: - -``` -2010-01-01T01:02:03Z -2010-01-01T01:02:03+01 -2010-01-01T01:02:03+01:02 -2010-01-01T01:02:03+01:02:03 -``` - -In java time, you cannot parse all these dates using a single format -Instead, you must specify 3 separate formats: - -``` -2010-01-01T01:02:03Z -2010-01-01T01:02:03+01 -both parsed with yyyy-MM-dd'T'hh:mm:ssX - -2010-01-01T01:02:03+01:02 -yyyy-MM-dd'T'hh:mm:ssXXX - -2010-01-01T01:02:03+01:02:03 -yyyy-MM-dd'T'hh:mm:ssXXXXX -``` - - -The formats must then be delimited using `||`: -[source,txt] --------------------------------------------------- -yyyy-MM-dd'T'hh:mm:ssX||yyyy-MM-dd'T'hh:mm:ssXXX||yyyy-MM-dd'T'hh:mm:ssXXXXX --------------------------------------------------- - -The same applies if you expect your pattern to occur without a colon (`:`): -For example, the `YYYY-MM-dd'T'hh:mm:ssZ` format accepts the following date forms: -``` -2010-01-01T01:02:03Z -2010-01-01T01:02:03+01 -2010-01-01T01:02:03+0102 -2010-01-01T01:02:03+010203 -``` -To accept all these forms in java time, you must use the `||` delimiter: -[source,txt] --------------------------------------------------- -yyyy-MM-dd'T'hh:mm:ssX||yyyy-MM-dd'T'hh:mm:ssXX||yyyy-MM-dd'T'hh:mm:ssXXXX --------------------------------------------------- --- - -`d` (Day):: -+ --- -In java time, `d` is still interpreted as "day" but is less flexible. - -For example, the joda-time date format `YYYY-MM-dd` accepts `2010-01-01` or -`2010-01-1`. - -In java time, you must use the `||` delimiter to provide specify each format: - -[source,txt] --------------------------------------------------- -yyyy-MM-dd||yyyy-MM-d --------------------------------------------------- - -In java time, `d` also does not accept more than 2 digits. To accept days with more -than two digits, you must include a text literal in your java-time date format. -For example, to parse `2010-01-00001`, you must use the following java-time date format: - -[source,txt] --------------------------------------------------- -yyyy-MM-'000'dd --------------------------------------------------- --- - -`e` (Name of day):: -+ --- -In java time, `e` is still interpreted as "name of day" but does not parse -short- or full-text forms. - -For example, the joda-time date format `EEE YYYY-MM` accepts both -`Wed 2020-01` and `Wednesday 2020-01`. - -To accept both of these dates in java time, you must specify each format using -the `||` delimiter: - -[source,txt] --------------------------------------------------- -cccc yyyy-MM||ccc yyyy-MM --------------------------------------------------- - -The joda-time literal `E` is interpreted as "day of week." -The java-time literal `c` is interpreted as "localized day of week." -`E` does not accept full-text day formats, such as `Wednesday`. --- - -`EEEE` and similar text forms:: -+ --- -Support for full-text forms depends on the locale data provided with your Java -Development Kit (JDK) and other implementation details. We recommend you -test formats containing these patterns carefully before upgrading. --- - -`z` (Time zone text):: -+ --- -In java time, `z` outputs 'Z' for Zulu when given a UTC timezone. --- - -[discrete] -[[java-time-migration-test]] -=== Test with your data - -We strongly recommend you test any date format changes using real data before -deploying in production. - -[discrete] -[[java-time-migrate-update-mappings]] -=== Update index mappings -To update joda-time date formats in index mappings, you must create a new index -with an updated mapping and reindex your data to it. - -The following `my-index-000001` index contains a mapping for the `datetime` field, a -`date` field with a custom joda-time date format. -//// -[source,console] --------------------------------------------------- -PUT my-index-000001 -{ - "mappings": { - "properties": { - "datetime": { - "type": "date", - "format": "yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis" - } - } - } -} --------------------------------------------------- -//// - -[source,console] --------------------------------------------------- -GET my-index-000001/_mapping --------------------------------------------------- -// TEST[continued] - -[source,console-result] --------------------------------------------------- -{ - "my-index-000001" : { - "mappings" : { - "properties" : { - "datetime": { - "type": "date", - "format": "yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis" - } - } - } - } -} --------------------------------------------------- - - -To change the date format for the `datetime` field, create a separate index -containing an updated mapping and date format. - -For example, the following `my-index-000002` index changes the `datetime` field's -date format to `uuuu/MM/dd HH:mm:ss||uuuu/MM/dd||epoch_millis`. - -[source,console] --------------------------------------------------- -PUT my-index-000002 -{ - "mappings": { - "properties": { - "datetime": { - "type": "date", - "format": "uuuu/MM/dd HH:mm:ss||uuuu/MM/dd||epoch_millis" - } - } - } -} --------------------------------------------------- -// TEST[continued] - -Next, reindex data from the old index to the new index. - -The following <> API request reindexes data from -`my-index-000001` to `my-index-000002`. - -[source,console] --------------------------------------------------- -POST _reindex -{ - "source": { - "index": "my-index-000001" - }, - "dest": { - "index": "my-index-000002" - } -} --------------------------------------------------- -// TEST[continued] - -If you use index aliases, update them to point to the new index. - -[source,console] --------------------------------------------------- -POST /_aliases -{ - "actions" : [ - { "remove" : { "index" : "my-index-000001", "alias" : "my-index" } }, - { "add" : { "index" : "my-index-000002", "alias" : "my-index" } } - ] -} --------------------------------------------------- -// TEST[continued] diff --git a/docs/reference/migration/migrate_8_0/packaging-changes.asciidoc b/docs/reference/migration/migrate_8_0/packaging-changes.asciidoc deleted file mode 100644 index 7e0c2c72ee6d7..0000000000000 --- a/docs/reference/migration/migrate_8_0/packaging-changes.asciidoc +++ /dev/null @@ -1,60 +0,0 @@ -[discrete] -[[breaking_80_packaging_changes]] -==== Packaging changes - -TIP: {ess-skip-section} - -.The layout of the data folder has changed. -[%collapsible] -==== -*Details* + -Each node's data is now stored directly in the data directory set by the -`path.data` setting, rather than in `${path.data}/nodes/0`, because the removal -of the `node.max_local_storage_nodes` setting means that nodes may no longer -share a data path. - -*Impact* + -At startup, {es} will automatically migrate the data path to the new layout. -This automatic migration will not proceed if the data path contains data for -more than one node. You should move to a configuration in which each node has -its own data path before upgrading. - -If you try to upgrade a configuration in which there is data for more than one -node in a data path then the automatic migration will fail and {es} -will refuse to start. To resolve this you will need to perform the migration -manually. The data for the extra nodes are stored in folders named -`${path.data}/nodes/1`, `${path.data}/nodes/2` and so on, and you should move -each of these folders to an appropriate location and then configure the -corresponding node to use this location for its data path. If your nodes each -have more than one data path in their `path.data` settings then you should move -all the corresponding subfolders in parallel. Each node uses the same subfolder -(e.g. `nodes/2`) across all its data paths. -==== - -.The default Maxmind geoip databases have been removed. -[%collapsible] -==== -*Details* + -The default Maxmind geoip databases that shipped by default with Elasticsearch -have been removed. These databases are out dated and stale and using these -databases will likely result in incorrect geoip lookups. - -By default since 7.13, these pre-packaged geoip databases -were used in case no database were specified in the config directory or before -the geoip downloader downloaded the geoip databases. When the geoip database -downloader completed downloading the new databases then these pre-packaged -databases were no longer used. - -*Impact* + -If the geoip downloader is disabled and no geoip databases are provided -in the config directory of each ingest node then the geoip processor will -no longer perform geoip lookups and tag these documents with the fact that -the requested database is no longer available. - -After a cluster has been started and before the geoip downloader has completed -downloading the most up to data databases, the geoip processor will not perform -any geoip lookups and tag documents that the requested database is not available. -After the geoip downloader has completed downloading the most up to data databases -then the geoip processor will function as normal. The window of time that the -geoip processor can't do geoip lookups after cluster startup should be very small. -==== diff --git a/docs/reference/migration/migrate_8_0/painless-changes.asciidoc b/docs/reference/migration/migrate_8_0/painless-changes.asciidoc deleted file mode 100644 index 601866cb8995d..0000000000000 --- a/docs/reference/migration/migrate_8_0/painless-changes.asciidoc +++ /dev/null @@ -1,42 +0,0 @@ -[discrete] -[[breaking_80_painless_changes]] -==== Painless changes - -.The `JodaCompatibleZonedDateTime` class has been removed. -[%collapsible] -==== -*Details* + -As a transition from Joda datetime to Java datetime, scripting used -an intermediate class called `JodaCompatibleZonedDateTime`. This class -has been removed and is replaced by `ZonedDateTime`. Any use of casting -to a `JodaCompatibleZonedDateTime` or use of method calls only available -in `JodaCompatibleZonedDateTime` in a script will result in a compilation -error, and may not allow the upgraded node to start. - -*Impact* + -Before upgrading, replace `getDayOfWeek` with `getDayOfWeekEnum().value` in any -scripts. Any use of `getDayOfWeek` expecting a return value of `int` will result -in a compilation error or runtime error and may not allow the upgraded node to -start. - -The following `JodaCompatibleZonedDateTime` methods must be replaced using -`ZonedDateTime` methods prior to upgrade: - -* `getMillis()` -> `toInstant().toEpochMilli()` -* `getCenturyOfEra()` -> `get(ChronoField.YEAR_OF_ERA) / 100` -* `getEra()` -> `get(ChronoField.ERA)` -* `getHourOfDay()` -> `getHour()` -* `getMillisOfDay()` -> `get(ChronoField.MILLI_OF_DAY)` -* `getMillisOfSecond()` -> `get(ChronoField.MILLI_OF_SECOND)` -* `getMinuteOfDay()` -> `get(ChronoField.MINUTE_OF_DAY)` -* `getMinuteOfHour()` -> `getMinute()` -* `getMonthOfYear()` -> `getMonthValue()` -* `getSecondOfDay()` -> `get(ChronoField.SECOND_OF_DAY)` -* `getSecondOfMinute()` -> `getSecond()` -* `getWeekOfWeekyear()` -> `get(IsoFields.WEEK_OF_WEEK_BASED_YEAR)` -* `getWeekyear()` -> `get(IsoFields.WEEK_BASED_YEAR)` -* `getYearOfCentury()` -> `get(ChronoField.YEAR_OF_ERA) % 100` -* `getYearOfEra()` -> `get(ChronoField.YEAR_OF_ERA)` -* `toString(String)` -> a DateTimeFormatter -* `toString(String, Locale)` -> a DateTimeFormatter -==== diff --git a/docs/reference/migration/migrate_8_0/plugin-changes.asciidoc b/docs/reference/migration/migrate_8_0/plugin-changes.asciidoc deleted file mode 100644 index 42baf8f7f2a69..0000000000000 --- a/docs/reference/migration/migrate_8_0/plugin-changes.asciidoc +++ /dev/null @@ -1,64 +0,0 @@ -[discrete] -[[breaking_80_plugin_changes]] -==== Plugin changes - -TIP: {ess-skip-section} - -.The S3, GCS and Azure repository plugins are now included in Elasticsearch -[%collapsible] -==== -*Details* + -In previous versions of {es}, in order to register a snapshot repository -backed by Amazon S3, Google Cloud Storage (GCS) or Microsoft Azure Blob -Storage, you first had to install the corresponding Elasticsearch plugin, -for example `repository-s3`. These plugins are now included in {es} by -default. - -*Impact* + -You no longer need to install the following plugins, and not should attempt -to do so. - -* `repository-azure` -* `repository-gcs` -* `repository-s3` - -{es} and the `elasticsearch-plugin` CLI tool have been changed to tolerate -attempted installation and removal of these plugins in order to avoid -breaking any existing automation. In the future, attempting to install -these plugins will be an error. - -Specifically, the `elasticsearch-plugin` CLI tool will not fail if you -attempt to install any of the above plugins, and will instead print a -warning and skip the plugins. If any of these plugins are already -installed, for example because you installed them when running an older -version of {es}, then you can still remove them with -`elasticsearch-plugin`. Attempting to remove them if they are not installed -will succeed but print a warnings. - -If you run {es} using Docker and you are managing plugins using a -{plugins}/manage-plugins-using-configuration-file.html[configuration file], then when -{es} first starts after you upgrade it, it will remove the above plugins if -they already installed. If any of these plugins are specified in your -configuration file, {es} will ignore them and emit a warning log message. -==== - -.Third party plugins can no longer intercept REST requests (`RestHandlerWrapper`) -[%collapsible] -==== -*Details* + -In previous versions of {es}, third-party plugins could implement the -`getRestHandlerWrapper` method to intercept all REST requests to the node. A -common use of this feature was to implement custom security plugins to replace -the built-in {security-features}. This extension point is no longer available -to third-party plugins. - - -*Impact* + -Some third-party plugins that were designed to work with earlier versions of -{es} might not be compatible with {es} version 8.0 or later. - -If you depend on any plugins that are not produced and supported by Elastic, -check with the plugin author and ensure that the plugin is available for your -target version of {es} before upgrading. - -==== diff --git a/docs/reference/migration/migrate_8_0/rest-api-changes.asciidoc b/docs/reference/migration/migrate_8_0/rest-api-changes.asciidoc deleted file mode 100644 index 99c09b9b05385..0000000000000 --- a/docs/reference/migration/migrate_8_0/rest-api-changes.asciidoc +++ /dev/null @@ -1,1138 +0,0 @@ -[discrete] -[[breaking_80_rest_api_changes]] -==== REST API changes - -.REST API endpoints containing `_xpack` have been removed. -[%collapsible] -==== -*Details* + -In 7.0, we deprecated REST endpoints that contain `_xpack` in their path. These -endpoints are now removed in 8.0. Each endpoint that was deprecated and removed -is replaced with a new endpoint that does not contain `_xpack`. As an example, -`/{index}/_xpack/graph/_explore` is replaced by `/{index}/_graph/explore`. - -*Impact* + -Use the replacement REST API endpoints. Requests submitted to the `_xpack` -API endpoints will return an error. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], any requests that include -the`_xpack` prefix are rerouted to the corresponding URL without the `_xpack` -prefix. -==== - -[[remove-mapping-type-api-endpoints]] -.REST API endpoints containing mapping types have been removed. -[%collapsible] -==== -*Details* + -Mapping types have been removed. API endpoints that contain a mapping type have -also been removed. Use a typeless endpoint instead. - -[options="header",cols="<1,<3,<1"] -|==== -| API | Typed API endpoint | Typeless API endpoint - -| {ref}/docs-bulk.html[Bulk] -| `//_bulk` -| `/_bulk` - -| {ref}/search-count.html[Count] -| `//_count` -| `/_count` - -| {ref}/docs-delete.html[Delete] -| `//<_id>` -| `/_doc/<_id>` - -| {ref}/docs-delete-by-query.html[Delete by query] -| `//_delete_by_query` -| `/_delete_by_query` - -| {ref}/search-explain.html[Explain] -| `//<_id>/_explain` -| `/_explain/<_id>` - -| {ref}/docs-get.html[Get] -| `//<_id>` -| `/_doc/<_id>` - -| -| `//<_id>/_source` -| `/_source/<_id>` - -| {ref}/indices-get-field-mapping.html[Get field mapping] -| `_mapping//field/` -| `_mapping/field/` - -| -| `/_mapping//field/` -| `/_mapping/field/` - -| {ref}/indices-get-mapping.html[Get mapping] -| `_mapping/` -| `_mapping` or `/_mapping` - -| -| `//_mapping` -| `/_mapping` - -| -| `/_mapping/` -| `/_mapping` - -| {ref}/graph-explore-api.html[Graph explore] -| `//_graph/explore` -| `/_graph/explore` - -| {ref}/docs-index_.html[Index] -| `//<_id>/_create` -| `/_create/<_id>` - -| -| `/` -| `/_doc` - -| -| `//<_id>` -| `/_doc/<_id>` - -| {ref}/docs-multi-get.html[Multi get] -| `//_mget` -| `/_mget` - -| {ref}/search-multi-search.html[Multi search] -| `//_msearch` -| `/_msearch` - -| {ref}/multi-search-template.html[Multi search template] -| `//_msearch/template` -| `/_msearch/template` - -| {ref}/docs-multi-termvectors.html[Multi term vectors] -| `//_mtermvectors` -| `/_mtermvectors` - -| {ref}/rollup-search.html[Rollup search] -| `//_rollup_search` -| `/_rollup_search` - -| {ref}/search-search.html[Search] -| `//_search` -| `/_search` - -| {ref}/search-template-api.html[Search template] -| `//_search/template` -| `/_search/template` - -| {ref}/docs-termvectors.html[Term vectors] -| `//<_id>/_termvectors` -| `/_termvectors<_id>` - -| -| `//_termvectors` -| `/_termvectors` - -| {ref}/docs-update.html[Update] -| `//<_id>/_update` -| `/_update/<_id>` - -| {ref}/docs-update-by-query.html[Update by query] -| `//_update_by_query` -| `/_update_by_query` - -| {ref}/indices-put-mapping.html[Update mapping] -| `//_mapping` -| `/_mapping` - -| -| `/_mapping/` -| `/_mapping` - -| -| `_mapping/` -| `/_mapping` - -| {ref}/search-validate.html[Validate] -| `//_validate/query` -| `/_validate/query` - -|==== - -*Impact* + -Update your application to use typeless REST API endpoints. Requests to -endpoints that contain a mapping type will return an error. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], if a request includes a custom -mapping type it is ignored. The request is rerouted to the corresponding -typeless URL. Custom mapping types in request bodies and type related HTTP -parameters are ignored, and responses, where warranted, include `_type` : -`_doc`. - -==== - -.{ccs-cap} ({ccs-init}) is now only backward-compatible with the previous minor version. -[%collapsible] -==== -*Details* + -In 8.0+, Elastic supports searches from a local cluster to a remote cluster -running: - -* The previous minor version. -* The same version. -* A newer minor version in the same major version. - -Elastic also supports searches from a local cluster running the last minor -version of a major version to a remote cluster running any minor version in the -following major version. For example, a local 7.17 cluster can search any -remote 8.x cluster. - -include::{es-ref-dir}/search/search-your-data/ccs-version-compat-matrix.asciidoc[] - -IMPORTANT: For the {ref}/eql-search-api.html[EQL search API], the local and -remote clusters must use the same {es} version if they have versions prior to 7.17.7 (included) or prior to 8.5.1 (included). - -For example, a local 8.0 cluster can search a remote 7.17 or any remote 8.x -cluster. However, a search from a local 8.0 cluster to a remote 7.16 or 6.8 -cluster is not supported. - -Previously, we also supported searches on remote clusters running: - -* Any minor version of the local cluster's major version. -* The last minor release of the previous major version. - -However, such searches can result in undefined behavior. - -*Impact* + -If you only run cross-cluster searches on remote clusters using the same or a -newer version, no changes are needed. - -If you previously searched remote clusters running an earlier version of {es}, -see {ref}/modules-cross-cluster-search.html#ensure-ccs-support[Ensure {ccs} -support] for recommended solutions. - -A {ccs} using an unsupported configuration may still work. However, such -searches aren't tested by Elastic, and their behavior isn't guaranteed. -==== - -[[remove-term-order-key]] -.The `terms` aggregation no longer supports the `_term` order key. -[%collapsible] -==== -*Details* + -The `terms` aggregation no longer supports the `_term` key in `order` values. To -sort buckets by their term, use `_key` instead. - -*Impact* + -Discontinue use of the `_term` order key. Requests that include a `_term` order -key will return an error. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], the `_term` order is ignored and -`key` is used instead. -==== - -[[remove-time-order-key]] -.The `date_histogram` aggregation no longer supports the `_time` order key. -[%collapsible] -==== -*Details* + -The `date_histogram` aggregation no longer supports the `_time` key in `order` -values. To sort buckets by their key, use `_key` instead. - -*Impact* + -Discontinue use of the `_time` order key. Requests that include a `_time` order -key will return an error. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], the `_time` order is ignored and -`_key` is used instead. -==== - -[[remove-moving-avg-agg]] -.The `moving_avg` aggregation has been removed. -[%collapsible] -==== -*Details* + -The `moving_avg` aggregation was deprecated in 6.4 and has been removed. To -calculate moving averages, use the -{ref}/search-aggregations-pipeline-movfn-aggregation.html[`moving_fn` -aggregation] instead. - -*Impact* + -Discontinue use of the `moving_avg` aggregation. Requests that include the -`moving_avg` aggregation will return an error. - - -==== - -[[percentile-duplication]] -.The `percentiles` aggregation's `percents` parameter no longer supports duplicate values. -[%collapsible] -==== -*Details* + -If you specify the `percents` parameter with the -{ref}/search-aggregations-metrics-percentile-aggregation.html[`percentiles` aggregation], -its values must be unique. Otherwise, an exception occurs. - -*Impact* + -Use unique values in the `percents` parameter of the `percentiles` aggregation. -Requests containing duplicate values in the `percents` parameter will return -an error. - -==== - -[[date-histogram-interval]] -.The `date_histogram` aggregation's `interval` parameter is no longer valid. -[%collapsible] -==== -*Details* + -It is now an error to specify the `interval` parameter to the -{ref}/search-aggregations-bucket-datehistogram-aggregation.html[`date_histogram` -aggregation] or the -{ref}/search-aggregations-bucket-composite-aggregation.html#_date_histogram[`composite -date_histogram` source. Instead, please use either `calendar_interval` or -`fixed_interval` as appropriate. - -*Impact* + -Uses of the `interval` parameter in either the `date_histogram` aggregation or -the `date_histogram` composite source will now generate an error. Instead -please use the more specific `fixed_interval` or `calendar_interval` -parameters. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], the `interval` parameter is -adapted to either a fixed or calendar interval. -==== - -[[ngram-edgengram-filter-names-removed]] -.The `nGram` and `edgeNGram` token filter names have been removed. -[%collapsible] -==== -*Details* + -The `nGram` and `edgeNGram` token filter names that have been deprecated since -version 6.4 have been removed. Both token filters can only be used by their -alternative names `ngram` and `edge_ngram` since version 7.0. - -*Impact* + -Use the equivalent `ngram` and `edge_ngram` token filters. Requests containing -the `nGram` and `edgeNGram` token filter names will return an error. -==== - -[[nGram-edgeNGram-tokenizer-dreprecation]] -.The `nGram` and `edgeNGram` tokenizer names have been removed. -[%collapsible] -==== -*Details* + -The `nGram` and `edgeNGram` tokenizer names haven been deprecated with 7.6 and are no longer -supported on new indices. Mappings for indices created after 7.6 will continue to work but -emit a deprecation warning. The tokenizer name should be changed to the fully equivalent -`ngram` or `edge_ngram` names for new indices and in index templates. - -*Impact* + -Use the `ngram` and `edge_ngram` tokenizers. Requests to create new indices -using the `nGram` and `edgeNGram` tokenizer names will return an error. -==== - -.The `in_flight_requests` stat has been renamed `inflight_requests` in logs and diagnostic APIs. -[%collapsible] -==== -*Details* + -The name of the in flight requests circuit breaker in log output and diagnostic APIs (such as the node stats API) changes from `in_flight_requests` to `inflight_requests` to align it with the name of the corresponding settings. - -*Impact* + -Update your workflow and applications to use the `inflight_requests` stat in -place of `in_flight_requests`. -==== - -.The voting configuration exclusions API endpoint has changed. -[%collapsible] -==== -*Details* + -The `POST /_cluster/voting_config_exclusions/{node_filter}` API has been -removed in favour of `POST /_cluster/voting_config_exclusions?node_names=...` -and `POST /_cluster/voting_config_exclusions?node_ids=...` which allow you to -specify the names or IDs of the nodes to exclude. - -*Impact* + -Use `POST /_cluster/voting_config_exclusions?node_ids=...` and specify the nodes -to exclude instead of using a node filter. Requests submitted to the -`/_cluster/voting_config_exclusions/{node_filter}` endpoint will return an -error. -==== - -.Remote system indices are not followed automatically if they match an auto-follow pattern. -[%collapsible] -==== -*Details* + -Remote system indices matching an {ref}/ccr-auto-follow.html[auto-follow -pattern] won't be configured as a follower index automatically. - -*Impact* + -Explicitly {ref}/ccr-put-follow.html[create a follower index] to follow a remote -system index if that's the wanted behaviour. -==== - -.The EQL `wildcard` function has been removed. -[%collapsible] -==== -*Details* + -The `wildcard` function was deprecated in {es} 7.13.0 and has been removed. - -*Impact* + -Use the `like` or `regex` {ref}/eql-syntax.html#eql-syntax-pattern-comparison-keywords[keywords] instead. -==== - -[[ilm-freeze-noop]] -.The ILM `freeze` action is now a no-op. -[%collapsible] -==== -*Details* + -The ILM freeze action is now a no-op and performs no action on the index, as the freeze API endpoint -has been removed in 8.0. - -*Impact* + -Update your ILM policies to remove the `freeze` action from the `cold` phase. -==== - -[[ilm-policy-validation]] -.Additional validation for ILM policies. -[%collapsible] -==== -*Details* + -Creating or updating an ILM policy now requires that any referenced snapshot repositories and SLM -policies exist. - -*Impact* + -Update your code or configuration management to ensure that repositories and SLM policies are created -before any policies that reference them. -==== - -.The deprecated `_upgrade` API has been removed. -[%collapsible] -==== -*Details* + -Previously, the `_upgrade` API upgraded indices from the previous major -version to the current version. The `_reindex` API should be used -instead for that purpose. - -*Impact* + -Requests made to the old `_upgrade` API will return an error. -==== - -.The deprecated freeze index API has been removed. -[%collapsible] -==== -*Details* + -The freeze index API (`POST //_freeze`) has been removed. -https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[Improvements -in heap memory usage] have eliminated the reason to freeze indices. -You can still unfreeze existing frozen indices using the -{ref}/unfreeze-index-api.html[unfreeze index API]. For some use cases, the -frozen tier may be a suitable replacement for frozen indices. See -{ref}/data-tiers.html[data tiers] for more information. - -*Impact* + -Requests made to the old freeze index API will return an error. -==== - -.The force merge API's `max_num_segments` and `only_expunge_deletes` parameters cannot both be specified in the same request. -[%collapsible] -==== -*Details* + -Previously, the force merge API allowed the parameters `only_expunge_deletes` -and `max_num_segments` to be set to a non default value at the same time. But -the `max_num_segments` was silently ignored when `only_expunge_deletes` is set -to `true`, leaving the false impression that it has been applied. - -*Impact* + -When using the {ref}/indices-forcemerge.html[force merge API], do not specify -values for both the `max_num_segments` and `only_expunge_deletes` parameters. -Requests that include values for both parameters will return an error. -==== - -.The create or update index template API's `template` parameter has been removed. -[%collapsible] -==== -*Details* + -In 6.0, we deprecated the `template` parameter in create or update index -template requests in favor of using `index_patterns`. Support for the `template` -parameter is now removed in 8.0. - -*Impact* + -Use the {ref}/indices-templates-v1.html[create or update index template API]'s -`index_patterns` parameter. Requests that include the `template` parameter will -return an error. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], the `template` parameter is mapped -to `index_patterns`. -==== - -.Synced flush has been removed. -[%collapsible] -==== -*Details* + -Synced flush was deprecated in 7.6 and is removed in 8.0. Use a regular flush -instead as it has the same effect as a synced flush in 7.6 and later. - -*Impact* + -Use the {ref}/indices-flush.html[flush API]. Requests to the -`//flush/synced` or `/flush/synced` endpoints will return an error. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], the request to synced flush is -routed to the equivalent non-synced flush URL. -==== - -.The default for the `?wait_for_active_shards` parameter on the close index API has changed. -[%collapsible] -==== -*Details* + -When closing an index in earlier versions, by default {es} would not wait for -the shards of the closed index to be properly assigned before returning. From -version 8.0 onwards the default behaviour is to wait for shards to be assigned -according to the -{ref}/docs-index_.html#index-wait-for-active-shards[`index.write.wait_for_active_shards` -index setting]. - -*Impact* + -Accept the new behaviour, or specify `?wait_for_active_shards=0` to preserve -the old behaviour if needed. -==== - -.The index stats API's `types` query parameter has been removed. -[%collapsible] -==== -*Details* + -The index stats API's `types` query parameter has been removed. Previously, you -could combine `types` with the `indexing` query parameter to return indexing -stats for specific mapping types. Mapping types have been removed in 8.0. - -*Impact* + -Discontinue use of the `types` query parameter. Requests that include the -parameter will return an error. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], the `types` query parameter is -ignored and stats are returned for the entire index. -==== - -.The `user_agent` ingest processor's `ecs` parameter has no effect. -[%collapsible] -==== -*Details* + -In 7.2, we deprecated the `ecs` parameter for the `user_agent` ingest processor. -In 8.x, the `user_agent` ingest processor will only return {ecs-ref}[Elastic -Common Schema (ECS)] fields, regardless of the `ecs` value. - -*Impact* + -To avoid deprecation warnings, remove the parameter from your ingest pipelines. -If a pipeline specifies an `ecs` value, the value is ignored. -==== - -.The `include_type_name` query parameter has been removed. -[%collapsible] -==== -*Details* + -The `include_type_name` query parameter has been removed from the index -creation, index template, and mapping APIs. Previously, you could set -`include_type_name` to `true` to indicate that requests and responses should -include a mapping type name. Mapping types have been removed in 8.x. - -*Impact* + -Discontinue use of the `include_type_name` query parameter. Requests that -include the parameter will return an error. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], the `include_type_name` query -parameter is ignored and any custom mapping types in the request are removed. -==== - -.Reindex from remote now re-encodes URL-encoded index names. -[%collapsible] -==== -*Details* + -Reindex from remote would previously allow URL-encoded index names and not -re-encode them when generating the search request for the remote host. This -leniency has been removed such that all index names are correctly encoded when -reindex generates remote search requests. - -*Impact* + -Specify unencoded index names for reindex from remote requests. -==== - -.In the reindex, delete by query, and update by query APIs, the `size` parameter has been renamed. -[%collapsible] -==== -*Details* + -Previously, a `_reindex` request had two different size specifications in the body: - -- Outer level, determining the maximum number of documents to process -- Inside the `source` element, determining the scroll/batch size. - -The outer level `size` parameter has now been renamed to `max_docs` to -avoid confusion and clarify its semantics. - -Similarly, the `size` parameter has been renamed to `max_docs` for -`_delete_by_query` and `_update_by_query` to keep the 3 interfaces consistent. - -*Impact* + -Use the replacement parameters. Requests containing the `size` parameter will -return an error. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], the `size` parameter is mapped to -the `max_docs` parameter. -==== - -.The update by query API now rejects unsupported `script` fields. -[%collapsible] -==== -*Details* + -An update by query API request that includes an unsupported field in the -`script` object now returns an error. Previously, the API would silently ignore -these unsupported fields. - -*Impact* + -To avoid errors, remove unsupported fields from the `script` object. -==== - -.The cat node API's `local` query parameter has been removed. -[%collapsible] -==== -*Details* + -The `?local` parameter to the `GET _cat/nodes` API was deprecated in 7.x and is -rejected in 8.0. This parameter caused the API to use the local cluster state -to determine the nodes returned by the API rather than the cluster state from -the master, but this API requests information from each selected node -regardless of the `?local` parameter which means this API does not run in a -fully node-local fashion. - -*Impact* + -Discontinue use of the `?local` query parameter. {ref}/cat-nodes.html[cat node -API] requests that include this parameter will return an error. -==== - -.The cat shard API's `local` query parameter has been removed. -[%collapsible] -==== -*Details* + -The `?local` parameter to the `GET _cat/shards` API was deprecated in 7.x and is -rejected in 8.0. This parameter caused the API to use the local cluster state -to determine the nodes returned by the API rather than the cluster state from -the master, but this API requests information from each selected node -regardless of the `?local` parameter which means this API does not run in a -fully node-local fashion. - -*Impact* + -Discontinue use of the `?local` query parameter. {ref}/cat-shards.html[cat shards -API] requests that include this parameter will return an error. -==== - -.The cat indices API's `local` query parameter has been removed. -[%collapsible] -==== -*Details* + -The `?local` parameter to the `GET _cat/indices` API was deprecated in 7.x and is -rejected in 8.0. This parameter caused the API to use the local cluster state -to determine the nodes returned by the API rather than the cluster state from -the master, but this API requests information from each selected node -regardless of the `?local` parameter which means this API does not run in a -fully node-local fashion. - -*Impact* + -Discontinue use of the `?local` query parameter. {ref}/cat-indices.html[cat indices -API] requests that include this parameter will return an error. -==== - -.The get field mapping API's `local` query parameter has been removed. -[%collapsible] -==== -*Details* + -The `local` parameter for get field mapping API was deprecated in 7.8 and is -removed in 8.0. This parameter is a no-op and field mappings are always retrieved -locally. - -*Impact* + -Discontinue use of the `local` query parameter. -{ref}/indices-get-field-mapping.html[get field mapping API] requests that -include this parameter will return an error. -==== - -.Post data to jobs API is deprecated. -[%collapsible] -==== -*Details* + -The {ml} {ref}/ml-post-data.html[post data to jobs API] is deprecated starting in 7.11.0 -and will be removed in a future major version. - -*Impact* + -Use {ref}/ml-ad-apis.html#ml-api-datafeed-endpoint[{dfeeds}] instead. -==== - -.The `job_id` property of the Update {dfeeds} API has been removed. -[%collapsible] -==== -*Details* + -The ability to update a `job_id` in a {dfeed} was deprecated in 7.3.0. and is -removed in 8.0. - -*Impact* + -It is not possible to move {dfeeds} between {anomaly-jobs}. -==== - -.Create repository and delete repository API's return `409` status code when a repository is in use instead of `500`. -[%collapsible] -==== -*Details* + -The {ref}/put-snapshot-repo-api.html[Create or update snapshot repository API] and -{ref}/delete-snapshot-repo-api.html[Delete snapshot repository API] return `409` -status code when the request is attempting to modify an existing repository that's in use instead of status code `500`. - -*Impact* + -Update client code that handles creation and deletion of repositories to reflect this change. -==== - -.The `allow_no_datafeeds` property has been removed from {ml} APIs. -[%collapsible] -==== -*Details* + -The `allow_no_datafeeds` property was deprecated in the -{ref}/cat-datafeeds.html[cat {dfeeds}], -{ref}/ml-get-datafeed.html[get {dfeeds}], -{ref}/ml-get-datafeed-stats.html[get {dfeed} statistics], and -{ref}/ml-stop-datafeed.html[stop {dfeeds}] APIs in 7.10.0. - -*Impact* + -Use `allow_no_match` instead. -==== - -.The `allow_no_jobs` property has been removed from {ml} APIs. -[%collapsible] -==== -*Details* + -The `allow_no_jobs` property was deprecated in the -{ref}/cat-anomaly-detectors.html[cat anomaly detectors], -{ref}/ml-close-job.html[close {anomaly-jobs}], -{ref}/ml-get-job.html[get {anomaly-jobs}], -{ref}/ml-get-job-stats.html[get {anomaly-job} statistics], and -{ref}/ml-get-overall-buckets.html[get overall buckets] APIs in 7.10.0. - -*Impact* + -Use `allow_no_match` instead. -==== - -.The StartRollupJob endpoint now returns a success status if a job has already started. -[%collapsible] -==== -*Details* + -Previously, attempting to start an already-started rollup job would -result in a `500 InternalServerError: Cannot start task for Rollup Job -[job] because state was [STARTED]` exception. - -Now, attempting to start a job that is already started will just -return a successful `200 OK: started` response. - -*Impact* + -Update your workflow and applications to assume that a 200 status in response to -attempting to start a rollup job means the job is in an actively started state. -The request itself may have started the job, or it was previously running and so -the request had no effect. -==== - -.Stored scripts no longer support empty scripts or search templates. -[%collapsible] -==== -*Details* + -The {ref}/create-stored-script-api.html[create or update stored script API]'s -`source` parameter cannot be empty. - -*Impact* + -Before upgrading, use the {ref}/delete-stored-script-api.html[delete stored -script API] to delete any empty stored scripts or search templates. -In 8.0, {es} will drop any empty stored scripts or empty search templates from -the cluster state. Requests to create a stored script or search template with -an empty `source` will return an error. -==== - -.The create or update stored script API's `code` parameter has been removed. -[%collapsible] -==== -*Details* + -The {ref}/create-stored-script-api.html[create or update stored script API]'s -`code` parameter has been removed. Use the `source` parameter instead. - -*Impact* + -Discontinue use of the `code` parameter. Requests that include the parameter -will return an error. -==== - -[[_type-search-matches-no-docs]] -.Searches on the `_type` field are no longer supported. -[%collapsible] -==== -*Details* + -In 8.x, the `_type` metadata field has been removed. {es} now handles a search -on the `_type` field as a search on a non-existent field. A search on a -non-existent field matches no documents, regardless of the query string. - -In 7.x, a search for `_doc` in the `_type` field would match the same documents -as a `match_all` query. - -*Impact* + -Remove queries on the `_type` field from your search requests and search -templates. Searches that include these queries may return no results. -==== - -[[msearch-empty-line-support]] -.The multi search API now parses an empty first line as action metadata in text files. -[%collapsible] -==== -*Details* + -The multi search API now parses an empty first line as empty action metadata -when you provide a text file as the request body, such as when using curl's -`--data-binary` flag. - -The API no longer supports text files that contain: - -* An empty first line followed by a line containing only `{}`. -* An empty first line followed by another empty line. - -*Impact* + -Don't provide an unsupported text file to the multi search API. Requests that -include an unsupported file will return an error. -==== - -[[remove-unmapped-type-string]] -.The `unmapped_type: string` sort option has been removed. -[%collapsible] -==== -*Details* + -Search requests no longer support the `unmapped_type: string` sort option. -Instead, use `unmapped_type: keyword` to handle an unmapped field as if it had -the `keyword` field type but ignore its values for sorting. - -*Impact* + -Discontinue use of `unmapped_type: string`. Search requests that include the -`unmapped_type: string` sort option will return shard failures. -==== - -[[id-field-data]] -.Aggregating and sorting on `_id` is disallowed by default. -[%collapsible] -==== -*Details* + -Previously, it was possible to aggregate and sort on the built-in `_id` field -by loading an expensive data structure called fielddata. This was deprecated -in 7.6 and is now disallowed by default in 8.0. - -*Impact* + -Aggregating and sorting on `_id` should be avoided. As an alternative, the -`_id` field's contents can be duplicated into another field with docvalues -enabled (note that this does not apply to auto-generated IDs). -==== - -.The `common` query has been removed. -[%collapsible] -==== -*Details* + -The `common` query, deprecated in 7.x, has been removed in 8.0. -The same functionality can be achieved by the `match` query if the total number of hits is not tracked. - -*Impact* + -Discontinue use of the `common` query. Search requests containing a `common` -query will return an error. -==== - -.The `cutoff_frequency` parameter has been removed from the `match` and `multi_match` query. -[%collapsible] -==== -*Details* + -The `cutoff_frequency` parameter, deprecated in 7.x, has been removed in 8.0 from `match` and `multi_match` queries. -The same functionality can be achieved without any configuration provided that the total number of hits is not tracked. - -*Impact* + -Discontinue use of the `cutoff_frequency` parameter. Search requests containing -this parameter in a `match` or `multi_match` query will return an error. -==== - -.The `nested_filter` and `nested_path` properties have been removed from the search API's `sort` request body parameter. -[%collapsible] -==== -*Details* + -The `nested_filter` and `nested_path` options, deprecated in 6.x, have been removed in favor of the `nested` context. - -*Impact* + -Discontinue use of the `sort` request body parameter's `nested_filter` and -`nested_path` properties. Requests containing these properties will return an -error. -==== - -.Search and get requests are now routed to shards using adaptive replica selection by default. -[%collapsible] -==== -*Details* + -{es} will no longer prefer using shards in the same location (with the same awareness attribute values) to process -`_search` and `_get` requests. Adaptive replica selection (activated by default in this version) will route requests -more efficiently using the service time of prior inter-node communications. - -*Impact* + -No action needed. -==== - -.Vector functions using `(query, doc['field'])` are no longer supported. -[%collapsible] -==== -*Details* + -The vector functions of the form `function(query, doc['field'])` were -deprecated in 7.6, and are now removed in 8.x. The form -`function(query, 'field')` should be used instead. For example, -`cosineSimilarity(query, doc['field'])` is replaced by -`cosineSimilarity(query, 'field')`. - -*Impact* + -Use the `function(query, 'field')` form. Discontinue use of the `function(query, -doc['field'])` form. Requests containing the `function(query, -doc['field'])` form will return an error. -==== - -.The search API's `indices_boost` request body parameter no longer accepts object values. -[%collapsible] -==== -*Details* + -The `indices_boost` option in the search request used to accept the boosts -both as an object and as an array. The object format has been deprecated since -5.2 and is now removed in 8.0. - -*Impact* + -Use only array values in the `indices_boost` parameter. Requests containing an -object value in the `indices_boost` parameter will return an error. -==== - -.The search API's `use_field_mapping` request body parameter has been removed. -[%collapsible] -==== -*Details* + -In 7.0, we began formatting `docvalue_fields` by default using each field's -mapping definition. To ease the transition from 6.x, we added the format -option `use_field_mapping`. This parameter was deprecated in 7.0, and is now -removed in 8.0. - -*Impact* + -Discontinue use of the `use_field_mapping` request body parameter. Requests -containing this parameter will return an error. - -*Compatibility* + -When {ref}/rest-api-compatibility.html[rest-api-compatibility] is -{ref}/rest-api-compatibility.html[requested], the `use_field_mapping` parameter -is ignored. -==== - -.The search API's `from` request body and url parameter cannot be negative. -[%collapsible] -==== -*Details* + -Search request used to accept `-1` as a `from` in the search body and the url, -treating it as the default value of 0. Other negative values got rejected with -an error already. We now also reject `-1` as an invalid value. - -*Impact* + -Change any use of `-1` as `from` parameter in request body or url parameters by either -setting it to `0` or omitting it entirely. Requests containing negative values will -return an error. -==== - -.Range queries on date fields treat numeric values alwas as milliseconds-since-epoch. -[%collapsible] -==== -*Details* + -Range queries on date fields used to misinterpret small numbers (e.g. four digits like 1000) -as a year when no additional format was set, but would interpret other numeric values as -milliseconds since epoch. We now treat all numeric values in absence of a specific `format` -parameter as milliseconds since epoch. If you want to query for years instead, with a missing -`format` you now need to quote the input value (e.g. "1984"). - -*Impact* + -If you query date fields without a specified `format`, check if the values in your queries are -actually meant to be milliseconds-since-epoch and use a numeric value in this case. If not, use -a string value which gets parsed by either the date format set on the field in the mappings or -by `strict_date_optional_time` by default. -==== - -.The `geo_bounding_box` query's `type` parameter has been removed. -[%collapsible] -==== -*Details* + -The `geo_bounding_box` query's `type` parameter was deprecated in 7.14.0 and has -been removed in 8.0.0. This parameter is a no-op and has no effect on the query. - -*Impact* + -Discontinue use of the `type` parameter. `geo_bounding_box` queries that include -this parameter will return an error. -==== - -.The `type` query has been removed. -[%collapsible] -==== -*Details* + -The `type` query has been removed. Mapping types have been removed in 8.0. - -*Impact* + -Discontinue use of the `type` query. Requests that include the `type` query -will return an error. - -==== - -.The `kibana_user` role has been renamed `kibana_admin`. -[%collapsible] -==== -*Details* + -Users who were previously assigned the `kibana_user` role should instead be assigned -the `kibana_admin` role. This role grants the same set of privileges as `kibana_user`, but has been -renamed to better reflect its intended use. - -*Impact* + -Assign users with the `kibana_user` role to the `kibana_admin` role. -Discontinue use of the `kibana_user` role. -==== - -[[snapshot-resolve-system-indices]] -.For snapshot and {slm-init} APIs, the `indices` parameter no longer resolves to system indices or system data streams. -[%collapsible] -==== -*Details* + -For snapshot and {slm-init} APIs, the `indices` parameter no longer resolves to -system indices or system data streams. -{ref}/snapshot-restore.html#feature-state[Feature states] are now the only way -to back up and restore system indices or system data streams from a snapshot. - -You can no longer use the `indices` parameter for the -{ref}/slm-api-put-policy.html[create {slm-init} policy API] or the -{ref}/create-snapshot-api.html[create snapshot API] to include a system index in -a snapshot. To back up a system index, use the `include_global_state` and -`feature_states` parameters to include the corresponding feature state instead. -By default, the `include_global_state` and `feature_states` parameters include -all system indices. - -Similarly, you can no longer use the {ref}/restore-snapshot-api.html[restore snapshot -API]'s `indices` parameter to restore a system index from a snapshot. To restore -a system index, use the `include_global_state` and `feature_states` parameters -to restore the corresponding feature state instead. By default, the -`include_global_state` and `feature_states` parameters don't restore any system -indices. - -*Impact* + -If you previously used the `indices` parameter to back up or restore system -indices, update your {slm-init} policies and application to use the -`include_global_state` and `feature_states` parameters instead. - -An {slm-init} policy that explicitly specifies a system index in the `indices` -parameter will fail to create snapshots. Similarly, a create snapshot API or -restore snapshot API request that explicitly specifies a system index in the -`indices` parameter will fail and return an error. If the `indices` value -includes a wildcard (`*`) pattern, the pattern will no longer match system -indices. -==== - -.Snapshots compress metadata files by default. -[%collapsible] -==== -*Details* + -Previously, the default value for `compress` was `false`. The default has been changed to `true`. - -This change will affect both newly created repositories and existing repositories where `compress=false` has not been -explicitly specified. - -*Impact* + -Update your workflow and applications to assume a default value of `true` for -the `compress` parameter. -==== - -.S3 snapshot repositories now use a DNS-style access pattern by default. -[%collapsible] -==== -*Details* + -Starting in version 7.4, `s3` snapshot repositories no longer use the -now-deprecated path-style access pattern by default. In versions 7.0, 7.1, 7.2 -and 7.3 `s3` snapshot repositories always used the path-style access pattern. -This is a breaking change for deployments that only support path-style access -but which are recognized as supporting DNS-style access by the AWS SDK. This -breaking change was made necessary by -https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/[AWS's -announcement] that the path-style access pattern is deprecated and will be -unsupported on buckets created after September 30th 2020. - -*Impact* + -If your deployment only supports path-style access and is affected by this -change then you must configure the S3 client setting `path_style_access` to -`true`. -==== - -.Restore requests no longer accept settings. -[%collapsible] -==== -*Details* + -In earlier versions, you could pass both `settings` and `index_settings` in the -body of a restore snapshot request, but the `settings` value was ignored. The -restore snapshot API now rejects requests that include a `settings` value. - -*Impact* + -Discontinue use of the `settings` parameter in restore -snapshot request. Requests that include these parameters will return an error. -==== - -.The repository stats API has been removed. -[%collapsible] -==== -*Details* + -The repository stats API has been removed. We deprecated this experimental API -in 7.10.0. - -*Impact* + -Use the {ref}/repositories-metering-apis.html[repositories metering APIs] -instead. -==== - -.Watcher history now writes to a hidden data stream. -[%collapsible] -==== -*Details* + -In 8.x, {es} writes Watcher history to a hidden -`.watcher-history-` data stream. Previously, {es} wrote -Watcher history to hidden -`.watcher-history--` indices. - -*Impact* + -Update your requests to target the Watcher history data stream. For example, use -the `.watcher-history-*` wildcard expression. Requests that specifically target -non-existent Watcher history indices may return an error. -==== - -.HTTP Status code has changed for the Cluster Health API in case of a server timeout. -[%collapsible] -==== -*Details* + -The {ref}/cluster-health.html[cluster health API] includes options for waiting -for certain health conditions to be satisfied. If the requested conditions are -not satisfied within a timeout then {es} will send back a normal response -including the field `"timed_out": true`. In earlier versions it would also use -the HTTP response code `408 Request timeout` if the request timed out, and `200 -OK` otherwise. The `408 Request timeout` response code is not appropriate for -this situation, so from version 8.0.0 {es} will use the response code `200 OK` -for both cases. - -*Impact* + -To detect a server timeout, check the `timed_out` field of the JSON response. -==== diff --git a/docs/reference/migration/migrate_8_0/sql-jdbc-changes.asciidoc b/docs/reference/migration/migrate_8_0/sql-jdbc-changes.asciidoc deleted file mode 100644 index 71efa21e032f5..0000000000000 --- a/docs/reference/migration/migrate_8_0/sql-jdbc-changes.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -[discrete] -[[breaking_80_jdbc_changes]] -==== SQL JDBC changes - -.JDBC driver returns geometry objects as well-known-text string instead of `org.elasticsearch.geo` objects. -[%collapsible] -==== -*Details* + -To reduce the dependency of the JDBC driver onto Elasticsearch classes, the JDBC driver returns geometry data -as strings using the WKT (well-known text) format instead of classes from the `org.elasticsearch.geometry`. -Users can choose the geometry library desired to convert the string representation into a full-blown objects -either such as the `elasticsearch-geo` library (which returned the object `org.elasticsearch.geo` as before), -jts or spatial4j. - -*Impact* + -Before upgrading, replace any `org.elasticsearch.geo` classes on the `ResultSet#getObject` or `ResultSet#setObject` -Elasticsearch JDBC driver with their WKT representation by simply calling `toString` or -`org.elasticsearch.geometry.utils.WellKnownText#toWKT/fromWKT` methods. - -This change does NOT impact users that do not use geometry classes. - -==== diff --git a/docs/reference/migration/migrate_8_0/system-req-changes.asciidoc b/docs/reference/migration/migrate_8_0/system-req-changes.asciidoc deleted file mode 100644 index df698eaeb2dfd..0000000000000 --- a/docs/reference/migration/migrate_8_0/system-req-changes.asciidoc +++ /dev/null @@ -1,59 +0,0 @@ -[discrete] -[[breaking_80_system_req_changes]] -==== System requirement changes - -TIP: {ess-skip-section} - -.Several EOL operating systems are no longer supported. -[%collapsible] -==== -*Details* + -The following operating systems have reached their end of life and are no longer -supported by {es}: - -* Amazon Linux -* CentOS 6 -* Debian 8 -* openSUSE Leap 42 -* Oracle Enterprise Linux 6 -* Ubuntu 16.04 - -We've also removed support for `SysV init`. No supported operating systems use -the `SysV init` process. - -*Details* + -Ensure your nodes use a -https://www.elastic.co/support/matrix#matrix_os[supported operating system]. -Running {es} on an unsupported operating system can result in unexpected errors -or failures. -==== - -.Java 17 is required. -[%collapsible] -==== -*Details* + -Java 17 or higher is now required to run {es} and any of its command -line tools. - -*Impact* + -Use Java 17 or higher. Attempts to run {es} 8.0 using earlier Java versions will -fail. - -There is not yet a FIPS-certified security module for Java 17 -that you can use when running {es} 8.0 in FIPS 140-2 mode. -If you run in FIPS 140-2 mode, you will either need to request an exception -from your security organization to upgrade to {es} 8.0, -or remain on {es} 7.x until Java 17 is certified. -==== - -.`JAVA_HOME` is no longer supported. -[%collapsible] -==== -*Details* + -`JAVA_HOME` is no longer supported to set the path for the JDK. Instead, use -the bundled JDK (preferable), or set `ES_JAVA_HOME`. - -*Impact* + -Use the bundled JDK (preferable), or set `ES_JAVA_HOME`. `JAVA_HOME` will be -ignored. -==== diff --git a/docs/reference/migration/migrate_8_0/transform.asciidoc b/docs/reference/migration/migrate_8_0/transform.asciidoc deleted file mode 100644 index aa47e28d83750..0000000000000 --- a/docs/reference/migration/migrate_8_0/transform.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -[discrete] -[[breaking_80_transform_changes]] -==== Transform changes - -.{transforms-cap} created in 7.4 or earlier versions must be upgraded. -[%collapsible] -==== -*Details* + -Early beta versions of {transforms} had configuration information in a format -that is no longer supported. - - -*Impact* + -Use the {ref}/upgrade-transforms.html[upgrade {transforms} API] to fix your -{transforms}. This upgrade does not affect the source or destination indices. -==== diff --git a/docs/reference/migration/migrate_8_1.asciidoc b/docs/reference/migration/migrate_8_1.asciidoc deleted file mode 100644 index 692559205f735..0000000000000 --- a/docs/reference/migration/migrate_8_1.asciidoc +++ /dev/null @@ -1,109 +0,0 @@ -[[migrating-8.1]] -== Migrating to 8.1 -++++ -8.1 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.1. - -See also <> and <>. - - -[discrete] -[[breaking-changes-8.1]] -=== Breaking changes - -The following changes in {es} 8.1 might affect your applications -and prevent them from operating normally. -Before upgrading to 8.1, review these changes and take the described steps -to mitigate the impact. - -[discrete] -[[breaking_81_rest_api_changes]] -==== REST API changes - -[[search_apis_fields_parameter_normalizes_geometry_objects_cross_international_dateline]] -.The search API's `fields` parameter now normalizes geometry objects that cross the international dateline -[%collapsible] -==== -*Details* + -The search API's `fields` parameter now normalizes `geo_shape` objects that -cross the international dateline (+/-180° longitude). For example, if a polygon -crosses the dateline, the `fields` parameter returns it as two polygons. You can -still retrieve original, unnormalized geometry objects from `_source`. - -*Impact* + -If your application requires unnormalized geometry objects, retrieve them from -`_source` rather than using the `fields` parameter. -==== - - -[discrete] -[[deprecated-8.1]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.1 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you take the described steps to update your code -after upgrading to 8.1. - -To find out if you are using any deprecated functionality, -enable <>. - -[discrete] -[[deprecations_81_cluster_and_node_setting]] -==== Cluster and node setting deprecations - -[[legacy_values_for_discovery_type_setting_are_deprecated]] -.Legacy values for the `discovery.type` setting are deprecated -[%collapsible] -==== -*Details* + -Legacy values for the `discovery.type` setting are deprecated and will be -forbidden in a future version. - -*Impact* + -Do not set `discovery.type` to any value except `single-node` or `multi-node`. -All other values are equivalent to the default discovery type which is -`multi-node`. Where possible, omit this setting so that {es} uses the default -discovery type. -==== - -[discrete] -[[deprecations_81_rest_api]] -==== REST API deprecations - -[[lenient_parsing_of_bulk_actions_deprecated]] -.Lenient parsing of bulk actions is deprecated -[%collapsible] -==== -*Details* + -Older versions of {es} parse the action lines of bulk requests very permissively -and would silently ignore invalid or malformed actions. This lenience is -deprecated and a future version will reject bulk requests containing invalid -actions. - -*Impact* + -Ensure that bulk actions are well-formed JSON objects containing a single entry -with the correct key. -==== - -[[deprecate_index_include_frozen_request_parameter_in_sql_api]] -.Deprecate `index_include_frozen` request parameter in `_sql` API -[%collapsible] -==== -*Details* + -Following the deprecation of frozen indices, the `index_include_frozen` -parameter and `FROZEN` syntax is now also deprecated. - -*Impact* + -You should unfreeze frozen indices using the -{ref}/unfreeze-index-api.html[unfreeze index API] and stop using the -`index_include_frozen` parameter or the `FROZEN` keyword in SQL -queries. For some use cases, the frozen tier may be a suitable -replacement for frozen indices. See {ref}/data-tiers.html[data tiers] -for more information. -==== - diff --git a/docs/reference/migration/migrate_8_10.asciidoc b/docs/reference/migration/migrate_8_10.asciidoc deleted file mode 100644 index a1d132812ad03..0000000000000 --- a/docs/reference/migration/migrate_8_10.asciidoc +++ /dev/null @@ -1,89 +0,0 @@ -[[migrating-8.10]] -== Migrating to 8.10 -++++ -8.10 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.10. - -See also <> and <>. - -[discrete] -[[breaking-changes-8.10]] -=== Breaking changes - -The following changes in {es} 8.10 might affect your applications -and prevent them from operating normally. -Before upgrading to 8.10, review these changes and take the described steps -to mitigate the impact. - - -There are no notable breaking changes in {es} 8.10. -But there are some less critical breaking changes. - -[discrete] -[[breaking_810_cluster_and_node_setting_changes]] -==== Cluster and node setting changes - -[[remove_unused_executor_builder_for_vector_tile_plugin]] -.Remove the unused executor builder for vector tile plugin -[%collapsible] -==== -*Details* + -The threadpool called `vectortile` is a left over from the original development of the vector tile search end point and it is used nowhere. It can still be a breaking change if it is configured on the elasticsearch yml file, for example by changing the threadpool size `thread_pool.vectortile.size=8`' - -*Impact* + -In the case the threadpool appears on the yaml file, Elasticsearch will not start until those lines are removed. -==== - -[discrete] -[[breaking_810_java_api_changes]] -==== Java API changes - -[[change_pre_configured_cached_analyzer_components_to_use_indexversion_instead_of_version-highlight]] -.Change pre-configured and cached analyzer components to use IndexVersion instead of Version -[%collapsible] -==== -*Details* + -This PR changes the types used to obtain pre-configured components from Version to IndexVersion, -with corresponding changes to method names. - -Prior to 8.10, there is a one-to-one mapping between node version and index version, with corresponding constants -in the IndexVersion class. -Starting in 8.10, IndexVersion is versioned independently of node version, and will be a simple incrementing number. -For more information on how to use IndexVersion and other version types, please see the contributing guide. - -*Impact* + -Analysis components now take IndexVersion instead of Version -==== - - -[discrete] -[[deprecated-8.10]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.10 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you to take the described steps to update your code -after upgrading to 8.10. - -To find out if you are using any deprecated functionality, -enable <>. - -[discrete] -[[deprecations_810_authorization]] -==== Authorization deprecations - -[[mark_apm_user_for_removal_in_future_major_release]] -.Mark `apm_user` for removal in a future major release -[%collapsible] -==== -*Details* + -The `apm_user` role has been deprecated and will be removed in a future major release. Users should migrate to `editor` and `viewer` roles - -*Impact* + -Users will have to migrate to `editor` and `viewer` roles -==== - diff --git a/docs/reference/migration/migrate_8_11.asciidoc b/docs/reference/migration/migrate_8_11.asciidoc deleted file mode 100644 index 098456e1aca42..0000000000000 --- a/docs/reference/migration/migrate_8_11.asciidoc +++ /dev/null @@ -1,69 +0,0 @@ -[[migrating-8.11]] -== Migrating to 8.11 -++++ -8.11 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.11. - -See also <> and <>. - - -[discrete] -[[breaking-changes-8.11]] -=== Breaking changes - -The following changes in {es} 8.11 might affect your applications -and prevent them from operating normally. -Before upgrading to 8.11, review these changes and take the described steps -to mitigate the impact. - - -There are no notable breaking changes in {es} 8.11. -But there are some less critical breaking changes. - -[discrete] -[[breaking_811_rest_api_changes]] -==== REST API changes - -[[remove_transport_versions_from_cluster_state_api]] -.Remove `transport_versions` from cluster state API -[%collapsible] -==== -*Details* + -The `transport_versions` subobject of the response to `GET _cluster/state` has been replaced by the `nodes_versions` subobject. - -*Impact* + -If needed, retrieve the per-node transport versions from the `nodes_versions` subobject. -==== - - -[discrete] -[[deprecated-8.11]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.11 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you to take the described steps to update your code -after upgrading to 8.11. - -To find out if you are using any deprecated functionality, -enable <>. - -[discrete] -[[deprecations_811_rollup]] -==== Rollup deprecations - -[[rollup_functionality_deprecated]] -.Rollup functionality is now deprecated -[%collapsible] -==== -*Details* + -<> has been deprecated and will be removed in a future release. Previously, rollups were available in technical preview. - -*Impact* + -Use <> to reduce storage costs for time series data by storing it at reduced granularity. -==== - diff --git a/docs/reference/migration/migrate_8_12.asciidoc b/docs/reference/migration/migrate_8_12.asciidoc deleted file mode 100644 index c7f4aa8728693..0000000000000 --- a/docs/reference/migration/migrate_8_12.asciidoc +++ /dev/null @@ -1,74 +0,0 @@ -[[migrating-8.12]] -== Migrating to 8.12 -++++ -8.12 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.12. - -See also <> and <>. - -[discrete] -[[breaking-changes-8.12]] -=== Breaking changes - -There are no breaking changes in 8.12 - -[discrete] -[[notable-changes-8.12]] -=== Notable changes - -There are notable changes in 8.12 that you need to be aware of, items that we may consider as notable changes are - -* Changes to features that are in Technical Preview. -* Changes to log formats. -* Changes to non public APIs. -* Behaviour changes that repair critical bugs. - - -[discrete] -[[breaking_812_authorization_changes]] -==== Authorization changes - -[[fixed_jwt_principal_from_claims]] -.Fixed JWT principal from claims -[%collapsible] -==== -*Details* + -This changes the format of a JWT's principal before the JWT is actually validated by any JWT realm. The JWT's principal is a convenient way to refer to a JWT that has not yet been verified by a JWT realm. The JWT's principal is printed in the audit and regular logs (notably for auditing authn failures) as well as the smart realm chain reordering optimization. The JWT principal is NOT required to be identical to the JWT-authenticated user's principal, but in general, they should be similar. Previously, the JWT's principal was built by individual realms in the same way the realms built the authenticated user's principal. This had the advantage that, in simpler JWT realms configurations (e.g. a single JWT realm in the chain), the JWT principal and the authenticated user's principal are very similar. However the drawback is that, in general, the JWT principal and the user principal can be very different (i.e. in the case where one JWT realm builds the JWT principal and a different one builds the user principal). Another downside is that the (unauthenticated) JWT principal depended on realm ordering, which makes identifying the JWT from its principal dependent on the ES authn realm configuration. This PR implements a consistent fixed logic to build the JWT principal, which only depends on the JWT's claims and no ES configuration. - -*Impact* + -Users will observe changed format and values for the `user.name` attribute of `authentication_failed` audit log events, in the JWT (failed) authn case. -==== - -[discrete] -[[breaking_812_java_api_changes]] -==== Java API changes - -[[plugin_createcomponents_method_has_been_refactored_to_take_single_pluginservices_object]] -.Plugin.createComponents method has been refactored to take a single PluginServices object -[%collapsible] -==== -*Details* + -Plugin.createComponents currently takes several different service arguments. The signature of this method changes every time a new service is added. The method has now been modified to take a single interface object that new services are added to. This will reduce API incompatibility issues when a new service is introduced in the future. - -*Impact* + -Plugins that override createComponents will need to be refactored to override the new method on ES 8.12+ -==== - -[discrete] -[[breaking_812_rest_api_changes]] -==== REST API changes - -[[es_ql_pow_function_always_returns_double]] -.[ES|QL] pow function always returns double -[%collapsible] -==== -*Details* + -This corrects an earlier mistake in the ES|QL language design. Initially we had thought to have pow return the same type as its inputs, but in practice even for integer inputs this quickly grows out of the representable range, and we returned null much of the time. This also created a lot of edge cases around casting to/from doubles (which the underlying java function uses). The version in this PR follows the java spec, by always casting its inputs to doubles, and returning a double. Doing it this way also allows for a rather significant reduction in lines of code. - -*Impact* + -low. Most queries should continue to function with the change. -==== - diff --git a/docs/reference/migration/migrate_8_13.asciidoc b/docs/reference/migration/migrate_8_13.asciidoc deleted file mode 100644 index dca10671e57bc..0000000000000 --- a/docs/reference/migration/migrate_8_13.asciidoc +++ /dev/null @@ -1,137 +0,0 @@ -[[migrating-8.13]] -== Migrating to 8.13 -++++ -8.13 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.13. - -See also <> and <>. - -coming::[8.13.0] - - -[discrete] -[[breaking-changes-8.13]] -=== Breaking changes - -There are no breaking changes in 8.13. - -[discrete] -[[migrate-notable-changes-8.13]] -=== Notable changes -The following are notable, non-breaking updates to be aware of: - -* Changes to features that are in Technical Preview. -* Changes to log formats. -* Changes to non-public APIs. -* Behaviour changes that repair critical bugs. - -[discrete] -[[breaking_813_index_setting_changes]] -==== Index setting changes - -[[change_index_look_ahead_time_index_settings_default_value_from_2_hours_to_30_minutes]] -.Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. -[%collapsible] -==== -*Details* + -Lower the `index.look_ahead_time` index setting's max value from 2 hours to 30 minutes. - -*Impact* + -Documents with @timestamp of 30 minutes or more in the future will be rejected. Before documents with @timestamp of 2 hours or more in the future were rejected. If the previous behaviour should be kept, then update the `index.look_ahead_time` setting to two hours before performing the upgrade. -==== - -[[lower_look_ahead_time_index_settings_max_value]] -.Lower the `look_ahead_time` index setting's max value -[%collapsible] -==== -*Details* + -Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours. - -*Impact* + -Any value between 2 hours and 7 days will be as a look ahead time of 2 hours is defined -==== - -[discrete] -[[breaking_813_rest_api_changes]] -==== REST API changes - -[[esql_grammar_from_metadata_no_longer_requires]] -.ESQL: Grammar - FROM METADATA no longer requires [] -[%collapsible] -==== -*Details* + -Remove [ ] for METADATA option inside FROM command statements - -*Impact* + -Previously to return metadata fields, one had to use square brackets: (eg. 'FROM index [METADATA _index]'). This is no longer needed: the [ ] are dropped and do not have to be specified, thus simplifying the command above to:'FROM index METADATA _index'. -==== - -[[es_ql_remove_project_keyword_from_grammar]] -.ES|QL: remove PROJECT keyword from the grammar -[%collapsible] -==== -*Details* + -Removes the PROJECT keyword (an alias for KEEP) from ES|QL grammar - -*Impact* + -Before this change, users could use PROJECT as an alias for KEEP in ESQL queries, (eg. 'FROM idx | PROJECT name, surname') the parser replaced PROJECT with KEEP, emitted a warning: 'PROJECT command is no longer supported, please use KEEP instead' and the query was executed normally. With this change, PROJECT command is no longer recognized by the query parser; queries using PROJECT command now return a parsing exception. -==== - -[[esql_remove_nan_finite_infinite]] -.[ESQL] Remove is_nan, is_finite, and `is_infinite` -[%collapsible] -==== -*Details* + -Removes the functions `is_nan`, `is_finite`, and `is_infinite`. - -*Impact* + -Attempting to use the above functions will now be a planner time error. These functions are no longer supported. -==== - - -[discrete] -[[deprecated-8.13]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.13 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you to take the described steps to update your code -after upgrading to 8.13. - -To find out if you are using any deprecated functionality, -enable <>. - -[discrete] -[[deprecations_813_cluster_and_node_setting]] -==== Cluster and node setting deprecations - -[[deprecate_client_type]] -.Deprecate `client.type` -[%collapsible] -==== -*Details* + -The node setting `client.type` has been ignored since the node client was removed in 8.0. The setting is now deprecated and will be removed in a future release. - -*Impact* + -Remove the `client.type` setting from `elasticsearch.yml` -==== - -[discrete] -[[deprecations_813_rest_api]] -==== REST API deprecations - -[[desirednode_deprecate_node_version_field_make_it_optional_for_current_version]] -.`DesiredNode:` deprecate `node_version` field and make it optional for the current version -[%collapsible] -==== -*Details* + -The desired_node API includes a `node_version` field to perform validation on the new node version required. This kind of check is too broad, and it's better done by external logic, so it has been removed, making the `node_version` field not necessary. The field will be removed in a later version. - -*Impact* + -Users should update their usages of `desired_node` to not include the `node_version` field anymore. -==== - diff --git a/docs/reference/migration/migrate_8_14.asciidoc b/docs/reference/migration/migrate_8_14.asciidoc deleted file mode 100644 index 2e6cd439ebed0..0000000000000 --- a/docs/reference/migration/migrate_8_14.asciidoc +++ /dev/null @@ -1,90 +0,0 @@ -[[migrating-8.14]] -== Migrating to 8.14 -++++ -8.14 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.14. - -See also <> and <>. - -coming::[8.14.0] - - -[discrete] -[[breaking-changes-8.14]] -=== Breaking changes - -The following changes in {es} 8.14 might affect your applications -and prevent them from operating normally. -Before upgrading to 8.14, review these changes and take the described steps -to mitigate the impact. - - -There are no notable breaking changes in {es} 8.14. -But there are some less critical breaking changes. - -[discrete] -[[breaking_814_rest_api_changes]] -==== REST API changes - -[[prevent_dls_fls_if_replication_assigned]] -.Prevent DLS/FLS if `replication` is assigned -[%collapsible] -==== -*Details* + -For cross-cluster API keys, {es} no longer allows specifying document-level security (DLS) or field-level security (FLS) in the `search` field, if `replication` is also specified. {es} likewise blocks the use of any existing cross-cluster API keys that meet this condition. - -*Impact* + -Remove any document-level security (DLS) or field-level security (FLS) definitions from the `search` field for cross-cluster API keys that also have a `replication` field, or create two separate cross-cluster API keys, one for search and one for replication. -==== - - -[discrete] -[[breaking_814_dls_changes]] -==== Stricter Document Level Security (DLS) - -[[stricter_dls_814]] -.Document Level Security (DLS) applies stricter checks for the validate query API and for terms aggregations when min_doc_count is set to 0. - -[%collapsible] -==== -*Details* + -When Document Level Security (DLS) is applied to terms aggregations and min_doc_count is set to 0, stricter security rules apply. - -When Document Level Security (DLS) is applied to the validate query API with the rewrite parameter, stricter security rules apply. - -*Impact* + -If needed, test workflows with DLS enabled to ensure that the stricter security rules do not impact your application. -==== - - -[discrete] -[[deprecated-8.14]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.14 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you to take the described steps to update your code -after upgrading to 8.14. - -To find out if you are using any deprecated functionality, -enable <>. - -[discrete] -[[deprecations_814_mapping]] -==== Mapping deprecations - -[[deprecate_allowing_fields_in_scenarios_where_it_ignored]] -.Deprecate allowing `fields` in scenarios where it is ignored -[%collapsible] -==== -*Details* + -The following mapped types have always ignored `fields` when using multi-fields. This deprecation makes this clearer and we will completely disallow `fields` for these mapped types in the future. - -*Impact* + -In the future, `join`, `aggregate_metric_double`, and `constant_keyword`, will all disallow supplying `fields` as a parameter in the mapping. -==== - diff --git a/docs/reference/migration/migrate_8_15.asciidoc b/docs/reference/migration/migrate_8_15.asciidoc deleted file mode 100644 index 1961230da1bbf..0000000000000 --- a/docs/reference/migration/migrate_8_15.asciidoc +++ /dev/null @@ -1,140 +0,0 @@ -[[migrating-8.15]] -== Migrating to 8.15 -++++ -8.15 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.15. - -See also <> and <>. - -coming::[8.15.0] - - -[discrete] -[[breaking-changes-8.15]] -=== Breaking changes - -The following changes in {es} 8.15 might affect your applications -and prevent them from operating normally. -Before upgrading to 8.15, review these changes and take the described steps -to mitigate the impact. - -[discrete] -[[breaking_815_cluster_and_node_setting_changes]] -==== Cluster and node setting changes - -[[change_skip_unavailable_remote_cluster_setting_default_value_to_true]] -.Change `skip_unavailable` remote cluster setting default value to true -[%collapsible] -==== -*Details* + -The default value of the `skip_unavailable` setting is now set to true. All existing and future remote clusters that do not define this setting will use the new default. This setting only affects cross-cluster searches using the _search or _async_search API. - -*Impact* + -Unavailable remote clusters in a cross-cluster search will no longer cause the search to fail unless skip_unavailable is configured to be `false` in elasticsearch.yml or via the `_cluster/settings` API. Unavailable clusters with `skip_unavailable`=`true` (either explicitly or by using the new default) are marked as SKIPPED in the search response metadata section and do not fail the entire search. If users want to ensure that a search returns a failure when a particular remote cluster is not available, `skip_unavailable` must be now be set explicitly. -==== - -[discrete] -[[breaking_815_rollup_changes]] -==== Rollup changes - -[[disallow_new_rollup_jobs_in_clusters_with_no_rollup_usage]] -.Disallow new rollup jobs in clusters with no rollup usage -[%collapsible] -==== -*Details* + -The put rollup API will fail with an error when a rollup job is created in a cluster with no rollup usage - -*Impact* + -Clusters with no rollup usage (either no rollup job or index) can not create new rollup jobs -==== - -[discrete] -[[breaking_815_rest_api_changes]] -==== REST API changes - -[[interpret_timeout_1_as_infinite_ack_timeout]] -.Interpret `?timeout=-1` as infinite ack timeout -[%collapsible] -==== -*Details* + -Today {es} accepts the parameter `?timeout=-1` in many APIs, but interprets -this to mean the same as `?timeout=0`. From 8.15 onwards `?timeout=-1` will -mean to wait indefinitely, aligning the behaviour of this parameter with -other similar parameters such as `?master_timeout`. - -*Impact* + -Use `?timeout=0` to force relevant operations to time out immediately -instead of `?timeout=-1` -==== - -[[replace_model_id_with_inference_id]] -.Replace `model_id` with `inference_id` in GET inference API -[%collapsible] -==== -*Details* + -From 8.15 onwards the <> response will return an -`inference_id` field instead of a `model_id`. - -*Impact* + -If your application uses the `model_id` in a GET inference API response, -switch it to use `inference_id` instead. -==== - - -[discrete] -[[deprecated-8.15]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.15 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you to take the described steps to update your code -after upgrading to 8.15. - -To find out if you are using any deprecated functionality, -enable <>. - -[discrete] -[[deprecations_815_cluster_and_node_setting]] -==== Cluster and node setting deprecations - -[[deprecate_absolute_size_values_for_indices_breaker_total_limit_setting]] -.Deprecate absolute size values for `indices.breaker.total.limit` setting -[%collapsible] -==== -*Details* + -Previously, the value of `indices.breaker.total.limit` could be specified as an absolute size in bytes. This setting controls the overal amount of memory the server is allowed to use before taking remedial actions. Setting this to a specific number of bytes led to strange behaviour when the node maximum heap size changed because the circut breaker limit would remain unchanged. This would either leave the value too low, causing part of the heap to remain unused; or it would leave the value too high, causing the circuit breaker to be ineffective at preventing OOM errors. The only reasonable behaviour for this setting is that it scales with the size of the heap, and so absolute byte limits are now deprecated. - -*Impact* + -Users must change their configuration to specify a percentage instead of an absolute number of bytes for `indices.breaker.total.limit`, or else accept the default, which is already specified as a percentage. -==== - -[discrete] -[[deprecations_815_rest_api]] -==== REST API deprecations - -[[deprecate_text_expansion_weighted_tokens_queries]] -.Deprecate `text_expansion` and `weighted_tokens` queries -[%collapsible] -==== -*Details* + -The `text_expansion` and `weighted_tokens` queries have been replaced by `sparse_vector`. - -*Impact* + -Please update your existing `text_expansion` and `weighted_tokens` queries to use `sparse_vector.` -==== - -[[deprecate_using_slm_privileges_to_access_ilm]] -.Deprecate using slm privileges to access ilm -[%collapsible] -==== -*Details* + -The `read_slm` privilege can get the ILM status, and the `manage_slm` privilege can start and stop ILM. Access to these APIs should be granted using the `read_ilm` and `manage_ilm` privileges instead. Access to ILM APIs will be removed from SLM privileges in a future major release, and is now deprecated. - -*Impact* + -Users that need access to the ILM status API should now use the `read_ilm` privilege. Users that need to start and stop ILM, should use the `manage_ilm` privilege. -==== - diff --git a/docs/reference/migration/migrate_8_16.asciidoc b/docs/reference/migration/migrate_8_16.asciidoc deleted file mode 100644 index 950b8f7ec3964..0000000000000 --- a/docs/reference/migration/migrate_8_16.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -[[migrating-8.16]] -== Migrating to 8.16 -++++ -8.16 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.16. - -See also <> and <>. - -coming::[8.16.0] - - -[discrete] -[[breaking-changes-8.16]] -=== Breaking changes - -The following changes in {es} 8.16 might affect your applications -and prevent them from operating normally. -Before upgrading to 8.16, review these changes and take the described steps -to mitigate the impact. - -[discrete] -[[breaking_816_locale_change]] -==== JDK locale database change - -{es} 8.16 changes the version of the JDK that is included from version 22 to version 23. This changes -the locale database that is used by Elasticsearch from the _COMPAT_ database to the _CLDR_ database. -This can result in significant changes to custom textual date field formats, -and calculations for custom week-date date fields. - -For more information see <>. - -If you run {es} 8.16 on JDK version 22 or below, it will use the _COMPAT_ locale database -to match the behavior of 8.15. However, please note that starting with {es} 9.0, -{es} will use the _CLDR_ database regardless of JDK version it is run on. diff --git a/docs/reference/migration/migrate_8_17.asciidoc b/docs/reference/migration/migrate_8_17.asciidoc deleted file mode 100644 index 15bc6431c60ba..0000000000000 --- a/docs/reference/migration/migrate_8_17.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -[[migrating-8.17]] -== Migrating to 8.17 -++++ -8.17 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.17. - -See also <> and <>. - -coming::[8.17.0] - - -[discrete] -[[breaking-changes-8.17]] -=== Breaking changes - -There are no breaking changes in {es} 8.17. - diff --git a/docs/reference/migration/migrate_8_2.asciidoc b/docs/reference/migration/migrate_8_2.asciidoc deleted file mode 100644 index 3630456aed6fd..0000000000000 --- a/docs/reference/migration/migrate_8_2.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -[[migrating-8.2]] -== Migrating to 8.2 -++++ -8.2 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.2. - -See also <> and <>. - -[discrete] -[[breaking-changes-8.2]] -=== Breaking changes - -There are no breaking changes in {es} 8.2. diff --git a/docs/reference/migration/migrate_8_3.asciidoc b/docs/reference/migration/migrate_8_3.asciidoc deleted file mode 100644 index 1dfc2d1b8cd23..0000000000000 --- a/docs/reference/migration/migrate_8_3.asciidoc +++ /dev/null @@ -1,61 +0,0 @@ -[[migrating-8.3]] -== Migrating to 8.3 -++++ -8.3 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.3. - -See also <> and <>. - - -[discrete] -[[breaking-changes-8.3]] -=== Breaking changes - -There are no breaking changes in {es} 8.3. - - - -[discrete] -[[deprecated-8.3]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.3 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you take the described steps to update your code -after upgrading to 8.3. - -To find out if you are using any deprecated functionality, -enable <>. - - -[discrete] -[[deprecations_83_cluster_and_node_setting]] -==== Cluster and node setting deprecations - -[[configuring_bind_dn_in_an_ldap_or_active_directory_ad_realm_without_corresponding_bind_password_deprecated]] -.Configuring a bind DN in an LDAP or Active Directory (AD) realm without a corresponding bind password is deprecated -[%collapsible] -==== -*Details* + -For LDAP or AD authentication realms, setting a bind DN (via the -`xpack.security.authc.realms.ldap.*.bind_dn` realm setting) without a -bind password is a misconfiguration that may prevent successful -authentication to the node. In the next major release, nodes will fail -to start if a bind DN is specified without a password. - -*Impact* + -If you have a bind DN configured for an LDAP or AD authentication -realm, set a bind password for {ref}/ldap-realm.html#ldap-realm-configuration[LDAP] -or {ref}/active-directory-realm.html#ad-realm-configuration[Active Directory]. -Configuring a bind DN without a password generates a warning in the -deprecation logs. - -*Note:* This deprecation only applies if your current LDAP or AD -configuration specifies a bind DN without a password. This scenario is -unlikely, but might impact a small subset of users. -==== - diff --git a/docs/reference/migration/migrate_8_4.asciidoc b/docs/reference/migration/migrate_8_4.asciidoc deleted file mode 100644 index d9aab317d70f7..0000000000000 --- a/docs/reference/migration/migrate_8_4.asciidoc +++ /dev/null @@ -1,46 +0,0 @@ -[[migrating-8.4]] -== Migrating to 8.4 -++++ -8.4 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.4. - -See also <> and <>. - -[discrete] -[[breaking-changes-8.4]] -=== Breaking changes - -There are no breaking changes in {es} 8.4. - -[discrete] -[[deprecated-8.4]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.4 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you to take the described steps to update your code -after upgrading to 8.4. - -To find out if you are using any deprecated functionality, -enable <>. - - -[discrete] -[[deprecations_84_rest_api]] -==== REST API deprecations - -[[deprecate_knn_search_endpoint]] -.Deprecate the `_knn_search` endpoint -[%collapsible] -==== -*Details* + --| The kNN search API is deprecated in favor of the new 'knn' option inside the search API. The 'knn' option is now the recommended way of running ANN search. - -*Impact* + -Users should switch from `_knn_search` to the search `knn` option. -==== - diff --git a/docs/reference/migration/migrate_8_5.asciidoc b/docs/reference/migration/migrate_8_5.asciidoc deleted file mode 100644 index 1f040946670e1..0000000000000 --- a/docs/reference/migration/migrate_8_5.asciidoc +++ /dev/null @@ -1,101 +0,0 @@ -[[migrating-8.5]] -== Migrating to 8.5 -++++ -8.5 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.5. - -See also <> and <>. - -[discrete] -[[breaking-changes-8.5]] -=== Breaking changes - -The following changes in {es} 8.5 might affect your applications and prevent -them from operating normally. Before upgrading to 8.5, review these changes and -take the described steps to mitigate the impact. - -[discrete] -[[breaking_85_rest_api_changes]] -==== REST API changes - -[[breaking_85_bulk_action_stricter]] -.The bulk API now rejects requests containing unrecognized actions -[%collapsible] -==== -*Details* + -Requests to the bulk API comprise a sequence of items, each of which starts with -a JSON object describing the item. This object includes the type of action to -perform with the item which should be one of `create`, `update`, `index`, or -`delete`. Earlier versions of {es} had a bug that caused them to ignore items -with an unrecognized type, skipping the next line in the request, but this -lenient behaviour meant that there is no way for the client to associate the -items in the response with the items in the request, and in some cases it would -cause the remainder of the request to be parsed incorrectly. - -From version 8.5 onwards, requests to the bulk API must comprise only items -with recognized types. {es} will reject requests containing any items with an -unrecognized type with a `400 Bad Request` error response. - -We consider this change to be a bugfix but list it here as a breaking change -since it may affect the behaviour of applications which rely on being able to -send unrecognized actions to {es}. - -*Impact* + -Ensure your application only sends items with type `create`, `update`, `index` -or `delete` to the bulk API. -==== - -[discrete] -[[deprecated-8.5]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.5 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you to take the described steps to update your code -after upgrading to 8.5. - -To find out if you are using any deprecated functionality, -enable <>. - - -[discrete] -[[deprecations_85_plugins]] -==== Plugin API deprecations - -[[network_plugins_deprecated]] -Plugins that extend the NetworkPlugin interface are deprecated. -[%collapsible] -==== -*Details* + -Plugins may override funcionality that controls how nodes connect -with other nodes over TCP/IP. These plugins extend the NetworkPlugin -interface. In the next major release, these plugins will fail -to install. - -*Impact* + -Discontinue using any plugins which extend NetworkPlugin. You can -see if any plugins use deprecated functionality by checking -the Elasticsearch deprecation log. -==== - -[[discoveryplugin_joinvalidator_and_election_strategies_deprecated]] -.Extending DiscoveryPlugin to override join validators or election strategies is deprecated -[%collapsible] -==== -*Details* + -Plugins that extend DiscoveryPlugin may override getJoinValidator and -getElectionStrategies. These methods are implementation details of the -clustering mechanism within Elasticsearch. They should not be overriden. -In the next major release, plugins overriding getJoinValidator or -getElectionStrategies will fail to install. - -*Impact* + -Discontinue using any plugins that override getJoinValidator or -getElectionStrategies in DiscoveryPlugin. You can see if any plugins -use deprecated functionality by checking the Elasticsearch deprecation log. -==== - diff --git a/docs/reference/migration/migrate_8_6.asciidoc b/docs/reference/migration/migrate_8_6.asciidoc deleted file mode 100644 index 80c0ece8c1e37..0000000000000 --- a/docs/reference/migration/migrate_8_6.asciidoc +++ /dev/null @@ -1,92 +0,0 @@ -[[migrating-8.6]] -== Migrating to 8.6 -++++ -8.6 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.6. - -See also <> and <>. - -[discrete] -[[breaking-changes-8.6]] -=== Breaking changes - -There are no breaking changes in {es} 8.6. - -[discrete] -[[deprecated-8.6]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.6 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you to take the described steps to update your code -after upgrading to 8.6. - -To find out if you are using any deprecated functionality, -enable <>. - - -[discrete] -[[deprecations_86_crud]] -==== CRUD deprecations - -[[deprecate_remove_binary_default_of_false_for_ingest_attachment_processor]] -.Deprecate 'remove_binary' default of false for ingest attachment processor -[%collapsible] -==== -*Details* + -The default "remove_binary" option for the attachment processor will be changed from false to true in a later Elasticsearch release. This means that the binary file sent to Elasticsearch will not be retained. - -*Impact* + -Users should update the "remove_binary" option to be explicitly true or false, instead of relying on the default value, so that no default value changes will affect Elasticsearch. -==== - -[discrete] -[[deprecations_86_cluster_and_node_setting]] -==== Cluster and node setting deprecations - -[[ensure_balance_threshold_at_least_1]] -.Ensure balance threshold is at least 1 -[%collapsible] -==== -*Details* + -Values for `cluster.routing.allocation.balance.threshold` smaller than `1` are now ignored. Support for values less than `1` for this setting is deprecated and will be forbidden in a future version. - -*Impact* + -Set `cluster.routing.allocation.balance.threshold` to be at least `1`. -==== - -[discrete] -[[deprecations_86_mapping]] -==== Mapping deprecations - -[[deprecate_silently_ignoring_type_fields_copy_to_boost_in_metadata_field_definition]] -.Deprecate silently ignoring type, fields, copy_to and boost in metadata field definition -[%collapsible] -==== -*Details* + -Unsupported parameters like type, fields, copy_to and boost are silently ignored when provided as part of the configuration of a metadata field in the index mappings. They will cause a deprecation warning when used in the mappings for indices that are created from 8.6 onwards. - -*Impact* + -To resolve the deprecation warning, remove the mention of type, fields, copy_to or boost from any metadata field definition as part of index mappings. They take no effect so removing them won't have any impact besides resolving the deprecation warning. -==== - -[discrete] -[[deprecations_86_rest_api]] -==== REST API deprecations - -[[state_field_deprecated_in_cluster_reroute_response]] -.state field is deprecated in /_cluster/reroute response -[%collapsible] -==== -*Details* + -`state` field is deprecated in `/_cluster/reroute` response. Cluster state does not provide meaningful information -about the result of reroute/commands execution. There are no guarantees that this exact state would be applied. - -*Impact* + -Reroute API users should not rely on `state` field and instead use `explain` to request result of commands execution. -==== - diff --git a/docs/reference/migration/migrate_8_7.asciidoc b/docs/reference/migration/migrate_8_7.asciidoc deleted file mode 100644 index 2061743e1be4a..0000000000000 --- a/docs/reference/migration/migrate_8_7.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -[[migrating-8.7]] -== Migrating to 8.7 -++++ -8.7 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.7. - -See also <> and <>. - -[discrete] -[[breaking-changes-8.7]] -=== Breaking changes - -The following changes in {es} 8.7 might affect your applications -and prevent them from operating normally. -Before upgrading to 8.7, review these changes and take the described steps -to mitigate the impact. - -There are no notable breaking changes in {es} 8.7. -But there are some less critical breaking changes. - -[discrete] -[[breaking_87_ingest_changes]] -==== Ingest changes - -[[making_jsonprocessor_stricter_so_it_does_not_silently_drop_data]] -.Making `JsonProcessor` stricter so that it does not silently drop data -[%collapsible] -==== -*Details* + -The ingest node's `json` processor was previously lenient. It would accept invalid JSON data if it started with valid JSON data. -Anything after the valid part would be silently discarded. From 8.7 onwards, the default behavior is to reject invalid JSON data with -an exception so that data is not silently lost. The old behavior can be reproduced by passing `false` as the value of the new -`strict_json_parsing` processor parameter. -We consider this change to be a bugfix but list it here as a breaking change since it may affect the behavior of applications which -were sending invalid JSON data to the `json` processor. - -*Impact* + -Ensure your application only sends valid JSON data to the `json` processor, or modify the `json` processors in your pipelines to set -the `strict_json_parsing` parameter to `false`. -==== diff --git a/docs/reference/migration/migrate_8_8.asciidoc b/docs/reference/migration/migrate_8_8.asciidoc deleted file mode 100644 index 22c5ae2a33750..0000000000000 --- a/docs/reference/migration/migrate_8_8.asciidoc +++ /dev/null @@ -1,47 +0,0 @@ -[[migrating-8.8]] -== Migrating to 8.8 -++++ -8.8 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.8. - -See also <> and <>. - - -[discrete] -[[breaking-changes-8.8]] -=== Breaking changes - -There are no breaking changes in {es} 8.8. - -[discrete] -[[deprecated-8.8]] -=== Deprecations - -The following functionality has been deprecated in {es} 8.8 -and will be removed in a future version. -While this won't have an immediate impact on your applications, -we strongly encourage you to take the described steps to update your code -after upgrading to 8.8. - -To find out if you are using any deprecated functionality, -enable <>. - - -[discrete] -[[deprecations_88_cluster_and_node_setting]] -==== Cluster and node setting deprecations - -[[deprecate_cluster_routing_allocation_type]] -.Deprecate `cluster.routing.allocation.type` -[%collapsible] -==== -*Details* + -The `cluster.routing.allocation.type` setting is deprecated and will be removed in a future version. - -*Impact* + -Discontinue use of the `cluster.routing.allocation.type` setting. -==== - diff --git a/docs/reference/migration/migrate_8_9.asciidoc b/docs/reference/migration/migrate_8_9.asciidoc deleted file mode 100644 index e2f54dc58bfe5..0000000000000 --- a/docs/reference/migration/migrate_8_9.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -[[migrating-8.9]] -== Migrating to 8.9 -++++ -8.9 -++++ - -This section discusses the changes that you need to be aware of when migrating -your application to {es} 8.9. - -See also <> and <>. - -[discrete] -[[breaking-changes-8.9]] -=== Breaking changes - -The following changes in {es} 8.9 might affect your applications -and prevent them from operating normally. -Before upgrading to 8.9, review these changes and take the described steps -to mitigate the impact. - -[discrete] -[[breaking_89_rest_api_changes]] -==== REST API changes - -[[switch_tdigeststate_to_use_hybriddigest_by_default]] -.Switch TDigestState to use `HybridDigest` by default -[%collapsible] -==== -*Details* + -The default implementation for TDigest in percentile calculations switches to a new internal implementation offering superior performance (2x-10x speedup), at a very small accuracy penalty for very large sample populations. - -*Impact* + -This change leads to generating slightly different results in percentile calculations. If the highest possible accuracy is desired, or it's crucial to produce exactly the same results as in previous versions, one can either set `execution_hint` to `high_accuracy` in the `tdigest` spec of a given percentile calculation, or set `search.aggs.tdigest_execution_hint` to `high_accuracy` in cluster settings to apply to all percentile queries. -==== - diff --git a/docs/reference/migration/migrate_9_0.asciidoc b/docs/reference/migration/migrate_9_0.asciidoc new file mode 100644 index 0000000000000..6569647fd993e --- /dev/null +++ b/docs/reference/migration/migrate_9_0.asciidoc @@ -0,0 +1,319 @@ +// THIS IS A GENERATED FILE. DO NOT EDIT DIRECTLY. +// The content generated here are is not correct and most has been manually commented out until it can be fixed. +// See ES-9931 for more details. +[[migrating-9.0]] +== Migrating to 9.0 +++++ +9.0 +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to {es} 9.0. + +See also <> and <>. + +coming::[9.0.0] + + +[discrete] +[[breaking-changes-9.0]] +=== Breaking changes + +The following changes in {es} 9.0 might affect your applications +and prevent them from operating normally. +Before upgrading to 9.0, review these changes and take the described steps +to mitigate the impact. +// +// [discrete] +// [[breaking_90_analysis_changes]] +// ==== Analysis changes +// +// [[set_lenient_to_true_by_default_when_using_updateable_synonyms]] +// .Set lenient to true by default when using updateable synonyms +// [%collapsible] +// ==== +// *Details* + +// When a `synonym` or `synonym_graph` token filter is configured with `updateable: true`, the default `lenient` +// value will now be `true`. +// +// *Impact* + +// `synonym` or `synonym_graph` token filters configured with `updateable: true` will ignore invalid synonyms by +// default. This prevents shard initialization errors on invalid synonyms. +// ==== +// +// [discrete] +// [[breaking_90_mapping_changes]] +// ==== Mapping changes +// +// [[jdk_locale_database_change]] +// .JDK locale database change +// [%collapsible] +// ==== +// *Details* + +// {es} 8.16 changes the version of the JDK that is included from version 22 to version 23. This changes the locale database that is used by Elasticsearch from the COMPAT database to the CLDR database. This change can cause significant differences to the textual date formats accepted by Elasticsearch, and to calculated week-dates. +// +// If you run {es} 8.16 on JDK version 22 or below, it will use the COMPAT locale database to match the behavior of 8.15. However, starting with {es} 9.0, {es} will use the CLDR database regardless of JDK version it is run on. +// +// *Impact* + +// This affects you if you use custom date formats using textual or week-date field specifiers. If you use date fields or calculated week-dates that change between the COMPAT and CLDR databases, then this change will cause Elasticsearch to reject previously valid date fields as invalid data. You might need to modify your ingest or output integration code to account for the differences between these two JDK versions. +// +// Starting in version 8.15.2, Elasticsearch will log deprecation warnings if you are using date format specifiers that might change on upgrading to JDK 23. These warnings are visible in Kibana. +// +// For detailed guidance, refer to <> and the https://ela.st/jdk-23-locales[Elastic blog]. +// ==== +// +// [discrete] +// [[breaking_90_analysis_changes]] +// ==== Analysis changes +// +// [[snowball_stemmers_have_been_upgraded]] +// .Snowball stemmers have been upgraded +// [%collapsible] +// ==== +// *Details* + +// Lucene 10 ships with an upgrade of its Snowball stemmers. For details see https://github.com/apache/lucene/issues/13209. Users using Snowball stemmers that are experiencing changes in search behaviour on existing data are advised to reindex. +// +// *Impact* + +// The upgrade should generally provide improved stemming results. Small changes in token analysis can lead to mismatches with previously index data, so existing indices using Snowball stemmers as part of their analysis chain should be reindexed. +// ==== +// +// [[german2_snowball_stemmer_an_alias_for_german_stemmer]] +// .The "german2" snowball stemmer is now an alias for the "german" stemmer +// [%collapsible] +// ==== +// *Details* + +// Lucene 10 has merged the improved "german2" snowball language stemmer with the "german" stemmer. For Elasticsearch, "german2" is now a deprecated alias for "german". This may results in slightly different tokens being generated for terms with umlaut substitution (like "ue" for "ü" etc...) +// +// *Impact* + +// Replace usages of "german2" with "german" in analysis configuration. Old indices that use the "german" stemmer should be reindexed if possible. +// ==== +// +// [[persian_analyzer_has_stemmer_by_default]] +// .The 'persian' analyzer has stemmer by default +// [%collapsible] +// ==== +// *Details* + +// Lucene 10 has added a final stemming step to its PersianAnalyzer that Elasticsearch exposes as 'persian' analyzer. Existing indices will keep the old non-stemming behaviour while new indices will see the updated behaviour with added stemming. Users that wish to maintain the non-stemming behaviour need to define their own analyzer as outlined in https://www.elastic.co/guide/en/elasticsearch/reference/8.15/analysis-lang-analyzer.html#persian-analyzer. Users that wish to use the new stemming behaviour for existing indices will have to reindex their data. +// +// *Impact* + +// Indexing with the 'persian' analyzer will produce slightly different tokens. Users should check if this impacts their search results. If they wish to maintain the legacy non-stemming behaviour they can define their own analyzer equivalent as explained in https://www.elastic.co/guide/en/elasticsearch/reference/8.15/analysis-lang-analyzer.html#persian-analyzer. +// ==== +// +// [[korean_dictionary_for_nori_has_been_updated]] +// .The Korean dictionary for Nori has been updated +// [%collapsible] +// ==== +// *Details* + +// Lucene 10 ships with an updated Korean dictionary (mecab-ko-dic-2.1.1). For details see https://github.com/apache/lucene/issues/11452. Users experiencing changes in search behaviour on existing data are advised to reindex. +// +// *Impact* + +// The change is small and should generally provide better analysis results. Existing indices for full-text use cases should be reindexed though. +// ==== +// +// [discrete] +// [[breaking_90_cluster_and_node_setting_changes]] +// ==== Cluster and node setting changes +// +// [[remove_unsupported_legacy_value_for_discovery_type]] +// .Remove unsupported legacy value for `discovery.type` +// [%collapsible] +// ==== +// *Details* + +// Earlier versions of {es} had a `discovery.type` setting which permitted values that referred to legacy discovery types. From v9.0.0 onwards, the only supported values for this setting are `multi-node` (the default) and `single-node`. +// +// *Impact* + +// Remove any value for `discovery.type` from your `elasticsearch.yml` configuration file. +// ==== +// +// [discrete] +// [[breaking_90_es_ql_changes]] +// ==== ES|QL changes +// +// [[esql_entirely_remove_meta_functions]] +// .ESQL: Entirely remove META FUNCTIONS +// [%collapsible] +// ==== +// *Details* + +// Removes an undocumented syntax from ESQL: META FUNCTION. This was never +// reliable or really useful. Consult the documentation instead. +// +// *Impact* + +// Removes an undocumented syntax from ESQL: META FUNCTION +// ==== +// +// [discrete] +// [[breaking_90_rest_api_changes]] +// ==== REST API changes +// +// [[remove_cluster_state_from_cluster_reroute_response]] +// .Remove cluster state from `/_cluster/reroute` response +// [%collapsible] +// ==== +// *Details* + +// The `POST /_cluster/reroute` API no longer returns the cluster state in its response. The `?metric` query parameter to this API now has no effect and its use will be forbidden in a future version. +// +// *Impact* + +// Cease usage of the `?metric` query parameter when calling the `POST /_cluster/reroute` API. +// ==== +// +// [[remove_deprecated_local_attribute_from_alias_apis]] +// .Remove deprecated local attribute from alias APIs +// [%collapsible] +// ==== +// *Details* + +// The following APIs no longer accept the `?local` query parameter: `GET /_alias`, `GET /_aliases`, `GET /_alias/{name}`, `HEAD /_alias/{name}`, `GET /{index}/_alias`, `HEAD /{index}/_alias`, `GET /{index}/_alias/{name}`, `HEAD /{index}/_alias/{name}`, `GET /_cat/aliases`, and `GET /_cat/aliases/{alias}`. This parameter has been deprecated and ignored since version 8.12. +// +// *Impact* + +// Cease usage of the `?local` query parameter when calling the listed APIs. +// ==== +// +// [[reworking_rrf_retriever_to_be_evaluated_during_rewrite_phase]] +// .Reworking RRF retriever to be evaluated during rewrite phase +// [%collapsible] +// ==== +// *Details* + +// In this release (8.16), we have introduced major changes to the retrievers framework +// and how they can be evaluated, focusing mainly on compound retrievers +// like `rrf` and `text_similarity_reranker`, which allowed us to support full +// composability (i.e. any retriever can be nested under any compound retriever), +// as well as supporting additional search features like collapsing, explaining, +// aggregations, and highlighting. +// +// To ensure consistency, and given that this rework is not available until 8.16, +// `rrf` and `text_similarity_reranker` retriever queries would now +// throw an exception in a mixed cluster scenario, where there are nodes +// both in current or later (i.e. >= 8.16) and previous ( <= 8.15) versions. +// +// As part of the rework, we have also removed the `_rank` property from +// the responses of an `rrf` retriever. +// +// *Impact* + +// - Users will not be able to use the `rrf` and `text_similarity_reranker` retrievers in a mixed cluster scenario +// with previous releases (i.e. prior to 8.16), and the request will throw an `IllegalArgumentException`. +// - `_rank` has now been removed from the output of the `rrf` retrievers so trying to directly parse the field +// will throw an exception +// ==== +// +// [[update_data_stream_lifecycle_telemetry_to_track_global_retention]] +// .Update data stream lifecycle telemetry to track global retention +// [%collapsible] +// ==== +// *Details* + +// In this release we introduced global retention settings that fulfil the following criteria: +// +// - a data stream managed by the data stream lifecycle, +// - a data stream that is not an internal data stream. +// +// As a result, we defined different types of retention: +// +// - **data retention**: the retention configured on data stream level by the data stream user or owner +// - **default global retention:** the retention configured by an admin on a cluster level and applied to any +// data stream that doesn't have data retention and fulfils the criteria. +// - **max global retention:** the retention configured by an admin to guard against having long retention periods. +// Any data stream that fulfills the criteria will adhere to the data retention unless it exceeds the max retention, +// in which case the max global retention applies. +// - **effective retention:** the retention that applies on the data stream that fulfill the criteria at a given moment +// in time. It takes into consideration all the retention above and resolves it to the retention that will take effect. +// +// Considering the above changes, having a field named `retention` in the usage API was confusing. For this reason, we +// renamed it to `data_retention` and added telemetry about the other configurations too. +// +// *Impact* + +// Users that use the field `data_lifecycle.retention` should use the `data_lifecycle.data_retention` +// ==== + + +[discrete] +[[deprecated-9.0]] +=== Deprecations + +The following functionality has been deprecated in {es} 9.0 +and will be removed in a future version. +While this won't have an immediate impact on your applications, +we strongly encourage you to take the described steps to update your code +after upgrading to 9.0. + +To find out if you are using any deprecated functionality, +enable <>. +// +// [discrete] +// [[deprecations_90_analysis]] +// ==== Analysis deprecations +// +// [[deprecate_dutch_kp_lovins_stemmer_as_they_are_removed_in_lucene_10]] +// .Deprecate dutch_kp and lovins stemmer as they are removed in Lucene 10 +// [%collapsible] +// ==== +// *Details* + +// kp, dutch_kp, dutchKp and lovins stemmers are deprecated and will be removed. +// +// *Impact* + +// These stemmers will be removed and will be no longer supported. +// ==== +// +// [[deprecate_edge_ngram_side_parameter]] +// .deprecate `edge_ngram` side parameter +// [%collapsible] +// ==== +// *Details* + +// edge_ngram will no longer accept the side parameter. +// +// *Impact* + +// Users will need to update any usage of edge_ngram token filter that utilizes `side`. If the `back` value was used, they can achieve the same behavior by using the `reverse` token filter. +// ==== +// +// [discrete] +// [[deprecations_90_crud]] +// ==== CRUD deprecations +// +// [[deprecate_dot_prefixed_indices_composable_template_index_patterns]] +// .Deprecate dot-prefixed indices and composable template index patterns +// [%collapsible] +// ==== +// *Details* + +// Indices beginning with a dot '.' are reserved for system and internal indices, and should not be used by and end-user. Additionally, composable index templates that contain patterns for dot-prefixed indices should also be avoided, as these patterns are meant for internal use only. In a future Elasticsearch version, creation of these dot-prefixed indices will no longer be allowed. +// +// *Impact* + +// Requests performing an action that would create an index beginning with a dot (indexing a document, manual creation, reindex), or creating an index template with index patterns beginning with a dot, will contain a deprecation header warning about dot-prefixed indices in the response. +// ==== +// +// [discrete] +// [[deprecations_90_rest_api]] +// ==== REST API deprecations +// +// [[adding_deprecation_warnings_for_rrf_using_rank_sub_searches]] +// .Adding deprecation warnings for rrf using rank and `sub_searches` +// [%collapsible] +// ==== +// *Details* + +// Search API parameter `sub_searches` will no longer be a supported and will be removed in future releases. Similarly, `rrf` can only be used through the specified `retriever` and no longer though the `rank` parameter +// +// *Impact* + +// Requests specifying rrf through `rank` and/or `sub_searches` elements will be disallowed in a future version. Users should instead utilize the new `retriever` parameter. +// ==== +// +// [[deprecate_legacy_params_from_range_query]] +// .Deprecate legacy params from range query +// [%collapsible] +// ==== +// *Details* + +// Range query will not longer accept `to`, `from`, `include_lower`, and `include_upper` parameters. +// +// *Impact* + +// Instead use `gt`, `gte`, `lt` and `lte` parameters. +// ==== +// +// [[inference_api_deprecate_elser_service]] +// .[Inference API] Deprecate elser service +// [%collapsible] +// ==== +// *Details* + +// The `elser` service of the inference API will be removed in an upcoming release. Please use the elasticsearch service instead. +// +// *Impact* + +// In the current version there is no impact. In a future version, users of the `elser` service will no longer be able to use it, and will be required to use the `elasticsearch` service to access elser through the inference API. +// ==== + +// BELOW WAS MANUALLY ADDED TO FIX THE BUILD +include::migrate_9_0/transient-settings-migration-guide.asciidoc[] +//include::migrate_9_0/rest-api-changes.asciidoc[] //see ES-9932 diff --git a/docs/reference/migration/migrate_9_0/rest-api-changes.asciidoc b/docs/reference/migration/migrate_9_0/rest-api-changes.asciidoc new file mode 100644 index 0000000000000..fc6fc7c011a22 --- /dev/null +++ b/docs/reference/migration/migrate_9_0/rest-api-changes.asciidoc @@ -0,0 +1,5 @@ +[discrete] +[[migrate_rest_api_changes]] +=== REST API changes + +//See https://www.elastic.co/guide/en/elasticsearch/reference/8.0/migrating-8.0.html#breaking_80_rest_api_changes for formatting examples diff --git a/docs/reference/migration/transient-settings-migration-guide.asciidoc b/docs/reference/migration/migrate_9_0/transient-settings-migration-guide.asciidoc similarity index 100% rename from docs/reference/migration/transient-settings-migration-guide.asciidoc rename to docs/reference/migration/migrate_9_0/transient-settings-migration-guide.asciidoc diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index c912b0e62b94d..615e7135365cd 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,135 +6,9 @@ This section summarizes the changes in each release. -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> +* <> -- -include::release-notes/8.17.0.asciidoc[] -include::release-notes/8.16.0.asciidoc[] -include::release-notes/8.15.1.asciidoc[] -include::release-notes/8.15.0.asciidoc[] -include::release-notes/8.14.3.asciidoc[] -include::release-notes/8.14.2.asciidoc[] -include::release-notes/8.14.1.asciidoc[] -include::release-notes/8.14.0.asciidoc[] -include::release-notes/8.13.4.asciidoc[] -include::release-notes/8.13.3.asciidoc[] -include::release-notes/8.13.2.asciidoc[] -include::release-notes/8.13.1.asciidoc[] -include::release-notes/8.13.0.asciidoc[] -include::release-notes/8.12.2.asciidoc[] -include::release-notes/8.12.1.asciidoc[] -include::release-notes/8.12.0.asciidoc[] -include::release-notes/8.11.4.asciidoc[] -include::release-notes/8.11.3.asciidoc[] -include::release-notes/8.11.2.asciidoc[] -include::release-notes/8.11.1.asciidoc[] -include::release-notes/8.11.0.asciidoc[] -include::release-notes/8.10.4.asciidoc[] -include::release-notes/8.10.3.asciidoc[] -include::release-notes/8.10.2.asciidoc[] -include::release-notes/8.10.1.asciidoc[] -include::release-notes/8.10.0.asciidoc[] -include::release-notes/8.9.2.asciidoc[] -include::release-notes/8.9.1.asciidoc[] -include::release-notes/8.9.0.asciidoc[] -include::release-notes/8.8.2.asciidoc[] -include::release-notes/8.8.1.asciidoc[] -include::release-notes/8.8.0.asciidoc[] -include::release-notes/8.7.1.asciidoc[] -include::release-notes/8.7.0.asciidoc[] -include::release-notes/8.6.2.asciidoc[] -include::release-notes/8.6.1.asciidoc[] -include::release-notes/8.6.0.asciidoc[] -include::release-notes/8.5.3.asciidoc[] -include::release-notes/8.5.2.asciidoc[] -include::release-notes/8.5.1.asciidoc[] -include::release-notes/8.5.0.asciidoc[] -include::release-notes/8.4.3.asciidoc[] -include::release-notes/8.4.2.asciidoc[] -include::release-notes/8.4.1.asciidoc[] -include::release-notes/8.4.0.asciidoc[] -include::release-notes/8.3.3.asciidoc[] -include::release-notes/8.3.2.asciidoc[] -include::release-notes/8.3.1.asciidoc[] -include::release-notes/8.3.0.asciidoc[] -include::release-notes/8.2.3.asciidoc[] -include::release-notes/8.2.2.asciidoc[] -include::release-notes/8.2.1.asciidoc[] -include::release-notes/8.2.0.asciidoc[] -include::release-notes/8.1.3.asciidoc[] -include::release-notes/8.1.2.asciidoc[] -include::release-notes/8.1.1.asciidoc[] -include::release-notes/8.1.0.asciidoc[] -include::release-notes/8.0.1.asciidoc[] -include::release-notes/8.0.0.asciidoc[] -include::release-notes/8.0.0-rc2.asciidoc[] -include::release-notes/8.0.0-rc1.asciidoc[] -include::release-notes/8.0.0-beta1.asciidoc[] -include::release-notes/8.0.0-alpha2.asciidoc[] -include::release-notes/8.0.0-alpha1.asciidoc[] +include::release-notes/9.0.0.asciidoc[] diff --git a/docs/reference/release-notes/8.0.0-alpha1.asciidoc b/docs/reference/release-notes/8.0.0-alpha1.asciidoc deleted file mode 100644 index a2c57cd3639f1..0000000000000 --- a/docs/reference/release-notes/8.0.0-alpha1.asciidoc +++ /dev/null @@ -1,473 +0,0 @@ -[[release-notes-8.0.0-alpha1]] -== {es} version 8.0.0-alpha1 - -Also see <>. - -[[breaking-8.0.0-alpha1]] -[float] -=== Breaking changes - -Aggregations:: -* Percentiles aggregation: disallow specifying same percentile values twice {es-pull}52257[#52257] (issue: {es-issue}51871[#51871]) -* Remove Adjacency_matrix setting {es-pull}46327[#46327] (issues: {es-issue}46257[#46257], {es-issue}46324[#46324]) -* Remove `MovingAverage` pipeline aggregation {es-pull}39328[#39328] -* Remove deprecated `_time` and `_term` sort orders {es-pull}39450[#39450] -* Remove deprecated date histo interval {es-pull}75000[#75000] - -Allocation:: -* Breaking change for single data node setting {es-pull}73737[#73737] (issues: {es-issue}55805[#55805], {es-issue}73733[#73733]) -* Remove `include_relocations` setting {es-pull}47717[#47717] (issues: {es-issue}46079[#46079], {es-issue}47443[#47443]) - -Analysis:: -* Cleanup versioned deprecations in analysis {es-pull}41560[#41560] (issue: {es-issue}41164[#41164]) -* Remove preconfigured `delimited_payload_filter` {es-pull}43686[#43686] (issues: {es-issue}41560[#41560], {es-issue}43684[#43684]) - -Authentication:: -* Always add file and native realms unless explicitly disabled {es-pull}69096[#69096] (issue: {es-issue}50892[#50892]) -* Do not set a NameID format in Policy by default {es-pull}44090[#44090] (issue: {es-issue}40353[#40353]) -* Make order setting mandatory for Realm config {es-pull}51195[#51195] (issue: {es-issue}37614[#37614]) - -CCR:: -* Avoid auto following leader system indices in CCR {es-pull}72815[#72815] (issue: {es-issue}67686[#67686]) - -Cluster Coordination:: -* Remove join timeout {es-pull}60873[#60873] (issue: {es-issue}60872[#60872]) -* Remove node filters for voting config exclusions {es-pull}55673[#55673] (issues: {es-issue}47990[#47990], {es-issue}50836[#50836]) -* Remove support for delaying state recovery pending master {es-pull}53845[#53845] (issue: {es-issue}51806[#51806]) - -Distributed:: -* Remove synced flush {es-pull}50882[#50882] (issues: {es-issue}50776[#50776], {es-issue}50835[#50835]) -* Remove the `cluster.remote.connect` setting {es-pull}54175[#54175] (issue: {es-issue}53924[#53924]) - -Engine:: -* Force merge should reject requests with `only_expunge_deletes` and `max_num_segments` set {es-pull}44761[#44761] (issue: {es-issue}43102[#43102]) -* Remove per-type indexing stats {es-pull}47203[#47203] (issue: {es-issue}41059[#41059]) -* Remove translog retention settings {es-pull}51697[#51697] (issue: {es-issue}50775[#50775]) - -Features/CAT APIs:: -* Remove the deprecated `local` parameter for `_cat/indices` {es-pull}64868[#64868] (issue: {es-issue}62198[#62198]) -* Remove the deprecated `local` parameter for `_cat/shards` {es-pull}64867[#64867] (issue: {es-issue}62197[#62197]) - -Features/Features:: -* Remove deprecated ._tier allocation filtering settings {es-pull}73074[#73074] (issue: {es-issue}72835[#72835]) - -Features/ILM+SLM:: -* Add lower bound on `poll_interval` {es-pull}39593[#39593] (issue: {es-issue}39163[#39163]) - -Features/Indices APIs:: -* Change prefer_v2_templates parameter to default to true {es-pull}55489[#55489] (issues: {es-issue}53101[#53101], {es-issue}55411[#55411]) -* Remove deprecated `_upgrade` API {es-pull}64732[#64732] (issue: {es-issue}21337[#21337]) -* Remove local parameter for get field mapping request {es-pull}55100[#55100] (issue: {es-issue}55099[#55099]) -* Remove `include_type_name` parameter from REST layer {es-pull}48632[#48632] (issue: {es-issue}41059[#41059]) -* Remove the `template` field in index templates {es-pull}49460[#49460] (issue: {es-issue}21009[#21009]) - -Features/Watcher:: -* Move watcher history to data stream {es-pull}64252[#64252] - -Geo:: -* Disallow creating `geo_shape` mappings with deprecated parameters {es-pull}70850[#70850] (issue: {es-issue}32039[#32039]) -* Remove bounding box query `type` parameter {es-pull}74536[#74536] - -Infra/Circuit Breakers:: -* Fixed synchronizing inflight breaker with internal variable {es-pull}40878[#40878] - -Infra/Core:: -* Fail when using multiple data paths {es-pull}72184[#72184] (issue: {es-issue}71205[#71205]) -* Limit processors by available processors {es-pull}44894[#44894] (issue: {es-issue}44889[#44889]) -* Remove `nodes/0` folder prefix from data path {es-pull}42489[#42489] -* Remove `bootstrap.system_call_filter` setting {es-pull}72848[#72848] -* Remove `fixed_auto_queue_size` threadpool type {es-pull}52280[#52280] -* Remove `node.max_local_storage_nodes` {es-pull}42428[#42428] (issue: {es-issue}42426[#42426]) -* Remove camel case named formats {es-pull}60044[#60044] -* Remove legacy role settings {es-pull}71163[#71163] (issues: {es-issue}54998[#54998], {es-issue}66409[#66409], {es-issue}71143[#71143]) -* Remove `processors` setting {es-pull}45905[#45905] (issue: {es-issue}45855[#45855]) -* Remove the `local` parameter of `/_cat/nodes` {es-pull}50594[#50594] (issues: {es-issue}50088[#50088], {es-issue}50499[#50499]) -* Remove the listener thread pool {es-pull}53314[#53314] (issue: {es-issue}53049[#53049]) -* Remove the node local storage setting {es-pull}54381[#54381] (issue: {es-issue}54374[#54374]) -* Remove the `pidfile` setting {es-pull}45940[#45940] (issue: {es-issue}45938[#45938]) -* Removes `week_year` date format {es-pull}63384[#63384] (issue: {es-issue}60707[#60707]) - -Infra/Logging:: -* Remove slowlog level {es-pull}57591[#57591] (issue: {es-issue}56171[#56171]) - -Infra/Plugins:: -* Remove deprecated basic license feature enablement settings from 8.0 {es-pull}56211[#56211] (issue: {es-issue}54745[#54745]) - -Infra/REST API:: -* Remove content type required setting {es-pull}61043[#61043] -* Remove deprecated endpoints containing `_xpack` {es-pull}48170[#48170] (issue: {es-issue}35958[#35958]) -* Remove deprecated endpoints of hot threads API {es-pull}55109[#55109] (issue: {es-issue}52640[#52640]) -* Allow parsing Content-Type and Accept headers with version {es-pull}61427[#61427] - -Infra/Resiliency:: -* Fail node containing ancient closed index {es-pull}44264[#44264] (issues: {es-issue}21830[#21830], {es-issue}41731[#41731], {es-issue}44230[#44230]) - -Infra/Scripting:: -* Consolidate script parsing from object {es-pull}59507[#59507] (issue: {es-issue}59391[#59391]) -* Scripting: Move `script_cache` into _nodes/stats {es-pull}59265[#59265] (issues: {es-issue}50152[#50152], {es-issue}59262[#59262]) -* Scripting: Remove general cache settings {es-pull}59262[#59262] (issue: {es-issue}50152[#50152]) - -Infra/Settings:: -* Change default value of `action.destructive_requires_name` to `true` {es-pull}66908[#66908] (issue: {es-issue}61074[#61074]) -* Forbid settings without a namespace {es-pull}45947[#45947] (issues: {es-issue}45905[#45905], {es-issue}45940[#45940]) - -Machine Learning:: -* Remove deprecated `_xpack` endpoints {es-pull}59870[#59870] (issues: {es-issue}35958[#35958], {es-issue}48170[#48170]) -* Remove the ability to update datafeed's `job_id` {es-pull}44752[#44752] (issue: {es-issue}44616[#44616]) - -Mapping:: -* Remove `boost` mapping parameter {es-pull}62639[#62639] (issue: {es-issue}62623[#62623]) -* Remove support for chained multi-fields {es-pull}42333[#42333] (issues: {es-issue}41267[#41267], {es-issue}41926[#41926]) -* Remove support for string in `unmapped_type` {es-pull}45675[#45675] -* Removes typed URLs from mapping APIs {es-pull}41676[#41676] - -Network:: -* Remove client feature tracking {es-pull}44929[#44929] (issues: {es-issue}31020[#31020], {es-issue}42538[#42538], {es-issue}44667[#44667]) -* Remove escape hatch permitting incompatible builds {es-pull}65753[#65753] (issues: {es-issue}65249[#65249], {es-issue}65601[#65601]) - -Packaging:: -* Remove SysV init support {es-pull}51716[#51716] (issue: {es-issue}51480[#51480]) -* Remove support for `JAVA_HOME` {es-pull}69149[#69149] (issue: {es-issue}55820[#55820]) - -Recovery:: -* Remove dangling index auto import functionality {es-pull}59698[#59698] (issue: {es-issue}48366[#48366]) - -Reindex:: -* Reindex from Remote encoding {es-pull}41007[#41007] (issue: {es-issue}40303[#40303]) -* Reindex remove outer level size {es-pull}43373[#43373] (issues: {es-issue}24344[#24344], {es-issue}41894[#41894]) - -Rollup:: -* `RollupStart` endpoint should return OK if job already started {es-pull}41502[#41502] (issues: {es-issue}35928[#35928], {es-issue}39845[#39845]) - -Search:: -* Decouple shard allocation awareness from search and get requests {es-pull}45735[#45735] (issue: {es-issue}43453[#43453]) -* Fix range query on date fields for number inputs {es-pull}63692[#63692] (issue: {es-issue}63680[#63680]) -* Make fuzziness reject illegal values earlier {es-pull}33511[#33511] -* Make remote cluster resolution stricter {es-pull}40419[#40419] (issue: {es-issue}37863[#37863]) -* Parse empty first line in msearch request body as action metadata {es-pull}41011[#41011] (issue: {es-issue}39841[#39841]) -* Remove `CommonTermsQuery` and `cutoff_frequency` param {es-pull}42654[#42654] (issue: {es-issue}37096[#37096]) -* Remove `type` query {es-pull}47207[#47207] (issue: {es-issue}41059[#41059]) -* Remove `use_field_mapping` format option for docvalue fields {es-pull}55622[#55622] -* Remove deprecated `SimpleQueryStringBuilder` parameters {es-pull}57200[#57200] -* Remove deprecated `search.remote` settings {es-pull}42381[#42381] (issues: {es-issue}33413[#33413], {es-issue}38556[#38556]) -* Remove deprecated sort options: `nested_path` and `nested_filter` {es-pull}42809[#42809] (issue: {es-issue}27098[#27098]) -* Remove deprecated vector functions {es-pull}48725[#48725] (issue: {es-issue}48604[#48604]) -* Remove support for `_type` in searches {es-pull}68564[#68564] (issues: {es-issue}41059[#41059], {es-issue}68311[#68311]) -* Remove support for sparse vectors {es-pull}48781[#48781] (issue: {es-issue}48368[#48368]) -* Remove the object format for `indices_boost` {es-pull}55078[#55078] -* Removes type from `TermVectors` APIs {es-pull}42198[#42198] (issue: {es-issue}41059[#41059]) -* Removes typed endpoint from search and related APIs {es-pull}41640[#41640] -* Set max allowed size for stored async response {es-pull}74455[#74455] (issue: {es-issue}67594[#67594]) -* `indices.query.bool.max_clause_count` now limits all query clauses {es-pull}75297[#75297] - -Security:: -* Remove obsolete security settings {es-pull}40496[#40496] -* Remove support of creating CA on the fly when generating certificates {es-pull}65590[#65590] (issue: {es-issue}61884[#61884]) -* Remove the `id` field from the `InvalidateApiKey` API {es-pull}66671[#66671] (issue: {es-issue}66317[#66317]) -* Remove the migrate tool {es-pull}42174[#42174] -* Compress audit logs {es-pull}64472[#64472] (issue: {es-issue}63843[#63843]) -* Remove insecure settings {es-pull}46147[#46147] (issue: {es-issue}45947[#45947]) - -Snapshot/Restore:: -* Blob store compress default to `true` {es-pull}40033[#40033] -* Get snapshots support for multiple repositories {es-pull}42090[#42090] (issue: {es-issue}41210[#41210]) -* Remove repository stats API {es-pull}62309[#62309] (issue: {es-issue}62297[#62297]) -* Remove frozen cache setting leniency {es-pull}71013[#71013] (issue: {es-issue}70341[#70341]) - -TLS:: -* Reject misconfigured/ambiguous SSL server config {es-pull}45892[#45892] -* Remove support for configurable PKCS#11 keystores {es-pull}75404[#75404] -* Remove the client transport profile filter {es-pull}43236[#43236] - - - -[[breaking-java-8.0.0-alpha1]] -[float] -=== Breaking Java changes - -Authentication:: -* Mandate x-pack REST handler installed {es-pull}71061[#71061] (issue: {es-issue}70523[#70523]) - -CCR:: -* Remove the `CcrClient` {es-pull}42816[#42816] - -CRUD:: -* Remove types from `BulkRequest` {es-pull}46983[#46983] (issue: {es-issue}41059[#41059]) -* Remove `Client.prepareIndex(index, type, id)` method {es-pull}48443[#48443] -* Remove deprecated `include-type` methods from HLRC indices client {es-pull}48471[#48471] - - -Client:: -* Remove `SecurityClient` from x-pack {es-pull}42471[#42471] - -Features/ILM+SLM:: -* Remove the `ILMClient` {es-pull}42817[#42817] -* Rename HLRC `indexlifecycle` components to `ilm` {es-pull}44982[#44982] (issues: {es-issue}44725[#44725], {es-issue}44917[#44917]) - -Features/Monitoring:: -* Remove `MonitoringClient` from x-pack {es-pull}42770[#42770] - -Features/Watcher:: -* Remove `WatcherClient` from x-pack {es-pull}42815[#42815] - -Infra/Core:: -* Remove `XPackClient` from x-pack {es-pull}42729[#42729] -* Remove the transport client {es-pull}42538[#42538] -* Remove transport client from x-pack {es-pull}42202[#42202] - -Infra/REST API:: -* Copy HTTP headers to `ThreadContext` strictly {es-pull}45945[#45945] - -Machine Learning:: -* Remove the `MachineLearningClient` {es-pull}43108[#43108] - -Mapping:: -* Remove type filter from `GetMappings` API {es-pull}47364[#47364] (issue: {es-issue}41059[#41059]) -* Remove `type` parameter from `PutMappingRequest.buildFromSimplifiedDef()` {es-pull}50844[#50844] (issue: {es-issue}41059[#41059]) -* Remove unused parameter from `MetadataFieldMapper.TypeParser#getDefault()` {es-pull}51219[#51219] -* Remove `type` parameter from `CIR.mapping(type, object...)` {es-pull}50739[#50739] (issue: {es-issue}41059[#41059]) - -Search:: -* Removes types from `SearchRequest` and `QueryShardContext` {es-pull}42112[#42112] - -Snapshot/Restore:: -* Remove deprecated repository methods {es-pull}42359[#42359] (issue: {es-issue}42213[#42213]) - - - -[[enhancement-8.0.0-alpha1]] -[float] -=== Enhancements - -Analysis:: -* Move `reload_analyzers` endpoint to x-pack {es-pull}43559[#43559] - -Authentication:: -* Reset elastic password CLI tool {es-pull}74892[#74892] (issues: {es-issue}70113[#70113], {es-issue}74890[#74890]) - -EQL:: -* Add option for returning results from the tail of the stream {es-pull}64869[#64869] (issue: {es-issue}58646[#58646]) -* Introduce case insensitive variant `in~` {es-pull}68176[#68176] (issue: {es-issue}68172[#68172]) -* Optimize redundant `toString` {es-pull}71070[#71070] (issue: {es-issue}70681[#70681]) - -Engine:: -* Always use soft-deletes in `InternalEngine` {es-pull}50415[#50415] -* Remove translog retention policy {es-pull}51417[#51417] (issue: {es-issue}50775[#50775]) - -Features/CAT APIs:: -* Remove `size` and add `time` params to `_cat/threadpool` {es-pull}55736[#55736] (issue: {es-issue}54478[#54478]) - -Features/Stats:: -* Add bulk stats track the bulk per shard {es-pull}52208[#52208] (issues: {es-issue}47345[#47345], {es-issue}50536[#50536]) - - -Features/Watcher:: -* Remove Watcher history clean up from monitoring {es-pull}67154[#67154] - -Infra/Core:: -* Remove aliases exist action {es-pull}43430[#43430] -* Remove indices exists action {es-pull}43164[#43164] -* Remove types exists action {es-pull}43344[#43344] - -Infra/Logging:: -* Make Elasticsearch JSON logs ECS compliant {es-pull}47105[#47105] (issue: {es-issue}46119[#46119]) - -Infra/REST API:: -* Allow for field declaration for future compatible versions {es-pull}69774[#69774] (issue: {es-issue}51816[#51816]) -* Introduce stability description to the REST API specification {es-pull}38413[#38413] -* Parsing: Validate that fields are not registered twice {es-pull}70243[#70243] -* Support response content-type with versioned media type {es-pull}65500[#65500] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Typed endpoints for index and get APIs {es-pull}69131[#69131] (issue: {es-issue}54160[#54160]) -* [REST API Compatibility] Typed endpoints for put and get mapping and get field mappings {es-pull}71721[#71721] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Allow `copy_settings` flag for resize operations {es-pull}75184[#75184] (issues: {es-issue}38514[#38514], {es-issue}51816[#51816]) -* [REST API Compatibility] Allow for type in geo shape query {es-pull}74553[#74553] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Always return `adjust_pure_negative` value {es-pull}75182[#75182] (issues: {es-issue}49543[#49543], {es-issue}51816[#51816]) -* [REST API Compatibility] Clean up x-pack/plugin rest compat tests {es-pull}74701[#74701] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Do not return `_doc` for empty mappings in template {es-pull}75448[#75448] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160], {es-issue}70966[#70966], {es-issue}74544[#74544]) -* [REST API Compatibility] Dummy REST action for `indices.upgrade` API {es-pull}75136[#75136] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] REST Terms vector typed response {es-pull}73117[#73117] -* [REST API Compatibility] Rename `BulkItemResponse.Failure` type field {es-pull}74937[#74937] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Type metadata for docs used in simulate request {es-pull}74222[#74222] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Typed `TermLookups` {es-pull}74544[#74544] (issues: {es-issue}46943[#46943], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Typed and x-pack graph explore API {es-pull}74185[#74185] (issues: {es-issue}46935[#46935], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Typed endpoint for bulk API {es-pull}73571[#73571] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Typed endpoint for multi-get API {es-pull}73878[#73878] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Typed endpoints for `RestUpdateAction` and `RestDeleteAction` {es-pull}73115[#73115] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Typed endpoints for `get_source` API {es-pull}73957[#73957] (issues: {es-issue}46587[#46587], {es-issue}46931[#46931], {es-issue}51816[#51816]) -* [REST API Compatibility] Typed endpoints for explain API {es-pull}73901[#73901] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Typed endpoints for search `_count` API {es-pull}73958[#73958] (issues: {es-issue}42112[#42112], {es-issue}51816[#51816]) -* [REST API Compatibility] Typed indexing stats {es-pull}74181[#74181] (issues: {es-issue}47203[#47203], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Types for percolate query API {es-pull}74698[#74698] (issues: {es-issue}46985[#46985], {es-issue}51816[#51816], {es-issue}54160[#54160], {es-issue}74689[#74689]) -* [REST API Compatibility] Validate query typed API {es-pull}74171[#74171] (issues: {es-issue}46927[#46927], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Voting config exclusion exception message {es-pull}75406[#75406] (issues: {es-issue}51816[#51816], {es-issue}55291[#55291]) -* [REST API Compatibility] `MoreLikeThisQuery` with types {es-pull}75123[#75123] (issues: {es-issue}42198[#42198], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Update and delete by query using size field {es-pull}69606[#69606] -* [REST API Compatibility] Indicies boost in object format {es-pull}74422[#74422] (issues: {es-issue}51816[#51816], {es-issue}55078[#55078]) -* [REST API Compatibility] Typed endpoints for search and related endpoints {es-pull}72155[#72155] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Allow to use size `-1` {es-pull}75342[#75342] (issues: {es-issue}51816[#51816], {es-issue}69548[#69548], {es-issue}70209[#70209]) -* [REST API Compatibility] Ignore `use_field_mapping` option for docvalue {es-pull}74435[#74435] (issue: {es-issue}55622[#55622]) -* [REST API Compatibility] `_time` and `_term` sort orders {es-pull}74919[#74919] (issues: {es-issue}39450[#39450], {es-issue}51816[#51816]) -* [REST API Compatability] `template` parameter and field on PUT index template {es-pull}71238[#71238] (issues: {es-issue}49460[#49460], {es-issue}51816[#51816], {es-issue}68905[#68905]) -* [REST API Compatibility] Make query registration easier {es-pull}75722[#75722] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Typed query {es-pull}75453[#75453] (issues: {es-issue}47207[#47207], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Deprecate the use of synced flush {es-pull}75372[#75372] (issues: {es-issue}50882[#50882], {es-issue}51816[#51816]) -* [REST API Compatibility] Licence `accept_enterprise` and response changes {es-pull}75479[#75479] (issues: {es-issue}50067[#50067], {es-issue}50735[#50735], {es-issue}51816[#51816], {es-issue}58217[#58217]) - -Infra/Scripting:: -* Update `DeprecationMap` to `DynamicMap` {es-pull}56149[#56149] (issue: {es-issue}52103[#52103]) - -Infra/Settings:: -* Fixed inconsistent `Setting.exist()` {es-pull}46603[#46603] (issue: {es-issue}41830[#41830]) -* Remove `index.optimize_auto_generated_id` setting (#27583) {es-pull}27600[#27600] (issue: {es-issue}27583[#27583]) - -License:: -* Add deprecated `accept_enterprise` param to `/_xpack` {es-pull}58220[#58220] (issue: {es-issue}58217[#58217]) -* Support `accept_enterprise` param in get license API {es-pull}50067[#50067] (issue: {es-issue}49474[#49474]) - -Machine Learning:: -* The Windows build platform for the {ml} C++ code now uses Visual Studio 2019 {ml-pull}1352[#1352] -* The macOS build platform for the {ml} C++ code is now Mojave running Xcode 11.3.1, - or Ubuntu 20.04 running clang 8 for cross compilation {ml-pull}1429[#1429] -* The Linux build platform for the {ml} C++ code is now CentOS 7 running gcc 9.3 {ml-pull}1170[#1170] -* Add a new application for evaluating PyTorch models. The app depends on LibTorch - the C++ front end to PyTorch - and performs inference on models stored in the TorchScript format {ml-pull}1902[#1902] - -Mapping:: -* Sparse vector to throw exception consistently {es-pull}62646[#62646] - -Packaging:: -* Make the Docker build more re-usable in Cloud {es-pull}50277[#50277] (issues: {es-issue}46166[#46166], {es-issue}49926[#49926]) -* Update docker-compose.yml to fix bootstrap check error {es-pull}47650[#47650] - -Recovery:: -* Use Lucene index in peer recovery and resync {es-pull}51189[#51189] (issue: {es-issue}50775[#50775]) - -Reindex:: -* Make reindexing managed by a persistent task {es-pull}43382[#43382] (issue: {es-issue}42612[#42612]) -* Reindex restart from checkpoint {es-pull}46055[#46055] (issue: {es-issue}42612[#42612]) -* Reindex search resiliency {es-pull}45497[#45497] (issues: {es-issue}42612[#42612], {es-issue}43187[#43187]) -* Reindex v2 rethrottle sliced fix {es-pull}46967[#46967] (issues: {es-issue}42612[#42612], {es-issue}46763[#46763]) - -Rollup:: -* Adds support for `date_nanos` in Rollup Metric and `DateHistogram` Configs {es-pull}59349[#59349] (issue: {es-issue}44505[#44505]) - -SQL:: -* Add text formatting support for multivalue {es-pull}68606[#68606] -* Add xDBC and CLI support. QA CSV specs {es-pull}68966[#68966] -* Export array values through result sets {es-pull}69512[#69512] -* Improve alias resolution in sub-queries {es-pull}67216[#67216] (issue: {es-issue}56713[#56713]) -* Improve the optimization of null conditionals {es-pull}71192[#71192] -* Push `WHERE` clause inside subqueries {es-pull}71362[#71362] -* Use Java `String` methods for `LTRIM/RTRIM` {es-pull}57594[#57594] -* QL: Make canonical form take into account children {es-pull}71266[#71266] -* QL: Polish optimizer expression rule declaration {es-pull}71396[#71396] -* QL: Propagate nullability constraints across conjunctions {es-pull}71187[#71187] (issue: {es-issue}70683[#70683]) - -Search:: -* Completely disallow setting negative size in search {es-pull}70209[#70209] (issue: {es-issue}69548[#69548]) -* Make `0` as invalid value for `min_children` in `has_child` query {es-pull}41347[#41347] -* Return error when remote indices are locally resolved {es-pull}74556[#74556] (issue: {es-issue}26247[#26247]) - -Security:: -* Add a tool for creating enrollment tokens {es-pull}74890[#74890] -* Add the Enroll Kibana API {es-pull}72207[#72207] -* Change default hashing algorithm for FIPS 140 {es-pull}55544[#55544] -* Create enrollment token {es-pull}73573[#73573] (issues: {es-issue}71438[#71438], {es-issue}72129[#72129]) -* Enroll node API {es-pull}72129[#72129] -* Not encoding the Api Key in Enrollment token {es-pull}74510[#74510] (issue: {es-issue}73573[#73573]) -* Configure security for the initial node CLI {es-pull}74868[#74868] - -Snapshot/Restore:: -* Introduce searchable snapshots index setting for cascade deletion of snapshots {es-pull}74977[#74977] -* Unify blob store compress setting {es-pull}39346[#39346] (issue: {es-issue}39073[#39073]) -* Add recovery state tracking for searchable snapshots {es-pull}60505[#60505] - -TLS:: -* Add `ChaCha20` TLS ciphers on Java 12+ {es-pull}42155[#42155] -* Add support for `KeyStore` filters to `ssl-config` {es-pull}75407[#75407] -* Update TLS ciphers and protocols for JDK 11 {es-pull}41808[#41808] (issues: {es-issue}38646[#38646], {es-issue}41385[#41385]) - - - -[[bug-8.0.0-alpha1]] -[float] -=== Bug fixes - -Aggregations:: -* Fix BWC issues for `x_pack/usage` {es-pull}55181[#55181] (issue: {es-issue}54847[#54847]) -* Fix `DoubleBounds` null serialization {es-pull}59475[#59475] -* Fix `TopHitsAggregationBuilder` adding duplicate `_score` sort clauses {es-pull}42179[#42179] (issue: {es-issue}42154[#42154]) -* Fix `t_test` usage stats {es-pull}54753[#54753] (issue: {es-issue}54744[#54744]) -* Throw exception if legacy interval cannot be parsed in `DateIntervalWrapper` {es-pull}41972[#41972] (issue: {es-issue}41970[#41970]) - -CCR:: -* Fix `AutoFollow` version checks {es-pull}73776[#73776] (issue: {es-issue}72935[#72935]) - -Cluster Coordination:: -* Apply cluster states in system context {es-pull}53785[#53785] (issue: {es-issue}53751[#53751]) - -Distributed:: -* Introduce `?wait_for_active_shards=index-setting` {es-pull}67158[#67158] (issue: {es-issue}66419[#66419]) -* Respect `CloseIndexRequest#waitForActiveShards` in HLRC {es-pull}67374[#67374] (issues: {es-issue}67158[#67158], {es-issue}67246[#67246]) -* Fixes to task result index mapping {es-pull}50359[#50359] (issue: {es-issue}50248[#50248]) - -Features/CAT APIs:: -* Fix cat recovery display of bytes fields {es-pull}40379[#40379] (issue: {es-issue}40335[#40335]) - -Features/Java High Level REST Client:: -* Fix HLRC compatibility with Java 8 {es-pull}74290[#74290] (issues: {es-issue}73910[#73910], {es-issue}74272[#74272], {es-issue}74289[#74289]) -* Avoid `StackOverflowError` due to regex alternate paths {es-pull}61259[#61259] (issue: {es-issue}60889[#60889]) - -Geo:: -* Preprocess polygon rings before processing it for decomposition {es-pull}59501[#59501] (issues: {es-issue}54441[#54441], {es-issue}59386[#59386]) - -Infra/Core:: -* Add searchable snapshot cache folder to `NodeEnvironment` {es-pull}66297[#66297] (issue: {es-issue}65725[#65725]) -* CLI tools: Write errors to stderr instead of stdout {es-pull}45586[#45586] (issue: {es-issue}43260[#43260]) -* Precompute `ParsedMediaType` for XContentType {es-pull}67409[#67409] - -Infra/Logging:: -* Fix NPE when logging null values in JSON {es-pull}53715[#53715] (issue: {es-issue}46702[#46702]) -* Fix stats in slow logs to be a escaped JSON {es-pull}44642[#44642] -* Populate data stream fields when `xOpaqueId` not provided {es-pull}62156[#62156] - -Infra/REST API:: -* Do not allow spaces within `MediaType's` parameters {es-pull}64650[#64650] (issue: {es-issue}51816[#51816]) -* Handle incorrect header values {es-pull}64708[#64708] (issues: {es-issue}51816[#51816], {es-issue}64689[#64689]) -* Ignore media ranges when parsing {es-pull}64721[#64721] (issues: {es-issue}51816[#51816], {es-issue}64689[#64689]) -* `RestController` should not consume request content {es-pull}44902[#44902] (issue: {es-issue}37504[#37504]) - -Infra/Scripting:: -* Change compound assignment structure to support string concatenation {es-pull}61825[#61825] -* Fixes casting in constant folding {es-pull}61508[#61508] -* Several minor Painless fixes {es-pull}61594[#61594] - -Machine Learning:: -* Handle null value of `FieldCapabilitiesResponse` {es-pull}64327[#64327] - -Mapping:: -* Remove assertions that mappings have one top-level key {es-pull}58779[#58779] (issue: {es-issue}58521[#58521]) - -Packaging:: -* Suppress illegal access in plugin install {es-pull}41620[#41620] (issue: {es-issue}41478[#41478]) - -SQL:: -* Introduce dedicated node for `HAVING` declaration {es-pull}71279[#71279] (issue: {es-issue}69758[#69758]) -* Make `RestSqlQueryAction` thread-safe {es-pull}69901[#69901] - -Search:: -* Check for negative `from` values in search request body {es-pull}54953[#54953] (issue: {es-issue}54897[#54897]) -* Fix `VectorsFeatureSetUsage` serialization in BWC mode {es-pull}55399[#55399] (issue: {es-issue}55378[#55378]) -* Handle total hits equal to `track_total_hits` {es-pull}37907[#37907] (issue: {es-issue}37897[#37897]) -* Improve error msg for CCS request on node without remote cluster role {es-pull}60351[#60351] (issue: {es-issue}59683[#59683]) - -Snapshot/Restore:: -* Fix `GET /_snapshot/_all/_all` if there are no repos {es-pull}43558[#43558] (issue: {es-issue}43547[#43547]) - - -[[upgrade-8.0.0-alpha1]] -[float] -=== Upgrades - -Lucene:: -* Upgrade to Lucene 8.9.0 {es-pull}74729[#74729] - diff --git a/docs/reference/release-notes/8.0.0-alpha2.asciidoc b/docs/reference/release-notes/8.0.0-alpha2.asciidoc deleted file mode 100644 index 3d3c0f6335d26..0000000000000 --- a/docs/reference/release-notes/8.0.0-alpha2.asciidoc +++ /dev/null @@ -1,77 +0,0 @@ -[[release-notes-8.0.0-alpha2]] -== {es} version 8.0.0-alpha2 - -Also see <>. - -[[breaking-8.0.0-alpha2]] -[float] -=== Breaking changes - - -ILM+SLM:: -* Make the ILM `freeze` action a no-op {es-pull}77158[#77158] (issue: {es-issue}70192[#70192]) - -Infra/Core:: -* Fail index creation using custom data path {es-pull}76792[#76792] (issue: {es-issue}73168[#73168]) -* System indices treated as restricted indices {es-pull}74212[#74212] (issue: {es-issue}69298[#69298]) - -License:: -* Set `xpack.security.enabled` to true for all licenses {es-pull}72300[#72300] - -Packaging:: -* Remove no-jdk distributions {es-pull}76896[#76896] (issue: {es-issue}65109[#65109]) - -Security:: -* Remove `kibana_dashboard_only_user` reserved role {es-pull}76507[#76507] - - -[[enhancement-8.0.0-alpha2]] -[float] -=== Enhancements - -Authentication:: -* Autogenerate and print elastic password on startup {es-pull}77291[#77291] -* Enroll Kibana API uses Service Accounts {es-pull}76370[#76370] -* Add `reset-kibana-system-user` tool {es-pull}77322[#77322] - -ILM+SLM:: -* Allow for setting the total shards per node in the Allocate ILM action {es-pull}76794[#76794] (issue: {es-issue}76775[#76775]) - -Infra/Core:: -* Retain reference to stdout for exceptional cases {es-pull}77460[#77460] - -Ingest:: -* Add support for `_meta` field to ingest pipelines {es-pull}76381[#76381] - -Machine Learning:: -* Adding new PUT trained model vocabulary endpoint {es-pull}77387[#77387] -* Creating new PUT model definition part API {es-pull}76987[#76987] - -Network:: -* Enable LZ4 transport compression by default {es-pull}76326[#76326] (issue: {es-issue}73497[#73497]) - -Search:: -* [REST API Compatibility] Nested path and filter sort options {es-pull}76022[#76022] (issues: {es-issue}42809[#42809], {es-issue}51816[#51816]) -* [REST API Compatibility] `CommonTermsQuery` and `cutoff_frequency` parameter {es-pull}75896[#75896] (issues: {es-issue}42654[#42654], {es-issue}51816[#51816]) -* [REST API Compatibility] Allow first empty line for `_msearch` {es-pull}75886[#75886] (issues: {es-issue}41011[#41011], {es-issue}51816[#51816]) - - -Security:: -* Adding base `RestHandler` class for Enrollment APIs {es-pull}76564[#76564] (issue: {es-issue}76097[#76097]) -* Generate and store password hash for elastic user {es-pull}76276[#76276] (issue: {es-issue}75310[#75310]) -* Set elastic password and generate enrollment token {es-pull}75816[#75816] (issue: {es-issue}75310[#75310]) -* Add `elasticsearch-enroll-node` tool {es-pull}77292[#77292] -* Default hasher to `PBKDF2_STRETCH` on FIPS mode {es-pull}76274[#76274] - - -[[bug-8.0.0-alpha2]] -[float] -=== Bug Fixes - -ILM+SLM:: -* Ensuring that the `ShrinkAction` does not hang if total shards per node is too low {es-pull}76732[#76732] (issue: {es-issue}44070[#44070]) - - -Security:: -* Allow access to restricted system indices for reserved system roles {es-pull}76845[#76845] - diff --git a/docs/reference/release-notes/8.0.0-beta1.asciidoc b/docs/reference/release-notes/8.0.0-beta1.asciidoc deleted file mode 100644 index 87a2283c9d423..0000000000000 --- a/docs/reference/release-notes/8.0.0-beta1.asciidoc +++ /dev/null @@ -1,238 +0,0 @@ -:es-issue: https://github.com/elastic/elasticsearch/issues/ -:es-pull: https://github.com/elastic/elasticsearch/pull/ - -[[release-notes-8.0.0-beta1]] -== {es} version 8.0.0-beta1 - -Also see <>. - -[[known-issues-8.0.0-beta1]] -[float] -=== Known issues - -* If you're using {ml}, it's not safe to upgrade to `8.0.0-beta1` - if the cluster you're upgrading was first used prior to `7.7.0`. - If you attempt such an upgrade the filtered aliases against - the {ml} results indices will lose their filters, causing {ml} - anomaly detection results for different jobs to be mixed. - Wait for `8.0.0-rc1` before attempting to test upgrading a cluster - where {ml} was used prior to version `7.7.0`. -* System indices are included in wildcard queries when using the `*` pattern. Do - not rely on this behavior, because system indices will be hidden in the 8.0 - release. If you need to access system indices, specify the index name, use a - more specific wildcard, or use the `expand_wildcards` parameter in your query. - -* By default, system indices are included in snapshots of all indices, and are - restored when no indices are specified. This behavior results from including - system indices in the implicit default wildcard (`*`) for snapshot and restore - operations, which was intended for backwards compatibility in 7.x versions. - In 8.0, all system indices should be accessed through the `feature_states` - parameter of the snapshot or restore request. - - -[[breaking-8.0.0-beta1]] -[float] -=== Breaking changes - -ILM+SLM:: -* Always enforce default tier preference {es-pull}79751[#79751] (issue: {es-issue}76147[#76147]) -* Validate that snapshot repository exists for ILM policies at creation/update time {es-pull}78468[#78468] (issues: {es-issue}72957[#72957], {es-issue}77657[#77657]) -* Default `ENFORCE_DEFAULT_TIER_PREFERENCE` to `true` {es-pull}79275[#79275] (issues: {es-issue}76147[#76147], {es-issue}79210[#79210]) - -Indices APIs:: -* Remove endpoint for freezing indices {es-pull}78918[#78918] (issues: {es-issue}70192[#70192], {es-issue}77273[#77273]) - -Infra/Core:: -* Remove Joda dependency {es-pull}79007[#79007] -* Remove Joda support from date formatters {es-pull}78990[#78990] - -Ingest:: -* Remove default maxmind GeoIP databases from distribution {es-pull}78362[#78362] (issue: {es-issue}68920[#68920]) - -License:: -* Enforce license expiration {es-pull}79671[#79671] - -Machine Learning:: -* Remove `allow_no_datafeeds` and `allow_no_jobs` parameters from APIs {es-pull}80048[#80048] (issue: {es-issue}60732[#60732]) - -Packaging:: -* Require Java 17 for running Elasticsearch {es-pull}79873[#79873] - - - -[[deprecation-8.0.0-beta1]] -[float] -=== Deprecations - -Authentication:: -* Deprecate setup-passwords tool {es-pull}76902[#76902] - -CRUD:: -* Remove `indices_segments` 'verbose' parameter {es-pull}78451[#78451] (issue: {es-issue}75955[#75955]) - -Monitoring:: -* Add deprecation info API entries for deprecated monitoring settings {es-pull}78799[#78799] -* Automatically install monitoring templates at plugin initialization {es-pull}78350[#78350] -* Remove Monitoring ingest pipelines {es-pull}77459[#77459] (issue: {es-issue}50770[#50770]) - - - -[[feature-8.0.0-beta1]] -[float] -=== New features - -Security:: -* Auto-configure TLS for new nodes of new clusters {es-pull}77231[#77231] (issues: {es-issue}75144[#75144], {es-issue}75704[#75704]) - -[[enhancement-8.0.0-beta1]] -[float] -=== Enhancements - -Authentication:: -* New CLI tool to reset password for built-in users {es-pull}79709[#79709] -* Upgrade to UnboundID LDAP SDK v6.0.2 {es-pull}79332[#79332] -* Auto-configure the `elastic` user password {es-pull}78306[#78306] - -Cluster Coordination:: -* Prevent downgrades from 8.x to 7.x {es-pull}78586[#78586] (issues: {es-issue}42489[#42489], {es-issue}52414[#52414]) -* Prevent downgrades from 8.x to 7.x {es-pull}78638[#78638] (issues: {es-issue}42489[#42489], {es-issue}52414[#52414]) - -Data streams:: -* Data stream support read and write with custom routing and partition size {es-pull}74394[#74394] (issue: {es-issue}74390[#74390]) - -ILM+SLM:: - -* Inject migrate action regardless of allocate action {es-pull}79090[#79090] (issue: {es-issue}76147[#76147]) - -Infra/Core:: -* Check whether stdout is a real console {es-pull}79882[#79882] -* Share int, long, float, double, and byte pages {es-pull}75053[#75053] -* Revert "Deprecate resolution loss on date field (#78921)" {es-pull}79914[#79914] (issue: {es-issue}78921[#78921]) -* Add two missing entries to the deprecation information API {es-pull}80290[#80290] (issue: {es-issue}80233[#80233]) - -Infra/Scripting:: -* Add nio Buffers to Painless {es-pull}79870[#79870] (issue: {es-issue}79867[#79867]) -* Restore the scripting general cache {es-pull}79453[#79453] (issue: {es-issue}62899[#62899]) - -Ingest:: -* Remove binary field after attachment processor execution {es-pull}79172[#79172] -* Improving cache lookup to reduce recomputing / searches {es-pull}77259[#77259] - - -License:: -* Enforce Transport TLS check on all licenses {es-pull}79602[#79602] (issue: {es-issue}75292[#75292]) - -Machine Learning:: -* Add inference time configuration overrides {es-pull}78441[#78441] (issue: {es-issue}77799[#77799]) -* Optimize source extraction for `categorize_text` aggregation {es-pull}79099[#79099] -* The Linux build platform for the {ml} C++ code is now CentOS 7 running gcc 10.3. {ml-pull}2028[#2028] -* Make ML indices hidden when the node becomes master {es-pull}77416[#77416] (issue: {es-issue}53674[#53674]) - -Mapping:: -* Add support for configuring HNSW parameters {es-pull}79193[#79193] (issue: {es-issue}78473[#78473]) -* Extend `dense_vector` to support indexing vectors {es-pull}78491[#78491] (issue: {es-issue}78473[#78473]) - -Monitoring:: -* Add previously removed Monitoring settings back for 8.0 {es-pull}78784[#78784] -* Change Monitoring plugin cluster alerts to not install by default {es-pull}79657[#79657] - -Packaging:: -* Allow total memory to be overridden {es-pull}78750[#78750] (issue: {es-issue}65905[#65905]) - -Search:: -* Node level can match action {es-pull}78765[#78765] -* TSDB: Add time series information to field caps {es-pull}78790[#78790] (issue: {es-issue}74660[#74660]) -* Add new kNN search endpoint {es-pull}79013[#79013] (issue: {es-issue}78473[#78473]) -* Disallow kNN searches on nested vector fields {es-pull}79403[#79403] (issue: {es-issue}78473[#78473]) -* Ensure kNN search respects authorization {es-pull}79693[#79693] (issue: {es-issue}78473[#78473]) -* Load kNN vectors format with mmapfs {es-pull}78724[#78724] (issue: {es-issue}78473[#78473]) -* Support cosine similarity in kNN search {es-pull}79500[#79500] -* Node level can match action {es-pull}78765[#78765] - - - -Security:: -* Add v7 `restCompat` for invalidating API key with the id field {es-pull}78664[#78664] (issue: {es-issue}66671[#66671]) -* Print enrollment token on startup {es-pull}78293[#78293] -* Startup check for security implicit behavior change {es-pull}76879[#76879] -* Update auto-generated credentials output {es-pull}79755[#79755] (issue: {es-issue}79312[#79312]) -* CLI tool to reconfigure nodes to enroll {es-pull}79690[#79690] (issue: {es-issue}7718[#7718]) -* Security auto-configuration for packaged installations {es-pull}75144[#75144] (issue: {es-issue}78306[#78306]) -* Update to OpenSAML 4 {es-pull}77012[#77012] (issue: {es-issue}71983[#71983]) - -Snapshot/Restore:: -* Allow listing older repositories {es-pull}78244[#78244] -* Optimize SLM Policy Queries {es-pull}79341[#79341] (issue: {es-issue}79321[#79321]) -* Upgrade repository-hdfs plugin to Hadoop 3 {es-pull}76897[#76897] - -Transform:: -* Prevent old beta transforms from starting {es-pull}79712[#79712] - -TSDB:: -* Automatically add timestamp mapper {es-pull}79136[#79136] -* Create a coordinating node level reader for tsdb {es-pull}79197[#79197] -* Fix TSDB shrink test in multi-version cluster {es-pull}79940[#79940] (issue: {es-issue}79936[#79936]) -* Do not allow shadowing metrics or dimensions {es-pull}79757[#79757] - -[[bug-8.0.0-beta1]] -[float] -=== Bug fixes - -Infra/Core:: -* Prevent stack overflow in rounding {es-pull}80450[#80450] - -Infra/Settings:: -* Stricter `UpdateSettingsRequest` parsing on the REST layer {es-pull}79227[#79227] (issue: {es-issue}29268[#29268]) -* Set Auto expand replica on deprecation log data stream {es-pull}79226[#79226] (issue: {es-issue}78991[#78991]) - -Machine Learning:: -* Add timeout parameter for delete trained models API {es-pull}79739[#79739] (issue: {es-issue}77070[#77070]) -* Fix `MlMetadata` backwards compatibility bug with 7.13 through 7.16 {es-pull}80041[#80041] -* Tone down ML unassigned job notifications {es-pull}79578[#79578] (issue: {es-issue}79270[#79270]) -* Use a new annotations index for future annotations {es-pull}79006[#79006] (issue: {es-issue}78439[#78439]) - -Search:: -* Remove unsafe assertion in wildcard field {es-pull}78966[#78966] - -Snapshot/Restore:: -* Don't fill stack traces in `SnapshotShardFailure` {es-pull}80009[#80009] (issue: {es-issue}79718[#79718]) - - - -[[regression-8.0.0-beta1]] -[float] -=== Regressions - -Search:: -* Disable numeric sort optimization conditionally {es-pull}78103[#78103] - - - -[[upgrade-8.0.0-beta1]] -[float] -=== Upgrades - -Search:: -* Update Lucene 9 snapshot {es-pull}79701[#79701] {es-pull}79138[#79138] {es-pull}78548[#78548] {es-pull}78286[#78286] {es-pull}73324[#73324] {es-pull}79461[#79461] - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/reference/release-notes/8.0.0-rc1.asciidoc b/docs/reference/release-notes/8.0.0-rc1.asciidoc deleted file mode 100644 index ced057be013bd..0000000000000 --- a/docs/reference/release-notes/8.0.0-rc1.asciidoc +++ /dev/null @@ -1,112 +0,0 @@ -[[release-notes-8.0.0-rc1]] -== {es} version 8.0.0-rc1 - -Also see <>. - -[[known-issues-8.0.0-rc1]] -[float] -=== Known issues - -* **Do not upgrade production clusters to {es} 8.0.0-rc1.** {es} 8.0.0-rc1 is -a pre-release of {es} 8.0 and is intended for testing purposes only. -+ -Upgrades from pre-release builds are not supported and could result in errors or -data loss. If you upgrade from a released version, such as 7.16, to a -pre-release version for testing, discard the contents of the cluster when you are -done. Do not attempt to upgrade to the final 8.0 release. - -* For {es} 8.0.0-rc1, the {ref}/sql-jdbc.html[{es} SQL JDBC driver] requires -Java 17 or newer. In {es} 8.0.0-rc2, the JDBC driver will only require Java 8 -or newer. {es-pull}82325 - -[[breaking-8.0.0-rc1]] -[float] -=== Breaking changes - -Infra/Core:: -* All system indices are hidden indices {es-pull}79512[#79512] - -Snapshot/Restore:: -* Adjust snapshot index resolution behavior to be more intuitive {es-pull}79670[#79670] (issue: {es-issue}78320[#78320]) - -[[deprecation-8.0.0-rc1]] -[float] -=== Deprecations - -Engine:: -* Deprecate setting `max_merge_at_once_explicit` {es-pull}80574[#80574] - -Machine Learning:: -* Deprecate `estimated_heap_memory_usage_bytes` and replace with `model_size_bytes` {es-pull}80554[#80554] - -Search:: -* Configure `IndexSearcher.maxClauseCount()` based on node characteristics {es-pull}81525[#81525] (issue: {es-issue}46433[#46433]) - -Transform:: -* Improve transform deprecation messages {es-pull}81847[#81847] (issues: {es-issue}81521[#81521], {es-issue}81523[#81523]) - -[[enhancement-8.0.0-rc1]] -[float] -=== Enhancements - -Authorization:: -* Granting `kibana_system` reserved role access to "all" privileges to `.internal.preview.alerts*` index {es-pull}80889[#80889] (issues: {es-issue}76624[#76624], {es-issue}80746[#80746], {es-issue}116374[#116374]) -* Granting `kibana_system` reserved role access to "all" privileges to .preview.alerts* index {es-pull}80746[#80746] -* Granting editor and viewer roles access to alerts-as-data indices {es-pull}81285[#81285] - -ILM+SLM:: -* Make unchanged ILM policy updates into noop {es-pull}82240[#82240] (issue: {es-issue}82065[#82065]) - -Indices APIs:: -* Batch rollover cluster state updates {es-pull}79945[#79945] (issues: {es-issue}77466[#77466], {es-issue}79782[#79782]) -* Reuse `MappingMetadata` instances in Metadata class {es-pull}80348[#80348] (issues: {es-issue}69772[#69772], {es-issue}77466[#77466]) - -Infra/Settings:: -* Implement setting deduplication via string interning {es-pull}80493[#80493] (issues: {es-issue}77466[#77466], {es-issue}78892[#78892]) - -Ingest:: -* Extract more standard metadata from binary files {es-pull}78754[#78754] (issue: {es-issue}22339[#22339]) - -Machine Learning:: -* Add `deployment_stats` to trained model stats {es-pull}80531[#80531] -* The setting `use_auto_machine_memory_percent` now defaults `max_model_memory_limit` {es-pull}80532[#80532] (issue: {es-issue}80415[#80415]) - -Monitoring:: -* Adding default templates for Metricbeat ECS data {es-pull}81744[#81744] - -Network:: -* Improve slow inbound handling to include response type {es-pull}80425[#80425] - -Recovery:: -* Fix `PendingReplicationActions` submitting lots of `NOOP` tasks to `GENERIC` {es-pull}82092[#82092] (issues: {es-issue}77466[#77466], {es-issue}79837[#79837]) - -Reindex:: -* Do not scroll if max docs is less than scroll size (update/delete by query) {es-pull}81654[#81654] (issue: {es-issue}54270[#54270]) - -Security:: -* URL option for `BaseRunAsSuperuserCommand` {es-pull}81025[#81025] (issue: {es-issue}80481[#80481]) - -[[bug-8.0.0-rc1]] -[float] -=== Bug fixes - -Autoscaling:: -* Autoscaling use adjusted total memory {es-pull}80528[#80528] (issue: {es-issue}78750[#78750]) - -Data streams:: -* Prohibit restoring a data stream alias with a conflicting write data stream {es-pull}81217[#81217] (issue: {es-issue}80976[#80976]) - -ILM+SLM:: -* Less verbose serialization of snapshot failure in SLM metadata {es-pull}80942[#80942] (issue: {es-issue}77466[#77466]) - -Indices APIs:: -* Fix `ComposableIndexTemplate` equals when `composed_of` is null {es-pull}80864[#80864] - -Infra/REST API:: -* Handle exceptions thrown from `RestCompatibleVersionHelper` {es-pull}80253[#80253] (issues: {es-issue}78214[#78214], {es-issue}79060[#79060]) - -Ingest:: -* Adjust default geoip logging to be less verbose {es-pull}81404[#81404] (issue: {es-issue}81356[#81356]) - -Machine Learning:: -* Set model state compatibility version to 8.0.0 {ml-pull}2139[#2139] diff --git a/docs/reference/release-notes/8.0.0-rc2.asciidoc b/docs/reference/release-notes/8.0.0-rc2.asciidoc deleted file mode 100644 index 9d3f93e006847..0000000000000 --- a/docs/reference/release-notes/8.0.0-rc2.asciidoc +++ /dev/null @@ -1,131 +0,0 @@ -[[release-notes-8.0.0-rc2]] -== {es} version 8.0.0-rc2 - -Also see <>. - -[[known-issues-8.0.0-rc2]] -[float] -=== Known issues - -* **Do not upgrade production clusters to {es} 8.0.0-rc2.** {es} 8.0.0-rc2 is -a pre-release of {es} 8.0 and is intended for testing purposes only. -+ -Upgrades from pre-release builds are not supported and could result in errors or -data loss. If you upgrade from a released version, such as 7.16, to a -pre-release version for testing, discard the contents of the cluster when you are -done. Do not attempt to upgrade to the final 8.0 release. - -* If you installed {es} from an archive on an aarch64 platform like Linux ARM or macOS M1, the -`elastic` user password and {kib} enrollment token are not generated -automatically when starting your node for the first time. -+ --- -After the node starts, generate the `elastic` password with the -<> tool: - -[source,bash] ----- -bin/elasticsearch-reset-password -u elastic ----- - -Then, create an enrollment token for {kib} with the -<> tool: - -[source,bash] ----- -bin/elasticsearch-create-enrollment-token -s kibana ----- --- -[[deprecation-8.0.0-rc2]] -[float] -=== Deprecations - -Engine:: -* Deprecate setting `max_merge_at_once_explicit` {es-pull}80574[#80574] - -Search:: -* Configure `IndexSearcher.maxClauseCount()` based on node characteristics {es-pull}81525[#81525] (issue: {es-issue}46433[#46433]) - - -[[feature-8.0.0-rc2]] -[float] -=== New features - -Snapshot/Restore:: -* Support IAM roles for Kubernetes service accounts {es-pull}81255[#81255] (issue: {es-issue}52625[#52625]) - -Watcher:: -* Use `startsWith` rather than exact matches for Watcher history template names {es-pull}82396[#82396] - - -[[enhancement-8.0.0-rc2]] -[float] -=== Enhancements - -Cluster Coordination:: -* Make `TaskBatcher` less lock-heavy {es-pull}82227[#82227] (issue: {es-issue}77466[#77466]) - -ILM+SLM:: -* Avoid unnecessary `LifecycleExecutionState` recalculation {es-pull}81558[#81558] (issues: {es-issue}77466[#77466], {es-issue}79692[#79692]) -* Make unchanged ILM policy updates into no-op {es-pull}82240[#82240] (issue: {es-issue}82065[#82065]) - -Infra/Core:: -* Prevent upgrades to 8.0 without first upgrading to the last 7.x release {es-pull}82321[#82321] (issue: {es-issue}81865[#81865]) - -Machine Learning:: -* Add `deployment_stats` to trained model stats {es-pull}80531[#80531] -* The setting `use_auto_machine_memory_percent` now defaults to `max_model_memory_limit` {es-pull}80532[#80532] (issue: {es-issue}80415[#80415]) - -Network:: -* Improve slow inbound handling to include response type {es-pull}80425[#80425] - -Packaging:: -* Convert repository plugins to modules {es-pull}81870[#81870] (issue: {es-issue}81652[#81652]) - -Search:: -* Check nested fields earlier in kNN search {es-pull}80516[#80516] (issue: {es-issue}78473[#78473]) - - -[[bug-8.0.0-rc2]] -[float] -=== Bug fixes - -Autoscaling:: -* Use adjusted total memory instead of total memory {es-pull}80528[#80528] (issue: {es-issue}78750[#78750]) - -Infra/Scripting:: -* Fix duplicated allow lists upon script engine creation {es-pull}82820[#82820] (issue: {es-issue}82778[#82778]) - -Ingest:: -* Adjust default geoip logging to be less verbose {es-pull}81404[#81404] (issue: {es-issue}81356[#81356]) - -Machine Learning:: -* Check that `total_definition_length` is consistent before starting a deployment {es-pull}80553[#80553] -* Fail inference processor more consistently on certain error types {es-pull}81475[#81475] -* Optimize the job stats call to do fewer searches {es-pull}82362[#82362] (issue: {es-issue}82255[#82255]) - -Recovery:: -* Make shard started response handling only return after the cluster state update completes {es-pull}82790[#82790] (issue: {es-issue}81628[#81628]) - -Search:: -* Reject zero-length vectors when using cosine similarity {es-pull}82241[#82241] (issue: {es-issue}81167[#81167]) - -Security:: -* Auto-generated TLS files under fixed config path {es-pull}81547[#81547] (issue: {es-issue}81057[#81057]) -* Bind to non-localhost for transport in some cases {es-pull}82973[#82973] -* Correct file ownership on node reconfiguration {es-pull}82789[#82789] (issue: {es-issue}80990[#80990]) -* Display security auto-configuration with fancy unicode {es-pull}82740[#82740] (issue: {es-issue}82364[#82364]) - -Snapshot/Restore:: -* Remove custom metadata if there is nothing to restore {es-pull}81373[#81373] (issues: {es-issue}81247[#81247], {es-issue}82019[#82019]) - - -[[upgrade-8.0.0-rc2]] -[float] -=== Upgrades - -Infra/Logging:: -* Upgrade ECS logging layout to latest version {es-pull}80500[#80500] - -Search:: -* Upgrade to released lucene 9.0.0 {es-pull}81426[#81426] diff --git a/docs/reference/release-notes/8.0.0.asciidoc b/docs/reference/release-notes/8.0.0.asciidoc deleted file mode 100644 index 835b7b036f954..0000000000000 --- a/docs/reference/release-notes/8.0.0.asciidoc +++ /dev/null @@ -1,717 +0,0 @@ -[[release-notes-8.0.0]] -== {es} version 8.0.0 - -The following list are changes in 8.0.0 as compared to 7.17.0, and combines -release notes from the 8.0.0-alpha1, -alpha2, -beta1, -rc1 and -rc2 releases. - -Also see <>. - -[[known-issues-8.0.0]] -[float] -=== Known issues - -* If you installed {es} from an archive on an aarch64 platform like Linux ARM or macOS M1, the -`elastic` user password and {kib} enrollment token are not generated -automatically when starting your node for the first time. -+ --- -After the node starts, generate the `elastic` password with the -<> tool: - -[source,bash] ----- -bin/elasticsearch-reset-password -u elastic ----- - -Then, create an enrollment token for {kib} with the -<> tool: - -[source,bash] ----- -bin/elasticsearch-create-enrollment-token -s kibana ----- --- -// tag::jackson-filtering-bug[] -* Parsing a request when the last element in an array is filtered out -(for instance using `_source_includes`) fails. This -is due to https://github.com/FasterXML/jackson-core/issues/882[a bug in Jackson parser]. -Fixed in {es} 8.6.1 ({es-pull}92480[#91456]) -// end::jackson-filtering-bug[] - - -[[breaking-8.0.0]] -[float] -=== Breaking changes - -Aggregations:: -* Percentiles aggregation: disallow specifying same percentile values twice {es-pull}52257[#52257] (issue: {es-issue}51871[#51871]) -* Remove adjacency matrix setting {es-pull}46327[#46327] (issues: {es-issue}46257[#46257], {es-issue}46324[#46324]) -* Remove `MovingAverage` pipeline aggregation {es-pull}39328[#39328] -* Remove deprecated `_time` and `_term` sort orders {es-pull}39450[#39450] -* Remove deprecated date histo interval {es-pull}75000[#75000] - -Allocation:: -* Require single data nodes to respect disk watermarks {es-pull}73737[#73737] (issues: {es-issue}55805[#55805], {es-issue}73733[#73733]) -* Remove `include_relocations` setting {es-pull}47717[#47717] (issues: {es-issue}46079[#46079], {es-issue}47443[#47443]) - -Analysis:: -* Cleanup versioned deprecations in analysis {es-pull}41560[#41560] (issue: {es-issue}41164[#41164]) -* Remove preconfigured `delimited_payload_filter` {es-pull}43686[#43686] (issues: {es-issue}41560[#41560], {es-issue}43684[#43684]) - -Authentication:: -* Always add file and native realms unless explicitly disabled {es-pull}69096[#69096] (issue: {es-issue}50892[#50892]) -* Do not set a NameID format in Policy by default {es-pull}44090[#44090] (issue: {es-issue}40353[#40353]) -* Make order setting mandatory for Realm config {es-pull}51195[#51195] (issue: {es-issue}37614[#37614]) - -CCR:: -* Avoid auto following leader system indices in CCR {es-pull}72815[#72815] (issue: {es-issue}67686[#67686]) - -Cluster Coordination:: -* Remove join timeout {es-pull}60873[#60873] (issue: {es-issue}60872[#60872]) -* Remove node filters for voting config exclusions {es-pull}55673[#55673] (issues: {es-issue}47990[#47990], {es-issue}50836[#50836]) -* Remove support for delaying state recovery pending master {es-pull}53845[#53845] (issue: {es-issue}51806[#51806]) - -Distributed:: -* Remove synced flush {es-pull}50882[#50882] (issues: {es-issue}50776[#50776], {es-issue}50835[#50835]) -* Remove the `cluster.remote.connect` setting {es-pull}54175[#54175] (issue: {es-issue}53924[#53924]) - -Engine:: -* Force merge should reject requests with `only_expunge_deletes` and `max_num_segments` set {es-pull}44761[#44761] (issue: {es-issue}43102[#43102]) -* Remove per-type indexing stats {es-pull}47203[#47203] (issue: {es-issue}41059[#41059]) -* Remove translog retention settings {es-pull}51697[#51697] (issue: {es-issue}50775[#50775]) - -Features/CAT APIs:: -* Remove the deprecated `local` parameter for `_cat/indices` {es-pull}64868[#64868] (issue: {es-issue}62198[#62198]) -* Remove the deprecated `local` parameter for `_cat/shards` {es-pull}64867[#64867] (issue: {es-issue}62197[#62197]) - -Features/Features:: -* Remove deprecated `._tier` allocation filtering settings {es-pull}73074[#73074] (issue: {es-issue}72835[#72835]) - -Features/ILM+SLM:: -* Add lower bound on `poll_interval` {es-pull}39593[#39593] (issue: {es-issue}39163[#39163]) -* Make the ILM `freeze` action a no-op {es-pull}77158[#77158] (issue: {es-issue}70192[#70192]) -* Always enforce default tier preference {es-pull}79751[#79751] (issue: {es-issue}76147[#76147]) -* Validate that snapshot repository exists for ILM policies at creation/update time {es-pull}78468[#78468] (issues: {es-issue}72957[#72957], {es-issue}77657[#77657]) -* Default `cluster.routing.allocation.enforce_default_tier_preference` to `true` {es-pull}79275[#79275] (issues: {es-issue}76147[#76147], {es-issue}79210[#79210]) - -Features/Indices APIs:: -* Change `prefer_v2_templates` parameter to default to true {es-pull}55489[#55489] (issues: {es-issue}53101[#53101], {es-issue}55411[#55411]) -* Remove deprecated `_upgrade` API {es-pull}64732[#64732] (issue: {es-issue}21337[#21337]) -* Remove local parameter for get field mapping request {es-pull}55100[#55100] (issue: {es-issue}55099[#55099]) -* Remove `include_type_name` parameter from REST layer {es-pull}48632[#48632] (issue: {es-issue}41059[#41059]) -* Remove the `template` field in index templates {es-pull}49460[#49460] (issue: {es-issue}21009[#21009]) -* Remove endpoint for freezing indices {es-pull}78918[#78918] (issues: {es-issue}70192[#70192], {es-issue}77273[#77273]) - -Features/Watcher:: -* Move watcher history to data stream {es-pull}64252[#64252] - -Geo:: -* Disallow creating `geo_shape` mappings with deprecated parameters {es-pull}70850[#70850] (issue: {es-issue}32039[#32039]) -* Remove bounding box query `type` parameter {es-pull}74536[#74536] - -Infra/Circuit Breakers:: -* Fixed synchronizing inflight breaker with internal variable {es-pull}40878[#40878] - -Infra/Core:: -* Limit processors by available processors {es-pull}44894[#44894] (issue: {es-issue}44889[#44889]) -* Remove `nodes/0` folder prefix from data path {es-pull}42489[#42489] -* Remove `bootstrap.system_call_filter` setting {es-pull}72848[#72848] -* Remove `fixed_auto_queue_size` threadpool type {es-pull}52280[#52280] -* Remove `node.max_local_storage_nodes` {es-pull}42428[#42428] (issue: {es-issue}42426[#42426]) -* Remove camel case named date/time formats {es-pull}60044[#60044] -* Remove legacy role settings {es-pull}71163[#71163] (issues: {es-issue}54998[#54998], {es-issue}66409[#66409], {es-issue}71143[#71143]) -* Remove `processors` setting {es-pull}45905[#45905] (issue: {es-issue}45855[#45855]) -* Remove the `local` parameter of `/_cat/nodes` {es-pull}50594[#50594] (issues: {es-issue}50088[#50088], {es-issue}50499[#50499]) -* Remove the listener thread pool {es-pull}53314[#53314] (issue: {es-issue}53049[#53049]) -* Remove the node local storage setting {es-pull}54381[#54381] (issue: {es-issue}54374[#54374]) -* Remove the `pidfile` setting {es-pull}45940[#45940] (issue: {es-issue}45938[#45938]) -* Removes `week_year` date format {es-pull}63384[#63384] (issue: {es-issue}60707[#60707]) -* System indices treated as restricted indices {es-pull}74212[#74212] (issue: {es-issue}69298[#69298]) -* Remove Joda dependency {es-pull}79007[#79007] -* Remove Joda support from date formatters {es-pull}78990[#78990] -* All system indices are hidden indices {es-pull}79512[#79512] - -Infra/Logging:: -* Remove slowlog level {es-pull}57591[#57591] (issue: {es-issue}56171[#56171]) - -Infra/Plugins:: -* Remove deprecated basic license feature enablement settings {es-pull}56211[#56211] (issue: {es-issue}54745[#54745]) - -Infra/REST API:: -* Remove content type required setting {es-pull}61043[#61043] -* Remove deprecated endpoints containing `_xpack` {es-pull}48170[#48170] (issue: {es-issue}35958[#35958]) -* Remove deprecated endpoints of hot threads API {es-pull}55109[#55109] (issue: {es-issue}52640[#52640]) -* Allow parsing Content-Type and Accept headers with version {es-pull}61427[#61427] - -Infra/Resiliency:: -* Fail node containing ancient closed index {es-pull}44264[#44264] (issues: {es-issue}21830[#21830], {es-issue}41731[#41731], {es-issue}44230[#44230]) - -Infra/Scripting:: -* Consolidate script parsing from object {es-pull}59507[#59507] (issue: {es-issue}59391[#59391]) -* Move `script_cache` into _nodes/stats {es-pull}59265[#59265] (issues: {es-issue}50152[#50152], {es-issue}59262[#59262]) -* Remove general cache settings {es-pull}59262[#59262] (issue: {es-issue}50152[#50152]) - -Infra/Settings:: -* Change default value of `action.destructive_requires_name` to `true` {es-pull}66908[#66908] (issue: {es-issue}61074[#61074]) -* Forbid settings without a namespace {es-pull}45947[#45947] (issues: {es-issue}45905[#45905], {es-issue}45940[#45940]) - -Ingest:: -* Remove default maxmind GeoIP databases from distribution {es-pull}78362[#78362] (issue: {es-issue}68920[#68920]) - -License:: -* Set `xpack.security.enabled` to true for all licenses {es-pull}72300[#72300] -* Enforce license expiration {es-pull}79671[#79671] - -Machine Learning:: -* Remove deprecated `_xpack` endpoints {es-pull}59870[#59870] (issues: {es-issue}35958[#35958], {es-issue}48170[#48170]) -* Remove the ability to update datafeed's `job_id` {es-pull}44752[#44752] (issue: {es-issue}44616[#44616]) -* Remove `allow_no_datafeeds` and `allow_no_jobs` parameters from APIs {es-pull}80048[#80048] (issue: {es-issue}60732[#60732]) - -Mapping:: -* Remove `boost` mapping parameter {es-pull}62639[#62639] (issue: {es-issue}62623[#62623]) -* Remove support for chained multi-fields {es-pull}42333[#42333] (issues: {es-issue}41267[#41267], {es-issue}41926[#41926]) -* Remove support for string in `unmapped_type` {es-pull}45675[#45675] -* Removes typed URLs from mapping APIs {es-pull}41676[#41676] - -Network:: -* Remove client feature tracking {es-pull}44929[#44929] (issues: {es-issue}31020[#31020], {es-issue}42538[#42538], {es-issue}44667[#44667]) -* Remove escape hatch permitting incompatible builds {es-pull}65753[#65753] (issues: {es-issue}65249[#65249], {es-issue}65601[#65601]) - -Packaging:: -* Remove SysV init support {es-pull}51716[#51716] (issue: {es-issue}51480[#51480]) -* Remove support for `JAVA_HOME` {es-pull}69149[#69149] (issue: {es-issue}55820[#55820]) -* Remove no-jdk distributions {es-pull}76896[#76896] (issue: {es-issue}65109[#65109]) -* Require Java 17 for running Elasticsearch {es-pull}79873[#79873] - -Recovery:: -* Remove dangling index auto import functionality {es-pull}59698[#59698] (issue: {es-issue}48366[#48366]) - -Reindex:: -* Reindex from Remote encoding {es-pull}41007[#41007] (issue: {es-issue}40303[#40303]) -* Reindex remove outer level size {es-pull}43373[#43373] (issues: {es-issue}24344[#24344], {es-issue}41894[#41894]) - -Rollup:: -* `RollupStart` endpoint should return OK if job already started {es-pull}41502[#41502] (issues: {es-issue}35928[#35928], {es-issue}39845[#39845]) - -Search:: -* Decouple shard allocation awareness from search and get requests {es-pull}45735[#45735] (issue: {es-issue}43453[#43453]) -* Fix range query on date fields for number inputs {es-pull}63692[#63692] (issue: {es-issue}63680[#63680]) -* Make fuzziness reject illegal values earlier {es-pull}33511[#33511] -* Make remote cluster resolution stricter {es-pull}40419[#40419] (issue: {es-issue}37863[#37863]) -* Parse empty first line in msearch request body as action metadata {es-pull}41011[#41011] (issue: {es-issue}39841[#39841]) -* Remove `CommonTermsQuery` and `cutoff_frequency` param {es-pull}42654[#42654] (issue: {es-issue}37096[#37096]) -* Remove `type` query {es-pull}47207[#47207] (issue: {es-issue}41059[#41059]) -* Remove `use_field_mapping` format option for docvalue fields {es-pull}55622[#55622] -* Remove deprecated `SimpleQueryStringBuilder` parameters {es-pull}57200[#57200] -* Remove deprecated `search.remote` settings {es-pull}42381[#42381] (issues: {es-issue}33413[#33413], {es-issue}38556[#38556]) -* Remove deprecated sort options: `nested_path` and `nested_filter` {es-pull}42809[#42809] (issue: {es-issue}27098[#27098]) -* Remove deprecated vector functions {es-pull}48725[#48725] (issue: {es-issue}48604[#48604]) -* Remove support for `_type` in searches {es-pull}68564[#68564] (issues: {es-issue}41059[#41059], {es-issue}68311[#68311]) -* Remove support for sparse vectors {es-pull}48781[#48781] (issue: {es-issue}48368[#48368]) -* Remove the object format for `indices_boost` {es-pull}55078[#55078] -* Removes type from `TermVectors` APIs {es-pull}42198[#42198] (issue: {es-issue}41059[#41059]) -* Removes typed endpoint from search and related APIs {es-pull}41640[#41640] -* Set max allowed size for stored async response {es-pull}74455[#74455] (issue: {es-issue}67594[#67594]) -* `indices.query.bool.max_clause_count` now limits all query clauses {es-pull}75297[#75297] - -Security:: -* Remove obsolete security settings {es-pull}40496[#40496] -* Remove support of creating CA on the fly when generating certificates {es-pull}65590[#65590] (issue: {es-issue}61884[#61884]) -* Remove the `id` field from the `InvalidateApiKey` API {es-pull}66671[#66671] (issue: {es-issue}66317[#66317]) -* Remove the migrate tool {es-pull}42174[#42174] -* Compress audit logs {es-pull}64472[#64472] (issue: {es-issue}63843[#63843]) -* Remove insecure settings {es-pull}46147[#46147] (issue: {es-issue}45947[#45947]) -* Remove `kibana_dashboard_only_user` reserved role {es-pull}76507[#76507] - -Snapshot/Restore:: -* Blob store compress default to `true` {es-pull}40033[#40033] -* Get snapshots support for multiple repositories {es-pull}42090[#42090] (issue: {es-issue}41210[#41210]) -* Remove repository stats API {es-pull}62309[#62309] (issue: {es-issue}62297[#62297]) -* Remove frozen cache setting leniency {es-pull}71013[#71013] (issue: {es-issue}70341[#70341]) -* Adjust snapshot index resolution behavior to be more intuitive {es-pull}79670[#79670] (issue: {es-issue}78320[#78320]) - -TLS:: -* Reject misconfigured/ambiguous SSL server config {es-pull}45892[#45892] -* Remove support for configurable PKCS#11 keystores {es-pull}75404[#75404] -* Remove the client transport profile filter {es-pull}43236[#43236] - - - -[[breaking-java-8.0.0]] -[float] -=== Breaking Java changes - -Authentication:: -* Mandate x-pack REST handler installed {es-pull}71061[#71061] (issue: {es-issue}70523[#70523]) - -CCR:: -* Remove the `CcrClient` {es-pull}42816[#42816] - -CRUD:: -* Remove types from `BulkRequest` {es-pull}46983[#46983] (issue: {es-issue}41059[#41059]) -* Remove `Client.prepareIndex(index, type, id)` method {es-pull}48443[#48443] - - -Client:: -* Remove `SecurityClient` from x-pack {es-pull}42471[#42471] - -Features/ILM+SLM:: -* Remove the `ILMClient` {es-pull}42817[#42817] - -Features/Monitoring:: -* Remove `MonitoringClient` from x-pack {es-pull}42770[#42770] - -Features/Watcher:: -* Remove `WatcherClient` from x-pack {es-pull}42815[#42815] - -Infra/Core:: -* Remove `XPackClient` from x-pack {es-pull}42729[#42729] -* Remove the transport client {es-pull}42538[#42538] -* Remove transport client from x-pack {es-pull}42202[#42202] - -Infra/REST API:: -* Copy HTTP headers to `ThreadContext` strictly {es-pull}45945[#45945] - -Machine Learning:: -* Remove the `MachineLearningClient` {es-pull}43108[#43108] - -Mapping:: -* Remove type filter from `GetMappings` API {es-pull}47364[#47364] (issue: {es-issue}41059[#41059]) -* Remove `type` parameter from `PutMappingRequest.buildFromSimplifiedDef()` {es-pull}50844[#50844] (issue: {es-issue}41059[#41059]) -* Remove unused parameter from `MetadataFieldMapper.TypeParser#getDefault()` {es-pull}51219[#51219] -* Remove `type` parameter from `CIR.mapping(type, object...)` {es-pull}50739[#50739] (issue: {es-issue}41059[#41059]) - -Search:: -* Removes types from `SearchRequest` and `QueryShardContext` {es-pull}42112[#42112] - -Snapshot/Restore:: -* Remove deprecated repository methods {es-pull}42359[#42359] (issue: {es-issue}42213[#42213]) - - -[[deprecation-8.0.0]] -[float] -=== Deprecations - -Authentication:: -* Deprecate setup-passwords tool {es-pull}76902[#76902] - -CRUD:: -* Remove `indices_segments` 'verbose' parameter {es-pull}78451[#78451] (issue: {es-issue}75955[#75955]) - -Engine:: -* Deprecate setting `max_merge_at_once_explicit` {es-pull}80574[#80574] - -Machine Learning:: -* Deprecate `estimated_heap_memory_usage_bytes` and replace with `model_size_bytes` {es-pull}80554[#80554] - -Monitoring:: -* Add deprecation info API entries for deprecated monitoring settings {es-pull}78799[#78799] -* Automatically install monitoring templates at plugin initialization {es-pull}78350[#78350] -* Remove Monitoring ingest pipelines {es-pull}77459[#77459] (issue: {es-issue}50770[#50770]) - -Search:: -* Configure `IndexSearcher.maxClauseCount()` based on node characteristics {es-pull}81525[#81525] (issue: {es-issue}46433[#46433]) - -Transform:: -* Improve transform deprecation messages {es-pull}81847[#81847] (issues: {es-issue}81521[#81521], {es-issue}81523[#81523]) - -[[feature-8.0.0]] -[float] -=== New features - -Security:: -* Auto-configure TLS for new nodes of new clusters {es-pull}77231[#77231] (issues: {es-issue}75144[#75144], {es-issue}75704[#75704]) - -Snapshot/Restore:: -* Support IAM roles for Kubernetes service accounts {es-pull}81255[#81255] (issue: {es-issue}52625[#52625]) - -Watcher:: -* Use `startsWith` rather than exact matches for Watcher history template names {es-pull}82396[#82396] - - -[[enhancement-8.0.0]] -[float] -=== Enhancements - -Analysis:: -* Move `reload_analyzers` endpoint to x-pack {es-pull}43559[#43559] - -Authentication:: -* Reset elastic password CLI tool {es-pull}74892[#74892] (issues: {es-issue}70113[#70113], {es-issue}74890[#74890]) -* Autogenerate and print elastic password on startup {es-pull}77291[#77291] -* Enroll Kibana API uses Service Accounts {es-pull}76370[#76370] -* Add `reset-kibana-system-user` tool {es-pull}77322[#77322] -* New CLI tool to reset password for built-in users {es-pull}79709[#79709] -* Auto-configure the `elastic` user password {es-pull}78306[#78306] - -Authorization:: -* Granting `kibana_system` reserved role access to "all" privileges to `.internal.preview.alerts*` index {es-pull}80889[#80889] (issues: {es-issue}76624[#76624], {es-issue}80746[#80746], {es-issue}116374[#116374]) -* Granting `kibana_system` reserved role access to "all" privileges to .preview.alerts* index {es-pull}80746[#80746] -* Granting editor and viewer roles access to alerts-as-data indices {es-pull}81285[#81285] - -Cluster Coordination:: -* Prevent downgrades from 8.x to 7.x {es-pull}78586[#78586] (issues: {es-issue}42489[#42489], {es-issue}52414[#52414]) -* Prevent downgrades from 8.x to 7.x {es-pull}78638[#78638] (issues: {es-issue}42489[#42489], {es-issue}52414[#52414]) -* Make `TaskBatcher` less lock-heavy {es-pull}82227[#82227] (issue: {es-issue}77466[#77466]) - -Data streams:: -* Data stream support read and write with custom routing and partition size {es-pull}74394[#74394] (issue: {es-issue}74390[#74390]) - -EQL:: -* Add option for returning results from the tail of the stream {es-pull}64869[#64869] (issue: {es-issue}58646[#58646]) -* Introduce case insensitive variant `in~` {es-pull}68176[#68176] (issue: {es-issue}68172[#68172]) -* Optimize redundant `toString` {es-pull}71070[#71070] (issue: {es-issue}70681[#70681]) - -Engine:: -* Always use soft-deletes in `InternalEngine` {es-pull}50415[#50415] -* Remove translog retention policy {es-pull}51417[#51417] (issue: {es-issue}50775[#50775]) - -Features/CAT APIs:: -* Remove `size` and add `time` params to `_cat/threadpool` {es-pull}55736[#55736] (issue: {es-issue}54478[#54478]) - -Features/ILM+SLM:: -* Allow for setting the total shards per node in the Allocate ILM action {es-pull}76794[#76794] (issue: {es-issue}76775[#76775]) -* Inject migrate action regardless of allocate action {es-pull}79090[#79090] (issue: {es-issue}76147[#76147]) -* Make unchanged ILM policy updates into noop {es-pull}82240[#82240] (issue: {es-issue}82065[#82065]) -* Avoid unnecessary `LifecycleExecutionState` recalculation {es-pull}81558[#81558] (issues: {es-issue}77466[#77466], {es-issue}79692[#79692]) - -Features/Indices APIs:: -* Batch rollover cluster state updates {es-pull}79945[#79945] (issues: {es-issue}77466[#77466], {es-issue}79782[#79782]) -* Reuse `MappingMetadata` instances in Metadata class {es-pull}80348[#80348] (issues: {es-issue}69772[#69772], {es-issue}77466[#77466]) - -Features/Stats:: -* Add bulk stats track the bulk per shard {es-pull}52208[#52208] (issues: {es-issue}47345[#47345], {es-issue}50536[#50536]) - -Features/Watcher:: -* Remove Watcher history clean up from monitoring {es-pull}67154[#67154] - -Infra/Core:: -* Remove aliases exist action {es-pull}43430[#43430] -* Remove indices exists action {es-pull}43164[#43164] -* Remove types exists action {es-pull}43344[#43344] -* Retain reference to stdout for exceptional cases {es-pull}77460[#77460] -* Check whether stdout is a real console {es-pull}79882[#79882] -* Share int, long, float, double, and byte pages {es-pull}75053[#75053] -* Revert "Deprecate resolution loss on date field (#78921)" {es-pull}79914[#79914] (issue: {es-issue}78921[#78921]) -* Add two missing entries to the deprecation information API {es-pull}80290[#80290] (issue: {es-issue}80233[#80233]) -* Prevent upgrades to 8.0 without first upgrading to the last 7.x release {es-pull}82321[#82321] (issue: {es-issue}81865[#81865]) - -Infra/Logging:: -* Make Elasticsearch JSON logs ECS compliant {es-pull}47105[#47105] (issue: {es-issue}46119[#46119]) - -Infra/REST API:: -* Allow for field declaration for future compatible versions {es-pull}69774[#69774] (issue: {es-issue}51816[#51816]) -* Introduce stability description to the REST API specification {es-pull}38413[#38413] -* Parsing: Validate that fields are not registered twice {es-pull}70243[#70243] -* Support response content-type with versioned media type {es-pull}65500[#65500] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Typed endpoints for index and get APIs {es-pull}69131[#69131] (issue: {es-issue}54160[#54160]) -* [REST API Compatibility] Typed endpoints for put and get mapping and get field mappings {es-pull}71721[#71721] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Allow `copy_settings` flag for resize operations {es-pull}75184[#75184] (issues: {es-issue}38514[#38514], {es-issue}51816[#51816]) -* [REST API Compatibility] Allow for type in geo shape query {es-pull}74553[#74553] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Always return `adjust_pure_negative` value {es-pull}75182[#75182] (issues: {es-issue}49543[#49543], {es-issue}51816[#51816]) -* [REST API Compatibility] Clean up x-pack/plugin rest compat tests {es-pull}74701[#74701] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Do not return `_doc` for empty mappings in template {es-pull}75448[#75448] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160], {es-issue}70966[#70966], {es-issue}74544[#74544]) -* [REST API Compatibility] Dummy REST action for `indices.upgrade` API {es-pull}75136[#75136] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] REST Terms vector typed response {es-pull}73117[#73117] -* [REST API Compatibility] Rename `BulkItemResponse.Failure` type field {es-pull}74937[#74937] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Type metadata for docs used in simulate request {es-pull}74222[#74222] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Typed `TermLookups` {es-pull}74544[#74544] (issues: {es-issue}46943[#46943], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Typed and x-pack graph explore API {es-pull}74185[#74185] (issues: {es-issue}46935[#46935], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Typed endpoint for bulk API {es-pull}73571[#73571] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Typed endpoint for multi-get API {es-pull}73878[#73878] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Typed endpoints for `RestUpdateAction` and `RestDeleteAction` {es-pull}73115[#73115] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Typed endpoints for `get_source` API {es-pull}73957[#73957] (issues: {es-issue}46587[#46587], {es-issue}46931[#46931], {es-issue}51816[#51816]) -* [REST API Compatibility] Typed endpoints for explain API {es-pull}73901[#73901] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Typed endpoints for search `_count` API {es-pull}73958[#73958] (issues: {es-issue}42112[#42112], {es-issue}51816[#51816]) -* [REST API Compatibility] Typed indexing stats {es-pull}74181[#74181] (issues: {es-issue}47203[#47203], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Types for percolate query API {es-pull}74698[#74698] (issues: {es-issue}46985[#46985], {es-issue}51816[#51816], {es-issue}54160[#54160], {es-issue}74689[#74689]) -* [REST API Compatibility] Validate query typed API {es-pull}74171[#74171] (issues: {es-issue}46927[#46927], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Voting config exclusion exception message {es-pull}75406[#75406] (issues: {es-issue}51816[#51816], {es-issue}55291[#55291]) -* [REST API Compatibility] `MoreLikeThisQuery` with types {es-pull}75123[#75123] (issues: {es-issue}42198[#42198], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Update and delete by query using size field {es-pull}69606[#69606] -* [REST API Compatibility] Indicies boost in object format {es-pull}74422[#74422] (issues: {es-issue}51816[#51816], {es-issue}55078[#55078]) -* [REST API Compatibility] Typed endpoints for search and related endpoints {es-pull}72155[#72155] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Allow to use size `-1` {es-pull}75342[#75342] (issues: {es-issue}51816[#51816], {es-issue}69548[#69548], {es-issue}70209[#70209]) -* [REST API Compatibility] Ignore `use_field_mapping` option for docvalue {es-pull}74435[#74435] (issue: {es-issue}55622[#55622]) -* [REST API Compatibility] `_time` and `_term` sort orders {es-pull}74919[#74919] (issues: {es-issue}39450[#39450], {es-issue}51816[#51816]) -* [REST API Compatability] `template` parameter and field on PUT index template {es-pull}71238[#71238] (issues: {es-issue}49460[#49460], {es-issue}51816[#51816], {es-issue}68905[#68905]) -* [REST API Compatibility] Make query registration easier {es-pull}75722[#75722] (issue: {es-issue}51816[#51816]) -* [REST API Compatibility] Typed query {es-pull}75453[#75453] (issues: {es-issue}47207[#47207], {es-issue}51816[#51816], {es-issue}54160[#54160]) -* [REST API Compatibility] Deprecate the use of synced flush {es-pull}75372[#75372] (issues: {es-issue}50882[#50882], {es-issue}51816[#51816]) -* [REST API Compatibility] Licence `accept_enterprise` and response changes {es-pull}75479[#75479] (issues: {es-issue}50067[#50067], {es-issue}50735[#50735], {es-issue}51816[#51816], {es-issue}58217[#58217]) - -Infra/Scripting:: -* Update `DeprecationMap` to `DynamicMap` {es-pull}56149[#56149] (issue: {es-issue}52103[#52103]) -* Add nio Buffers to Painless {es-pull}79870[#79870] (issue: {es-issue}79867[#79867]) -* Restore the scripting general cache {es-pull}79453[#79453] (issue: {es-issue}62899[#62899]) - -Infra/Settings:: -* Fixed inconsistent `Setting.exist()` {es-pull}46603[#46603] (issue: {es-issue}41830[#41830]) -* Remove `index.optimize_auto_generated_id` setting (#27583) {es-pull}27600[#27600] (issue: {es-issue}27583[#27583]) -* Implement setting deduplication via string interning {es-pull}80493[#80493] (issues: {es-issue}77466[#77466], {es-issue}78892[#78892]) - -Ingest:: -* Add support for `_meta` field to ingest pipelines {es-pull}76381[#76381] -* Remove binary field after attachment processor execution {es-pull}79172[#79172] -* Improving cache lookup to reduce recomputing / searches {es-pull}77259[#77259] -* Extract more standard metadata from binary files {es-pull}78754[#78754] (issue: {es-issue}22339[#22339]) - -License:: -* Add deprecated `accept_enterprise` param to `/_xpack` {es-pull}58220[#58220] (issue: {es-issue}58217[#58217]) -* Support `accept_enterprise` param in get license API {es-pull}50067[#50067] (issue: {es-issue}49474[#49474]) -* Enforce Transport TLS check on all licenses {es-pull}79602[#79602] (issue: {es-issue}75292[#75292]) - -Machine Learning:: -* The Windows build platform for the {ml} C++ code now uses Visual Studio 2019 {ml-pull}1352[#1352] -* The macOS build platform for the {ml} C++ code is now Mojave running Xcode 11.3.1, - or Ubuntu 20.04 running clang 8 for cross compilation {ml-pull}1429[#1429] -* Add a new application for evaluating PyTorch models. The app depends on LibTorch - the C++ front end to PyTorch - and performs inference on models stored in the TorchScript format {ml-pull}1902[#1902] -* Adding new PUT trained model vocabulary endpoint {es-pull}77387[#77387] -* Creating new PUT model definition part API {es-pull}76987[#76987] -* Add inference time configuration overrides {es-pull}78441[#78441] (issue: {es-issue}77799[#77799]) -* Optimize source extraction for `categorize_text` aggregation {es-pull}79099[#79099] -* The Linux build platform for the {ml} C++ code is now CentOS 7 running gcc 10.3. {ml-pull}2028[#2028] -* Make ML indices hidden when the node becomes master {es-pull}77416[#77416] (issue: {es-issue}53674[#53674]) -* Add `deployment_stats` to trained model stats {es-pull}80531[#80531] -* The setting `use_auto_machine_memory_percent` now defaults `max_model_memory_limit` {es-pull}80532[#80532] (issue: {es-issue}80415[#80415]) - -Mapping:: -* Sparse vector to throw exception consistently {es-pull}62646[#62646] -* Add support for configuring HNSW parameters {es-pull}79193[#79193] (issue: {es-issue}78473[#78473]) -* Extend `dense_vector` to support indexing vectors {es-pull}78491[#78491] (issue: {es-issue}78473[#78473]) - -Monitoring:: -* Add previously removed Monitoring settings back for 8.0 {es-pull}78784[#78784] -* Change Monitoring plugin cluster alerts to not install by default {es-pull}79657[#79657] -* Adding default templates for Metricbeat ECS data {es-pull}81744[#81744] - -Network:: -* Enable LZ4 transport compression by default {es-pull}76326[#76326] (issue: {es-issue}73497[#73497]) -* Improve slow inbound handling to include response type {es-pull}80425[#80425] - -Packaging:: -* Make the Docker build more re-usable in Cloud {es-pull}50277[#50277] (issues: {es-issue}46166[#46166], {es-issue}49926[#49926]) -* Update docker-compose.yml to fix bootstrap check error {es-pull}47650[#47650] -* Allow total memory to be overridden {es-pull}78750[#78750] (issue: {es-issue}65905[#65905]) -* Convert repository plugins to modules {es-pull}81870[#81870] (issue: {es-issue}81652[#81652]) - -Recovery:: -* Use Lucene index in peer recovery and resync {es-pull}51189[#51189] (issue: {es-issue}50775[#50775]) -* Fix `PendingReplicationActions` submitting lots of `NOOP` tasks to `GENERIC` {es-pull}82092[#82092] (issues: {es-issue}77466[#77466], {es-issue}79837[#79837]) - -Reindex:: -* Make reindexing managed by a persistent task {es-pull}43382[#43382] (issue: {es-issue}42612[#42612]) -* Reindex restart from checkpoint {es-pull}46055[#46055] (issue: {es-issue}42612[#42612]) -* Reindex search resiliency {es-pull}45497[#45497] (issues: {es-issue}42612[#42612], {es-issue}43187[#43187]) -* Reindex v2 rethrottle sliced fix {es-pull}46967[#46967] (issues: {es-issue}42612[#42612], {es-issue}46763[#46763]) -* Do not scroll if max docs is less than scroll size (update/delete by query) {es-pull}81654[#81654] (issue: {es-issue}54270[#54270]) - -Rollup:: -* Adds support for `date_nanos` in Rollup Metric and `DateHistogram` Configs {es-pull}59349[#59349] (issue: {es-issue}44505[#44505]) - -SQL:: -* Add text formatting support for multivalue {es-pull}68606[#68606] -* Add xDBC and CLI support. QA CSV specs {es-pull}68966[#68966] -* Export array values through result sets {es-pull}69512[#69512] -* Improve alias resolution in sub-queries {es-pull}67216[#67216] (issue: {es-issue}56713[#56713]) -* Improve the optimization of null conditionals {es-pull}71192[#71192] -* Push `WHERE` clause inside subqueries {es-pull}71362[#71362] -* Use Java `String` methods for `LTRIM/RTRIM` {es-pull}57594[#57594] -* QL: Make canonical form take into account children {es-pull}71266[#71266] -* QL: Polish optimizer expression rule declaration {es-pull}71396[#71396] -* QL: Propagate nullability constraints across conjunctions {es-pull}71187[#71187] (issue: {es-issue}70683[#70683]) - -Search:: -* Completely disallow setting negative size in search {es-pull}70209[#70209] (issue: {es-issue}69548[#69548]) -* Make `0` as invalid value for `min_children` in `has_child` query {es-pull}41347[#41347] -* Return error when remote indices are locally resolved {es-pull}74556[#74556] (issue: {es-issue}26247[#26247]) -* [REST API Compatibility] Nested path and filter sort options {es-pull}76022[#76022] (issues: {es-issue}42809[#42809], {es-issue}51816[#51816]) -* [REST API Compatibility] `CommonTermsQuery` and `cutoff_frequency` parameter {es-pull}75896[#75896] (issues: {es-issue}42654[#42654], {es-issue}51816[#51816]) -* [REST API Compatibility] Allow first empty line for `_msearch` {es-pull}75886[#75886] (issues: {es-issue}41011[#41011], {es-issue}51816[#51816]) -* Node level can match action {es-pull}78765[#78765] -* TSDB: Add time series information to field caps {es-pull}78790[#78790] (issue: {es-issue}74660[#74660]) -* Add new kNN search endpoint {es-pull}79013[#79013] (issue: {es-issue}78473[#78473]) -* Disallow kNN searches on nested vector fields {es-pull}79403[#79403] (issue: {es-issue}78473[#78473]) -* Ensure kNN search respects authorization {es-pull}79693[#79693] (issue: {es-issue}78473[#78473]) -* Load kNN vectors format with mmapfs {es-pull}78724[#78724] (issue: {es-issue}78473[#78473]) -* Support cosine similarity in kNN search {es-pull}79500[#79500] -* Node level can match action {es-pull}78765[#78765] -* Check nested fields earlier in kNN search {es-pull}80516[#80516] (issue: {es-issue}78473[#78473]) - -Security:: -* Add a tool for creating enrollment tokens {es-pull}74890[#74890] -* Add the Enroll Kibana API {es-pull}72207[#72207] -* Change default hashing algorithm for FIPS 140 {es-pull}55544[#55544] -* Create enrollment token {es-pull}73573[#73573] (issues: {es-issue}71438[#71438], {es-issue}72129[#72129]) -* Enroll node API {es-pull}72129[#72129] -* Configure security for the initial node CLI {es-pull}74868[#74868] -* Generate and store password hash for elastic user {es-pull}76276[#76276] (issue: {es-issue}75310[#75310]) -* Set elastic password and generate enrollment token {es-pull}75816[#75816] (issue: {es-issue}75310[#75310]) -* Add `elasticsearch-enroll-node` tool {es-pull}77292[#77292] -* Default hasher to `PBKDF2_STRETCH` on FIPS mode {es-pull}76274[#76274] -* Add v7 `restCompat` for invalidating API key with the id field {es-pull}78664[#78664] (issue: {es-issue}66671[#66671]) -* Print enrollment token on startup {es-pull}78293[#78293] -* Startup check for security implicit behavior change {es-pull}76879[#76879] -* CLI tool to reconfigure nodes to enroll {es-pull}79690[#79690] (issue: {es-issue}7718[#7718]) -* Security auto-configuration for packaged installations {es-pull}75144[#75144] (issue: {es-issue}78306[#78306]) - -Snapshot/Restore:: -* Introduce searchable snapshots index setting for cascade deletion of snapshots {es-pull}74977[#74977] -* Unify blob store compress setting {es-pull}39346[#39346] (issue: {es-issue}39073[#39073]) -* Add recovery state tracking for searchable snapshots {es-pull}60505[#60505] -* Allow listing older repositories {es-pull}78244[#78244] -* Optimize SLM Policy Queries {es-pull}79341[#79341] (issue: {es-issue}79321[#79321]) - -TLS:: -* Add `ChaCha20` TLS ciphers on Java 12+ {es-pull}42155[#42155] -* Add support for `KeyStore` filters to `ssl-config` {es-pull}75407[#75407] -* Update TLS ciphers and protocols for JDK 11 {es-pull}41808[#41808] (issues: {es-issue}38646[#38646], {es-issue}41385[#41385]) - -Transform:: -* Prevent old beta transforms from starting {es-pull}79712[#79712] - -TSDB:: -* Automatically add timestamp mapper {es-pull}79136[#79136] -* Create a coordinating node level reader for tsdb {es-pull}79197[#79197] -* Fix TSDB shrink test in multi-version cluster {es-pull}79940[#79940] (issue: {es-issue}79936[#79936]) -* Do not allow shadowing metrics or dimensions {es-pull}79757[#79757] - - -[[bug-8.0.0]] -[float] -=== Bug fixes - -Aggregations:: -* Fix BWC issues for `x_pack/usage` {es-pull}55181[#55181] (issue: {es-issue}54847[#54847]) -* Fix `DoubleBounds` null serialization {es-pull}59475[#59475] -* Fix `TopHitsAggregationBuilder` adding duplicate `_score` sort clauses {es-pull}42179[#42179] (issue: {es-issue}42154[#42154]) -* Fix `t_test` usage stats {es-pull}54753[#54753] (issue: {es-issue}54744[#54744]) -* Throw exception if legacy interval cannot be parsed in `DateIntervalWrapper` {es-pull}41972[#41972] (issue: {es-issue}41970[#41970]) - -Autoscaling:: -* Autoscaling use adjusted total memory {es-pull}80528[#80528] (issue: {es-issue}78750[#78750]) - -CCR:: -* Fix `AutoFollow` version checks {es-pull}73776[#73776] (issue: {es-issue}72935[#72935]) - -Cluster Coordination:: -* Apply cluster states in system context {es-pull}53785[#53785] (issue: {es-issue}53751[#53751]) - -Data streams:: -* Prohibit restoring a data stream alias with a conflicting write data stream {es-pull}81217[#81217] (issue: {es-issue}80976[#80976]) - -Distributed:: -* Introduce `?wait_for_active_shards=index-setting` {es-pull}67158[#67158] (issue: {es-issue}66419[#66419]) -* Fixes to task result index mapping {es-pull}50359[#50359] (issue: {es-issue}50248[#50248]) - -Features/CAT APIs:: -* Fix cat recovery display of bytes fields {es-pull}40379[#40379] (issue: {es-issue}40335[#40335]) - -Features/ILM+SLM:: -* Ensuring that the `ShrinkAction` does not hang if total shards per node is too low {es-pull}76732[#76732] (issue: {es-issue}44070[#44070]) -* Less verbose serialization of snapshot failure in SLM metadata {es-pull}80942[#80942] (issue: {es-issue}77466[#77466]) - -Features/Indices APIs:: -* Fix `ComposableIndexTemplate` equals when `composed_of` is null {es-pull}80864[#80864] - -Features/Java High Level REST Client:: -* The Java High Level Rest Client (HLRC) has been removed and replaced by a new -{es} Java client. For migration steps, refer to -{java-api-client}/migrate-hlrc.html[Migrate from the High Level Rest Client]. - -Geo:: -* Preprocess polygon rings before processing it for decomposition {es-pull}59501[#59501] (issues: {es-issue}54441[#54441], {es-issue}59386[#59386]) - -Infra/Core:: -* Add searchable snapshot cache folder to `NodeEnvironment` {es-pull}66297[#66297] (issue: {es-issue}65725[#65725]) -* CLI tools: Write errors to stderr instead of stdout {es-pull}45586[#45586] (issue: {es-issue}43260[#43260]) -* Precompute `ParsedMediaType` for XContentType {es-pull}67409[#67409] -* Prevent stack overflow in rounding {es-pull}80450[#80450] - -Infra/Logging:: -* Fix NPE when logging null values in JSON {es-pull}53715[#53715] (issue: {es-issue}46702[#46702]) -* Fix stats in slow logs to be a escaped JSON {es-pull}44642[#44642] -* Populate data stream fields when `xOpaqueId` not provided {es-pull}62156[#62156] - -Infra/REST API:: -* Do not allow spaces within `MediaType's` parameters {es-pull}64650[#64650] (issue: {es-issue}51816[#51816]) -* Handle incorrect header values {es-pull}64708[#64708] (issues: {es-issue}51816[#51816], {es-issue}64689[#64689]) -* Ignore media ranges when parsing {es-pull}64721[#64721] (issues: {es-issue}51816[#51816], {es-issue}64689[#64689]) -* `RestController` should not consume request content {es-pull}44902[#44902] (issue: {es-issue}37504[#37504]) -* Handle exceptions thrown from `RestCompatibleVersionHelper` {es-pull}80253[#80253] (issues: {es-issue}78214[#78214], {es-issue}79060[#79060]) - -Infra/Scripting:: -* Change compound assignment structure to support string concatenation {es-pull}61825[#61825] -* Fixes casting in constant folding {es-pull}61508[#61508] -* Several minor Painless fixes {es-pull}61594[#61594] -* Fix duplicated allow lists upon script engine creation {es-pull}82820[#82820] (issue: {es-issue}82778[#82778]) - -Infra/Settings:: -* Stricter `UpdateSettingsRequest` parsing on the REST layer {es-pull}79227[#79227] (issue: {es-issue}29268[#29268]) -* Set Auto expand replica on deprecation log data stream {es-pull}79226[#79226] (issue: {es-issue}78991[#78991]) - -Ingest:: -* Adjust default geoip logging to be less verbose {es-pull}81404[#81404] (issue: {es-issue}81356[#81356]) - -Machine Learning:: -* Add timeout parameter for delete trained models API {es-pull}79739[#79739] (issue: {es-issue}77070[#77070]) -* Tone down ML unassigned job notifications {es-pull}79578[#79578] (issue: {es-issue}79270[#79270]) -* Use a new annotations index for future annotations {es-pull}79006[#79006] (issue: {es-issue}78439[#78439]) -* Set model state compatibility version to 8.0.0 {ml-pull}2139[#2139] -* Check that `total_definition_length` is consistent before starting a deployment {es-pull}80553[#80553] -* Fail inference processor more consistently on certain error types {es-pull}81475[#81475] -* Optimize the job stats call to do fewer searches {es-pull}82362[#82362] (issue: {es-issue}82255[#82255]) - -Mapping:: -* Remove assertions that mappings have one top-level key {es-pull}58779[#58779] (issue: {es-issue}58521[#58521]) - -Packaging:: -* Suppress illegal access in plugin install {es-pull}41620[#41620] (issue: {es-issue}41478[#41478]) - -Recovery:: -* Make shard started response handling only return after the cluster state update completes {es-pull}82790[#82790] (issue: {es-issue}81628[#81628]) - -SQL:: -* Introduce dedicated node for `HAVING` declaration {es-pull}71279[#71279] (issue: {es-issue}69758[#69758]) -* Make `RestSqlQueryAction` thread-safe {es-pull}69901[#69901] - -Search:: -* Check for negative `from` values in search request body {es-pull}54953[#54953] (issue: {es-issue}54897[#54897]) -* Fix `VectorsFeatureSetUsage` serialization in BWC mode {es-pull}55399[#55399] (issue: {es-issue}55378[#55378]) -* Handle total hits equal to `track_total_hits` {es-pull}37907[#37907] (issue: {es-issue}37897[#37897]) -* Improve error msg for CCS request on node without remote cluster role {es-pull}60351[#60351] (issue: {es-issue}59683[#59683]) -* Remove unsafe assertion in wildcard field {es-pull}78966[#78966] - -Security:: -* Allow access to restricted system indices for reserved system roles {es-pull}76845[#76845] - -Snapshot/Restore:: -* Fix `GET /_snapshot/_all/_all` if there are no repos {es-pull}43558[#43558] (issue: {es-issue}43547[#43547]) -* Don't fill stack traces in `SnapshotShardFailure` {es-pull}80009[#80009] (issue: {es-issue}79718[#79718]) -* Remove custom metadata if there is nothing to restore {es-pull}81373[#81373] (issues: {es-issue}81247[#81247], {es-issue}82019[#82019]) - -[[regression-8.0.0]] -[float] -=== Regressions - -Search:: -* Disable numeric sort optimization conditionally {es-pull}78103[#78103] - -[[upgrade-8.0.0]] -[float] -=== Upgrades - -Authentication:: -* Upgrade to UnboundID LDAP SDK v6.0.2 {es-pull}79332[#79332] - -Infra/Logging:: -* Upgrade ECS logging layout to latest version {es-pull}80500[#80500] - -Search:: -* Upgrade to Lucene 9 {es-pull}81426[#81426] - -Security:: -* Update to OpenSAML 4 {es-pull}77012[#77012] (issue: {es-issue}71983[#71983]) - -Snapshot/Restore:: -* Upgrade repository-hdfs plugin to Hadoop 3 {es-pull}76897[#76897] diff --git a/docs/reference/release-notes/8.0.1.asciidoc b/docs/reference/release-notes/8.0.1.asciidoc deleted file mode 100644 index b6eb14c78ef00..0000000000000 --- a/docs/reference/release-notes/8.0.1.asciidoc +++ /dev/null @@ -1,81 +0,0 @@ -[[release-notes-8.0.1]] -== {es} version 8.0.1 - -Also see <>. -[[known-issues-8.0.1]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[bug-8.0.1]] -[float] -=== Bug fixes - -Aggregations:: -* Fix backward compatibility with 7.17.0 {es-pull}83715[#83715] - -Distributed:: -* Correctly handle large zones with 500 or more instances {es-pull}83785[#83785] (issue: {es-issue}83783[#83783]) - -ILM+SLM:: -* Do not allow negative age in explain lifecycle API response {es-pull}84043[#84043] - -Infra/Core:: -* Copy `trace.id` in threadcontext stash {es-pull}83218[#83218] -* Preserve context in `ResultDeduplicator` {es-pull}84038[#84038] (issue: {es-issue}84036[#84036]) -* Update system index mappings if `_meta` is null {es-pull}83896[#83896] (issue: {es-issue}83890[#83890]) - -Ingest:: -* Fix `GeoIpDownloader` startup during rolling upgrade {es-pull}84000[#84000] -* Short circuit date patterns after first match {es-pull}83764[#83764] - -Machine Learning:: -* Retry anomaly detection job recovery during relocation {es-pull}83456[#83456] - -Packaging:: -* Add `log4j-slf4j-impl` to `repository-azure` {es-pull}83661[#83661] (issue: {es-issue}83652[#83652]) - -Recovery:: -* Add missing `indices.recovery.internal_action_retry_timeout` to list of settings {es-pull}83354[#83354] - -SQL:: -* Fix txt format for empty result sets {es-pull}83376[#83376] - -Search:: -* Avoid eagerly loading `StoredFieldsReader` in fetch phase {es-pull}83693[#83693] (issue: {es-issue}82777[#82777]) -* Returns valid PIT when no index matched {es-pull}83424[#83424] - -Security:: -* Upgrade jANSI dependency to 2.4.0 {es-pull}83566[#83566] - -Snapshot/Restore:: -* Move get snapshots serialization to management pool {es-pull}83215[#83215] -* Preserve context in `snapshotDeletionListeners` {es-pull}84089[#84089] (issue: {es-issue}84036[#84036]) - -Transform:: -* Fix condition on which the transform stops processing buckets {es-pull}82852[#82852] - -Watcher:: -* Tolerate empty types array in Watch definitions {es-pull}83524[#83524] (issue: {es-issue}83235[#83235]) - -[[enhancement-8.0.1]] -[float] -=== Enhancements - -Infra/REST API:: -* Update YAML REST tests to check for product header on all responses {es-pull}83290[#83290] - -Recovery:: -* Adjust `indices.recovery.max_bytes_per_sec` according to external settings {es-pull}82819[#82819] - -[[upgrade-8.0.1]] -[float] -=== Upgrades - -Geo:: -* Update vector tiles google protobuf to 3.16.1 {es-pull}83402[#83402] - -Packaging:: -* Bump bundled JDK to 17.0.2+8 {es-pull}83243[#83243] (issue: {es-issue}83242[#83242]) - - diff --git a/docs/reference/release-notes/8.1.0.asciidoc b/docs/reference/release-notes/8.1.0.asciidoc deleted file mode 100644 index 868739665f5fb..0000000000000 --- a/docs/reference/release-notes/8.1.0.asciidoc +++ /dev/null @@ -1,348 +0,0 @@ -[[release-notes-8.1.0]] -== {es} version 8.1.0 - -Also see <>. -[[known-issues-8.1.0]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[breaking-8.1.0]] -[float] -=== Breaking changes - -Geo:: -* Fields API should return normalize geometries {es-pull}80649[#80649] (issues: {es-issue}79232[#79232], {es-issue}63739[#63739]) - -[[bug-8.1.0]] -[float] -=== Bug fixes - -Aggregations:: -* Reenable `BooleanTermsIT` {es-pull}83421[#83421] (issue: {es-issue}83351[#83351]) -* backward compatibility with version 7.17.0 {es-pull}83715[#83715] -* ip prefix bucket reduction {es-pull}83637[#83637] -* reduce float and half-float values to their stored precision {es-pull}83213[#83213] - -Allocation:: -* Fix `updateMinNode` condition {es-pull}80403[#80403] (issue: {es-issue}41194[#41194]) -* Make `*.routing.allocation.*` list-based setting {es-pull}80420[#80420] (issue: {es-issue}77773[#77773]) -* Permit metadata updates on flood-stage-blocked indices {es-pull}81781[#81781] -* Reroute after cluster recovery {es-pull}82856[#82856] (issue: {es-issue}82456[#82456]) - -Authorization:: -* Capture anonymous roles when creating API keys {es-pull}81427[#81427] (issue: {es-issue}81024[#81024]) -* Extend fleet-server service account privileges {es-pull}82600[#82600] - -Autoscaling:: -* Fix autoscaling of follower data streams {es-pull}83302[#83302] (issue: {es-issue}82857[#82857]) - -Client:: -* Fix for self-suppression in REST client {es-pull}83568[#83568] (issue: {es-issue}42223[#42223]) - -Distributed:: -* [GCE Discovery] Correctly handle large zones with 500 or more instances {es-pull}83785[#83785] (issue: {es-issue}83783[#83783]) - -Engine:: -* Fork to `WRITE` thread when failing shard {es-pull}84606[#84606] (issue: {es-issue}84602[#84602]) - -Geo:: -* Handle bounds properly when grid tiles crosses the dateline {es-pull}83348[#83348] (issue: {es-issue}83299[#83299]) -* `GeometryNormalizer` should not fail if it cannot compute signed area {es-pull}84051[#84051] (issue: {es-issue}83946[#83946]) - -ILM+SLM:: -* Fix `PolicyStepsRegistry`'s `cachedSteps` null handling {es-pull}84588[#84588] - -Indices APIs:: -* Allow removing unreferenced composable data stream templates {es-pull}84376[#84376] (issues: {es-issue}84171[#84171], {es-issue}84188[#84188]) -* Simplify and speed up `ExecutorSelector` {es-pull}83514[#83514] (issue: {es-issue}82450[#82450]) - -Infra/Core:: -* Always re-run Feature migrations which have encountered errors {es-pull}83918[#83918] (issue: {es-issue}83917[#83917]) -* Copy `trace.id` in threadcontext stash {es-pull}83218[#83218] -* Preserve context in `ResultDeduplicator` {es-pull}84038[#84038] (issue: {es-issue}84036[#84036]) -* Registration of `SystemIndexMigrationTask` named xcontent objects {es-pull}84192[#84192] (issue: {es-issue}84115[#84115]) -* Update system index mappings if `_meta` is null {es-pull}83896[#83896] (issue: {es-issue}83890[#83890]) - -Infra/REST API:: -* Do not allow safelisted media types on Content-Type {es-pull}83448[#83448] - -Infra/Scripting:: -* Fix duplicated allow lists upon script engine creation {es-pull}82820[#82820] (issue: {es-issue}82778[#82778]) -* Fix plumbing in double and keyword runtime fields for the scripting fields API {es-pull}83392[#83392] - -Ingest:: -* Fix `GeoIpDownloader` startup during rolling upgrade {es-pull}84000[#84000] -* Short circuit date patterns after first match {es-pull}83764[#83764] - -Machine Learning:: -* Allow autoscaling to work when vertical scaling is possible {es-pull}84242[#84242] (issue: {es-issue}84198[#84198]) -* Correctly capture min stats for `inference.ingest_processors` in ML usage {es-pull}82352[#82352] -* Fail queued inference requests with cause if the process crashes {es-pull}81584[#81584] -* Fix NLP tokenization `never_split` handling around punctuation {es-pull}82982[#82982] -* Fix `ZeroShotClassificationConfig` update mixing fields {es-pull}82848[#82848] -* Fix bug where initial scale from 0->1 could scale too high {es-pull}84244[#84244] -* Fix submit after shutdown in process worker service {es-pull}83645[#83645] (issue: {es-issue}83633[#83633]) -* Fixes `categorize_text` parameter validation to be parse order independent {es-pull}82628[#82628] (issue: {es-issue}82629[#82629]) -* Record node shutdown start time for each node {es-pull}84355[#84355] -* Register the named X-content parser for snapshot upgrade params {es-pull}84420[#84420] (issue: {es-issue}84419[#84419]) -* Retry anomaly detection job recovery during relocation {es-pull}83456[#83456] -* Return `zxx` for `lang_ident_model_1` if no valid text is found for language identification {es-pull}82746[#82746] (issue: {es-issue}81933[#81933]) -* Text structure finder caps exclude lines pattern at 1000 characters {es-pull}84236[#84236] (issue: {es-issue}83434[#83434]) -* Validate vocabulary on model deployment {es-pull}81548[#81548] (issue: {es-issue}81470[#81470]) -* Wait for model process to stop in stop deployment {es-pull}83644[#83644] - -Mapping:: -* Add support for sub-fields to `search_as_you_type` fields {es-pull}82430[#82430] (issue: {es-issue}56326[#56326]) -* Better exception message for `MappingParser.parse` {es-pull}80696[#80696] -* Completion field to support multiple completion multi-fields {es-pull}83595[#83595] (issue: {es-issue}83534[#83534]) - -Network:: -* Throw `NoSeedNodeLeftException` on proxy failure {es-pull}80961[#80961] (issue: {es-issue}80898[#80898]) - -Packaging:: -* Add `log4j-slf4j-impl` to `repository-azure` {es-pull}83661[#83661] (issue: {es-issue}83652[#83652]) -* Restart ES after keystore upgrade in postinst {es-pull}84224[#84224] (issue: {es-issue}82433[#82433]) - -Recovery:: -* Add missing `indices.recovery.internal_action_retry_timeout` to list of settings {es-pull}83354[#83354] -* Add missing max overcommit factor to list of (dynamic) settings {es-pull}83350[#83350] - -SQL:: -* Fix txt format for empty result sets {es-pull}83376[#83376] - -Search:: -* Avoid eagerly loading `StoredFieldsReader` in fetch phase {es-pull}83693[#83693] (issue: {es-issue}82777[#82777]) -* Do not deserialise the document when not needed in the fields fetch phase {es-pull}84184[#84184] -* Returns valid PIT when no index matched {es-pull}83424[#83424] - -Security:: -* Add validation for API key role descriptors {es-pull}82049[#82049] (issue: {es-issue}67311[#67311]) - -Snapshot/Restore:: -* Adjust `LinuxFileSystemNatives.allocatedSizeInBytes` for aarch64 architectures {es-pull}81376[#81376] (issues: {es-issue}80437[#80437], {es-issue}81362[#81362]) -* Distinguish "missing repository" from "missing repository plugin" {es-pull}82457[#82457] (issue: {es-issue}81758[#81758]) -* Fix `DirectBlobContainerIndexInput` cloning method {es-pull}84341[#84341] (issue: {es-issue}84238[#84238]) -* Move get snapshots serialization to management pool {es-pull}83215[#83215] -* Preserve context in `snapshotDeletionListeners` {es-pull}84089[#84089] (issue: {es-issue}84036[#84036]) - -TSDB:: -* Fix time series timestamp meta missing {es-pull}80695[#80695] - -Transform:: -* Fix NPE in transform version check {es-pull}81756[#81756] -* Fix condition on which the transform stops processing buckets {es-pull}82852[#82852] -* Prevent stopping of transforms due to threadpool limitation {es-pull}81912[#81912] (issue: {es-issue}81796[#81796]) - -Watcher:: -* Tolerate empty types array in Watch definitions {es-pull}83524[#83524] (issue: {es-issue}83235[#83235]) - -[[deprecation-8.1.0]] -[float] -=== Deprecations - -CRUD:: -* Bulk actions JSON must be well-formed {es-pull}78876[#78876] (issue: {es-issue}43774[#43774]) - -Cluster Coordination:: -* Remove last few mentions of Zen discovery {es-pull}80410[#80410] - -Search:: -* Deprecate the `indices.query.bool.max_clause_count` node setting {es-pull}81525[#81525] (issue: {es-issue}46433[#46433]) - -SQL:: -* Deprecate `index_include_frozen` request parameter {es-pull}83943[#83943] (issue: {es-issue}81939[#81939]) - -[[enhancement-8.1.0]] -[float] -=== Enhancements - -Aggregations:: -* Add an aggregator for IPv4 and IPv6 subnets {es-pull}82410[#82410] -* Fail shards early when we can detect a type missmatch {es-pull}79869[#79869] (issue: {es-issue}72276[#72276]) -* Optimize `significant_text` aggregation to only parse the field it requires from `_source` {es-pull}79651[#79651] - -Allocation:: -* Identify other node in `SameShardAllocDec` message {es-pull}82890[#82890] (issue: {es-issue}80767[#80767]) -* Make `AllocationService#adaptAutoExpandReplicas` Faster {es-pull}83092[#83092] -* Speed up same host check {es-pull}80767[#80767] - -Analysis:: -* Expose Japanese completion filter to kuromoji analysis plugin {es-pull}81858[#81858] - -Authentication:: -* Enable `run_as` for all authentication schemes {es-pull}79809[#79809] -* Return API key name in `_authentication` response {es-pull}78946[#78946] (issue: {es-issue}70306[#70306]) - -Authorization:: -* Avoid loading authorized indices when requested indices are all concrete names {es-pull}81237[#81237] -* Optimize DLS bitset building for `matchAll` query {es-pull}81030[#81030] (issue: {es-issue}80904[#80904]) - -Cluster Coordination:: -* Add detail to slow cluster state warning message {es-pull}83221[#83221] -* Batch Index Settings Update Requests {es-pull}82896[#82896] (issue: {es-issue}79866[#79866]) -* Improve node-join task descriptions {es-pull}80090[#80090] -* Make `PeerFinder` log messages happier {es-pull}83222[#83222] -* More compact serialization of metadata {es-pull}82608[#82608] (issue: {es-issue}77466[#77466]) -* Paginate persisted cluster state {es-pull}78875[#78875] -* Reduce verbosity-increase timeout to 3 minutes {es-pull}81118[#81118] -* Use network recycler for publications {es-pull}80650[#80650] (issue: {es-issue}80111[#80111]) - -Data streams:: -* Defer reroute when autocreating datastream {es-pull}82412[#82412] (issue: {es-issue}82159[#82159]) - -ILM+SLM:: -* Expose the index age in ILM explain output {es-pull}81273[#81273] (issue: {es-issue}64429[#64429]) - -Indices APIs:: -* Batch auto create index cluster state updates {es-pull}82159[#82159] -* Expose 'features' option in Get Index API {es-pull}83083[#83083] (issue: {es-issue}82948[#82948]) -* Expose index health and status to the `_stats` API {es-pull}81954[#81954] (issue: {es-issue}80413[#80413]) -* Force merge REST API support `wait_for_completion` {es-pull}80463[#80463] (issues: {es-issue}80129[#80129], {es-issue}80129[#80129]) - -Infra/Circuit Breakers:: -* Allow dynamically changing the `use_real_memory` setting {es-pull}78288[#78288] (issue: {es-issue}77324[#77324]) - -Infra/Core:: -* Use `VarHandles` for number conversions {es-pull}80367[#80367] (issue: {es-issue}78823[#78823]) -* Use `VarHandles` in `ByteUtils` {es-pull}80442[#80442] (issue: {es-issue}78823[#78823]) -* `FilterPathBasedFilter` support match fieldname with dot {es-pull}83178[#83178] (issues: {es-issue}83148[#83148], {es-issue}83152[#83152]) - -Infra/REST API:: -* Allow for customised content-type validation {es-pull}80906[#80906] (issue: {es-issue}80482[#80482]) -* Update YAML REST tests to check for product header on all responses {es-pull}83290[#83290] - -Infra/Scripting:: -* Add '$' syntax as a shortcut for 'field' in Painless {es-pull}80518[#80518] -* Add `BinaryDocValuesField` to replace `BytesRef` `(ScriptDocValues)` {es-pull}79760[#79760] -* Add a geo point field for the scripting fields api {es-pull}81395[#81395] -* Add date fields to the scripting fields api {es-pull}81272[#81272] -* Add half float mapping to the scripting fields API {es-pull}82294[#82294] -* Add scaled float to the scripting fields API {es-pull}82275[#82275] -* Add support for `GeoShape` to the scripting fields API {es-pull}81617[#81617] -* Fields API for IP mapped type {es-pull}81396[#81396] -* Fields API for byte, double, float, integer, long, short {es-pull}81126[#81126] (issue: {es-issue}79105[#79105]) -* Fields API for flattened mapped type {es-pull}82590[#82590] -* Fields API for x-pack `constant_keyword` {es-pull}82292[#82292] -* Fields API for x-pack version, doc version, seq no, mumur3 {es-pull}81476[#81476] -* Improve support for joda datetime to java datetime in Painless {es-pull}83099[#83099] -* Keyword fields API support {es-pull}81266[#81266] -* Make wildcard accessible from the scripting field API {es-pull}82763[#82763] -* Ordinal field data plumbing {es-pull}80970[#80970] (issue: {es-issue}79105[#79105]) -* Support boolean fields in Fields API {es-pull}80043[#80043] (issue: {es-issue}79105[#79105]) -* Time series compile and cache evict metrics {es-pull}79078[#79078] (issue: {es-issue}62899[#62899]) - -Infra/Settings:: -* Optimize duplicated code block in `MetadataUpdateSettingsService` {es-pull}82048[#82048] - -Machine Learning:: -* Add ability to update the truncation option at inference {es-pull}80267[#80267] -* Add error counts to trained model stats {es-pull}82705[#82705] -* Add latest search interval to datafeed stats {es-pull}82620[#82620] (issue: {es-issue}82405[#82405]) -* Adds new MPNet tokenization for NLP models {es-pull}82234[#82234] -* Force delete trained models {es-pull}80595[#80595] -* Improve error message on starting scrolling datafeed with no matching indices {es-pull}81069[#81069] (issue: {es-issue}81013[#81013]) -* Report thread settings per node for trained model deployments {es-pull}81723[#81723] (issue: {es-issue}81149[#81149]) -* Set default value of 30 days for model prune window {es-pull}81377[#81377] -* Track token positions and use source string to tag NER entities {es-pull}81275[#81275] -* Warn when creating job with an unusual bucket span {es-pull}82145[#82145] (issue: {es-issue}81645[#81645]) - -Mapping:: -* Allow doc-values only search on geo_point fields {es-pull}83395[#83395] -* Implement all queries on doc-values only keyword fields {es-pull}83404[#83404] -* Optimize source filtering in `SourceFieldMapper` {es-pull}81970[#81970] (issues: {es-issue}77154[#77154], {es-issue}81575[#81575]) - -Monitoring:: -* Add Enterprise Search monitoring index templates {es-pull}82743[#82743] -* Add `beats_stats.metrics.apm-server.sampling.tail` to Stack Monitoring templates {es-pull}82401[#82401] - -Network:: -* Report close connection exceptions at INFO {es-pull}81768[#81768] (issues: {es-issue}51612[#51612], {es-issue}66473[#66473]) -* Serialize outbound messages on netty buffers {es-pull}80111[#80111] -* Track histogram of transport handling times {es-pull}80581[#80581] (issue: {es-issue}80428[#80428]) - -Recovery:: -* Adjust `indices.recovery.max_bytes_per_sec` according to external settings {es-pull}82819[#82819] - -SQL:: -* Compress Cursors {es-pull}83591[#83591] -* Extend Tableau connector to reconnect with catalog {es-pull}81321[#81321] - -Search:: -* Add `scripted_metric` agg context to `unsigned_long` {es-pull}64422[#64422] (issue: {es-issue}64347[#64347]) -* Add field usage support for vectors {es-pull}80608[#80608] -* Allow doc-values only search on boolean fields {es-pull}82925[#82925] (issues: {es-issue}82409[#82409], {es-issue}81210[#81210], {es-issue}52728[#52728]) -* Allow doc-values only search on date types {es-pull}82602[#82602] (issues: {es-issue}82409[#82409], {es-issue}81210[#81210], {es-issue}52728[#52728]) -* Allow doc-values only search on ip fields {es-pull}82929[#82929] (issues: {es-issue}82409[#82409], {es-issue}81210[#81210], {es-issue}52728[#52728]) -* Allow doc-values only search on keyword fields {es-pull}82846[#82846] (issues: {es-issue}82409[#82409], {es-issue}81210[#81210], {es-issue}52728[#52728]) -* Allow doc-values only search on number types {es-pull}82409[#82409] (issues: {es-issue}81210[#81210], {es-issue}52728[#52728]) -* Rewrite `match` and `match_phrase` queries to `term` queries on `keyword` fields {es-pull}82612[#82612] (issue: {es-issue}82515[#82515]) -* Short cut if reader has point values {es-pull}80268[#80268] -* Support combining `_shards` preference param with `` {es-pull}80024[#80024] (issue: {es-issue}80021[#80021]) - -Security:: -* Activate user profile API {es-pull}82400[#82400] -* Add an initial `ProfileService` for user profiles {es-pull}81899[#81899] -* Add new system index for user profile documents {es-pull}81355[#81355] -* Add update user profile data API {es-pull}82772[#82772] -* Add user profile API for get profile by UID {es-pull}81910[#81910] -* Update Kibana system user privileges {es-pull}82781[#82781] - -Snapshot/Restore:: -* Add Linux x86-64bits native method to retrieve the number of allocated bytes on disk for a file {es-pull}80437[#80437] (issue: {es-issue}79698[#79698]) - -Stats:: -* Add index pressure stats in cluster stats {es-pull}80303[#80303] (issue: {es-issue}79788[#79788]) -* Optimize `getIndices` in `IndicesSegmentResponse` {es-pull}80064[#80064] -* Speed up `MappingStats` Computation on Coordinating Node {es-pull}82830[#82830] - -TSDB:: -* Add `_tsid` field to `time_series` indices {es-pull}80276[#80276] -* Make time boundaries settings required in TSDB indices {es-pull}81146[#81146] - -Transform:: -* Introduce `deduce_mappings` transform setting {es-pull}82256[#82256] (issue: {es-issue}82559[#82559]) -* Make it possible to clear retention policy on an existing transform {es-pull}82703[#82703] (issue: {es-issue}82560[#82560]) -* Report transforms without config as erroneous {es-pull}81141[#81141] (issue: {es-issue}80955[#80955]) - -[[feature-8.1.0]] -[float] -=== New features - -Authentication:: -* Initial version of JWT Realm {es-pull}82175[#82175] -* Introduce domain setting to associate realms {es-pull}81968[#81968] - -Distributed:: -* Add desired nodes API {es-pull}82975[#82975] - -Geo:: -* New `GeoHexGrid` aggregation {es-pull}82924[#82924] - -Health:: -* Model for the new health reporting api {es-pull}83398[#83398] - -TSDB:: -* Handle `fields.with.dots` in `routing_path` {es-pull}83148[#83148] - -Transform:: -* Add transform reset API {es-pull}79828[#79828] (issue: {es-issue}75768[#75768]) - -[[upgrade-8.1.0]] -[float] -=== Upgrades - -Geo:: -* Update vector tiles google protobuf to 3.16.1 {es-pull}83402[#83402] - -Network:: -* Upgrade to Netty 4.1.73 {es-pull}82844[#82844] - -Packaging:: -* Bump bundled JDK to 17.0.2+8 {es-pull}83243[#83243] (issue: {es-issue}83242[#83242]) - -Security:: -* Upgrade jANSI dependency to 2.4.0 {es-pull}83566[#83566] - - - diff --git a/docs/reference/release-notes/8.1.1.asciidoc b/docs/reference/release-notes/8.1.1.asciidoc deleted file mode 100644 index d0ad36fd36487..0000000000000 --- a/docs/reference/release-notes/8.1.1.asciidoc +++ /dev/null @@ -1,65 +0,0 @@ -[[release-notes-8.1.1]] -== {es} version 8.1.1 - -Also see <>. -[[known-issues-8.1.1]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[bug-8.1.1]] -[float] -=== Bug fixes - -Analysis:: -* Fix `min_hash` configuration settings names {es-pull}84753[#84753] (issue: {es-issue}84578[#84578]) - -EQL:: -* Clean any used memory by the sequence matcher and circuit breaker used bytes in case of exception {es-pull}84451[#84451] - -Engine:: -* Increase store ref before snapshotting index commit {es-pull}84776[#84776] - -ILM+SLM:: -* Invoke initial `AsyncActionStep` for newly created indices {es-pull}84541[#84541] (issue: {es-issue}77269[#77269]) - -Indices APIs:: -* Remove existing `indices/datastreams/aliases` before simulating index template {es-pull}84675[#84675] (issue: {es-issue}84256[#84256]) - -Infra/Core:: -* Fix `NullPointerException` in `SystemIndexMetadataUpgradeService` hidden alias handling {es-pull}84780[#84780] (issue: {es-issue}81411[#81411]) -* Require and preserve content type for filtered rest requests {es-pull}84914[#84914] (issue: {es-issue}84784[#84784]) -* Wrap thread creation in `doPrivileged` call {es-pull}85180[#85180] - -Infra/REST API:: -* Correctly return `_type` field for documents in V7 compatiblity mode {es-pull}84873[#84873] (issue: {es-issue}84173[#84173]) - -Ingest:: -* Backport mark `GeoIpDownloaderTask` as completed after cancellation #84028 {es-pull}85014[#85014] (issues: {es-issue}84028[#84028], {es-issue}84652[#84652]) -* `CompoundProcessor` should also catch exceptions when executing a processor {es-pull}84838[#84838] (issue: {es-issue}84781[#84781]) - -Machine Learning:: -* Fix Kibana date format and similar overrides in text structure endpoint {es-pull}84967[#84967] -* Fixes for multi-line start patterns in text structure endpoint {es-pull}85066[#85066] -* Return all datafeeds in get anomaly detection jobs API {es-pull}84759[#84759] - -Packaging:: -* Remove use of Cloudflare zlib {es-pull}84680[#84680] - -Search:: -* Fix point visitor in `DiskUsage` API {es-pull}84909[#84909] -* `DotExpandingXContentParser` to expose the original token location {es-pull}84970[#84970] - -Snapshot/Restore:: -* Don't fail if there no symlink for AWS Web Identity Token {es-pull}84697[#84697] -* Lookup AWS Region for STS Client from STS endpoint {es-pull}84585[#84585] (issue: {es-issue}83826[#83826]) - -[[enhancement-8.1.1]] -[float] -=== Enhancements - -SQL:: -* Forward warning headers to JDBC driver {es-pull}84499[#84499] - -Watcher:: -* Add list of allowed domains for Watcher email action {es-pull}84894[#84894] (issue: {es-issue}84739[#84739]) diff --git a/docs/reference/release-notes/8.1.2.asciidoc b/docs/reference/release-notes/8.1.2.asciidoc deleted file mode 100644 index ed557e41a8a87..0000000000000 --- a/docs/reference/release-notes/8.1.2.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -[[release-notes-8.1.2]] -== {es} version 8.1.2 - -Also see <>. -[[known-issues-8.1.2]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[bug-8.1.2]] -[float] -=== Bug fixes - -Authorization:: -* Add delete privilege to `kibana_system` for APM {es-pull}85085[#85085] - -Engine:: -* Increase store ref before snapshotting index commit {es-pull}84776[#84776] - -Infra/Core:: -* Return empty version instead of blowing up if we cannot find it {es-pull}85244[#85244] -* Validate index format agreement for system index descriptors {es-pull}85173[#85173] -* Wrap thread creation in `doPrivileged` call {es-pull}85180[#85180] - -Machine Learning:: -* Do not fetch source when finding index of last state docs {es-pull}85334[#85334] -* Fixes for multi-line start patterns in text structure endpoint {es-pull}85066[#85066] -* Reallocate model deployments on node shutdown events {es-pull}85310[#85310] - -Mapping:: -* Do not fail on duplicated content field filters {es-pull}85382[#85382] - -Search:: -* Increase store ref before analyzing disk usage {es-pull}84774[#84774] -* Limit concurrent shard requests in disk usage API {es-pull}84900[#84900] (issue: {es-issue}84779[#84779]) -* `TransportBroadcastAction` should always set response for each shard {es-pull}84926[#84926] - -Snapshot/Restore:: -* Fix leaking listeners bug on frozen tier {es-pull}85239[#85239] - -Watcher:: -* No longer require master node to install Watcher templates {es-pull}85287[#85287] (issue: {es-issue}85043[#85043]) - - diff --git a/docs/reference/release-notes/8.1.3.asciidoc b/docs/reference/release-notes/8.1.3.asciidoc deleted file mode 100644 index e69e6f4cc0480..0000000000000 --- a/docs/reference/release-notes/8.1.3.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -[[release-notes-8.1.3]] -== {es} version 8.1.3 - -Also see <>. -[[known-issues-8.1.3]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[bug-8.1.3]] -[float] -=== Bug fixes - -Authorization:: -* Ignore app priv failures when resolving superuser {es-pull}85519[#85519] - -Machine Learning:: -* Avoid multiple queued quantiles documents in renormalizer {es-pull}85555[#85555] (issue: {es-issue}85539[#85539]) - -Mapping:: -* Do not fail on duplicated content field filters {es-pull}85382[#85382] - -Search:: -* Fix skip caching factor with `indices.queries.cache.all_segments` {es-pull}85510[#85510] - -Snapshot/Restore:: -* Expose proxy settings for GCS repositories {es-pull}85785[#85785] (issue: {es-issue}84569[#84569]) - -Watcher:: -* Avoiding watcher validation errors when a data stream points to more than one index {es-pull}85507[#85507] (issue: {es-issue}85508[#85508]) -* Log at WARN level for Watcher cluster state validation errors {es-pull}85632[#85632] - - diff --git a/docs/reference/release-notes/8.10.0.asciidoc b/docs/reference/release-notes/8.10.0.asciidoc deleted file mode 100644 index 34d1d26e5d69a..0000000000000 --- a/docs/reference/release-notes/8.10.0.asciidoc +++ /dev/null @@ -1,289 +0,0 @@ -[[release-notes-8.10.0]] -== {es} version 8.10.0 - -Also see <>. - -[[known-issues-8.10.0]] -[float] -=== Known issues - -// tag::repositorydata-format-change[] -* Snapshot-based downgrades -+ -The snapshot repository format changed in a manner that prevents earlier -versions of Elasticsearch from reading the repository contents if it contains -snapshots from this version and the last cluster to write to this repository -was in the 8.10 series. This will prevent you from reverting an upgrade to the -8.10 series by restoring a snapshot taken before the upgrade. -+ -Snapshot repositories written by clusters running versions 8.11.0 and later are -compatible with all earlier versions. Moreover, clusters running version 8.11.0 -or later will also automatically repair the repository format the first time -they write to the repository to take or delete a snapshot, making it so that -all earlier versions can read its contents again. -+ -If you wish to downgrade to a version prior to 8.9.0, take or delete a snapshot -using a cluster running version 8.11.0 or later to repair the repository format -first. If you cannot repair the repository in this way, first delete all the -snapshots in the repository taken with version 8.9.0 or later. To do this will -require using a cluster running version 8.10.0 or later. -+ -If you wish to downgrade to a version in the 8.9 series, you must take or delete -a snapshot using a cluster running version 8.11.0 or later to repair the -repository format first. If you cannot repair the repository in this way, first -delete all the snapshots in the repository taken with version 8.10.0 or later -using a cluster running version 8.10.4. -// end::repositorydata-format-change[] - -include::8.7.1.asciidoc[tag=no-preventive-gc-issue] - -[[breaking-8.10.0]] -[float] -=== Breaking changes - -Analysis:: -* Change pre-configured and cached analyzer components to use IndexVersion instead of Version {es-pull}97319[#97319] - -Geo:: -* Remove the unused executor builder for vector tile plugin {es-pull}96577[#96577] - -[[bug-8.10.0]] -[float] -=== Bug fixes - -Aggregations:: -* Cardinality nested in time series doc values bug {es-pull}99007[#99007] -* Skip segment for `MatchNoDocsQuery` filters {es-pull}98295[#98295] (issue: {es-issue}94637[#94637]) - -Allocation:: -* Do not assign ignored shards {es-pull}98265[#98265] -* Remove exception wrapping in `BatchedRerouteService` {es-pull}97224[#97224] - -Application:: -* [Profiling] Abort index creation on outdated index {es-pull}98864[#98864] -* [Profiling] Consider static settings in status {es-pull}97890[#97890] -* [Profiling] Mark executables without a name {es-pull}98884[#98884] - -CRUD:: -* Add missing sync on `indicesThatCannotBeCreated` {es-pull}97869[#97869] - -Cluster Coordination:: -* Fix cluster bootstrap warning for single-node discovery {es-pull}96895[#96895] (issue: {es-issue}96874[#96874]) -* Fix election scheduling after discovery outage {es-pull}98420[#98420] -* Improve reliability of elections with message delays {es-pull}98354[#98354] (issue: {es-issue}97909[#97909]) -* Make `TransportAddVotingConfigExclusionsAction` retryable {es-pull}98386[#98386] -* Release master service task on timeout {es-pull}97711[#97711] - -Data streams:: -* Avoid lifecycle NPE in the data stream lifecycle usage API {es-pull}98260[#98260] - -Distributed:: -* Avoid `transport_worker` thread in `TransportBroadcastAction` {es-pull}98001[#98001] -* Avoid `transport_worker` thread in `TransportBroadcastByNodeAction` {es-pull}97920[#97920] (issue: {es-issue}97914[#97914]) -* Fork response reading in `TransportNodesAction` {es-pull}97899[#97899] - -Downsampling:: -* Copy "index.lifecycle.name" for ILM managed indices {es-pull}97110[#97110] (issue: {es-issue}96732[#96732]) -* Downsampling: copy the `_tier_preference` setting {es-pull}96982[#96982] (issue: {es-issue}96733[#96733]) - -EQL:: -* Fix async missing events {es-pull}97718[#97718] (issue: {es-issue}97644[#97644]) - -Geo:: -* Fix how Maps#flatten handle map values inside a list {es-pull}98828[#98828] -* Fix mvt error when returning partial results {es-pull}98765[#98765] (issue: {es-issue}98730[#98730]) - -Health:: -* `_health_report` SLM indicator should use the policy ID (not the name) {es-pull}99111[#99111] - -Indices APIs:: -* Ensure frozen indices have correct tier preference {es-pull}97967[#97967] - -Infra/REST API:: -* Fix possible NPE when transportversion is null in `MainResponse` {es-pull}97203[#97203] - -Ingest Node:: -* Revert "Add mappings for enrich fields" {es-pull}98683[#98683] - -Machine Learning:: -* Avoid risk of OOM in datafeeds when memory is constrained {es-pull}98324[#98324] (issue: {es-issue}89769[#89769]) -* Detect infinite loop in the WordPiece tokenizer {es-pull}98206[#98206] -* Fix to stop aggregatable subobjects from being considered multi-fields, to support `"subobjects": false` in data frame analytics {es-pull}97705[#97705] (issue: {es-issue}88605[#88605]) -* Fix weird `change_point` bug where all data values are equivalent {es-pull}97588[#97588] -* The model loading service should not notify listeners in a sync block {es-pull}97142[#97142] - -Mapping:: -* Fix `fields` API with `subobjects: false` {es-pull}97092[#97092] (issue: {es-issue}96700[#96700]) - -Network:: -* Fork remote-cluster response handling {es-pull}97922[#97922] - -Search:: -* Fork CCS remote-cluster responses {es-pull}98124[#98124] (issue: {es-issue}97997[#97997]) -* Fork CCS search-shards handling {es-pull}98209[#98209] -* Improve test coverage for CCS search cancellation and fix response bugs {es-pull}97029[#97029] -* Make `terminate_after` early termination friendly {es-pull}97540[#97540] (issue: {es-issue}97269[#97269]) -* Track `max_score` in collapse when requested {es-pull}97703[#97703] (issue: {es-issue}97653[#97653]) - -Security:: -* Fix NPE when `GetUser` with profile uid before profile index exists {es-pull}98961[#98961] - -Snapshot/Restore:: -* Fix `BlobCacheBufferedIndexInput` large read after clone {es-pull}98970[#98970] - -TSDB:: -* Mapped field types searchable with doc values {es-pull}97724[#97724] - -Transform:: -* Fix transform incorrectly calculating date bucket on updating old data {es-pull}97401[#97401] (issue: {es-issue}97101[#97101]) - -Watcher:: -* Changing watcher to disable cookies in shared http client {es-pull}97591[#97591] - -[[deprecation-8.10.0]] -[float] -=== Deprecations - -Authorization:: -* Mark `apm_user` for removal in a future major release {es-pull}87674[#87674] - -[[enhancement-8.10.0]] -[float] -=== Enhancements - -Aggregations:: -* Improve error message when aggregation doesn't support counter field {es-pull}93545[#93545] -* Set default index mode for `TimeSeries` to `null` {es-pull}98808[#98808] (issue: {es-issue}97429[#97429]) - -Allocation:: -* Add `node.roles` to cat allocation API {es-pull}96994[#96994] - -Application:: -* [Profiling] Add initial support for upgrades {es-pull}97380[#97380] -* [Profiling] Support index migrations {es-pull}97773[#97773] - -Authentication:: -* Avoid double get {es-pull}98067[#98067] (issue: {es-issue}97928[#97928]) -* Give all acces to .slo-observability.* indice to kibana user {es-pull}97539[#97539] -* Refresh tokens without search {es-pull}97395[#97395] - -Authorization:: -* Add "operator" field to authenticate response {es-pull}97234[#97234] -* Read operator privs enabled from Env settings {es-pull}98246[#98246] -* [Fleet] Allow `kibana_system` to put datastream lifecycle {es-pull}97732[#97732] - -Data streams:: -* Install data stream template for Kibana reporting {es-pull}97765[#97765] - -Downsampling:: -* Change `MetricFieldProducer#metrics` field type from list to array {es-pull}97344[#97344] -* Improve iterating over many field producers during downsample operation {es-pull}97281[#97281] -* Run downsampling using persistent tasks {es-pull}97557[#97557] (issue: {es-issue}93582[#93582]) - -EQL:: -* EQL to use only the necessary fields in the internal `field_caps` calls {es-pull}98987[#98987] - -Engine:: -* Fix edge case for active flag for flush on idle {es-pull}97332[#97332] (issue: {es-issue}97154[#97154]) - -Health:: -* Adding special logic to the disk health check for search-only nodes {es-pull}98508[#98508] -* Health API Periodic Logging {es-pull}96772[#96772] - -ILM+SLM:: -* Separating SLM from ILM {es-pull}98184[#98184] - -Infra/Core:: -* Infrastructure to report upon document parsing {es-pull}97961[#97961] - -Infra/Node Lifecycle:: -* Check ILM status before reporting node migration STALLED {es-pull}98367[#98367] (issue: {es-issue}89486[#89486]) - -Infra/Plugins:: -* Adding `ApiFilteringActionFilter` {es-pull}97985[#97985] - -Infra/REST API:: -* Enable Serverless API protections dynamically {es-pull}97079[#97079] -* Make `RestController` pluggable {es-pull}98187[#98187] - -Infra/Settings:: -* Mark customer settings for serverless {es-pull}98051[#98051] - -Ingest Node:: -* Allow custom geo ip database files to be downloaded {es-pull}97850[#97850] - -Network:: -* Add request header size limit for RCS transport connections {es-pull}98692[#98692] - -Search:: -* Add `completion_time` time field to `async_search` get and status response {es-pull}97700[#97700] (issue: {es-issue}88640[#88640]) -* Add setting for search parallelism {es-pull}98455[#98455] -* Add support for concurrent collection when size is greater than zero {es-pull}98425[#98425] -* Cross-cluster search provides details about search on each cluster {es-pull}97731[#97731] -* Enable parallel collection in Dfs phase {es-pull}97416[#97416] -* Exclude clusters from a cross-cluster search {es-pull}97865[#97865] -* Improve MatchNoDocsQuery description {es-pull}96069[#96069] (issue: {es-issue}95741[#95741]) -* Improve exists query rewrite {es-pull}97159[#97159] -* Improve match query rewrite {es-pull}97208[#97208] -* Improve prefix query rewrite {es-pull}97209[#97209] -* Improve wildcard query and terms query rewrite {es-pull}97594[#97594] -* Introduce Synonyms Management API used for synonym and synonym_graph filters {es-pull}97962[#97962] (issue: {es-issue}38523[#38523]) -* Introduce a collector manager for `PartialHitCountCollector` {es-pull}97550[#97550] -* Introduce a collector manager for `QueryPhaseCollector` {es-pull}97410[#97410] -* Limit `_terms_enum` prefix size {es-pull}97488[#97488] (issue: {es-issue}96572[#96572]) -* Support minimum_should_match field for terms_set query {es-pull}96082[#96082] -* Support type for simple query string {es-pull}96717[#96717] -* Unwrap IOException in `ContextIndexSearcher` concurrent code-path {es-pull}98459[#98459] -* Use a collector manager in DfsPhase Knn Search {es-pull}96689[#96689] -* Use the Weight#matches mode for highlighting by default {es-pull}96068[#96068] -* Wire `QueryPhaseCollectorManager` into the query phase {es-pull}97726[#97726] -* Wire concurrent top docs collector managers when size is 0 {es-pull}97755[#97755] -* `ProfileCollectorManager` to support child profile collectors {es-pull}97387[#97387] -* cleanup some code NoriTokenizerFactory and KuromojiTokenizerFactory {es-pull}92574[#92574] - -Security:: -* Add an API for managing the settings of Security system indices {es-pull}97630[#97630] -* Support getting active-only API keys via Get API keys API {es-pull}98259[#98259] (issue: {es-issue}97995[#97995]) - -Snapshot/Restore:: -* Add Setting to optionally use mmap for shared cache IO {es-pull}97581[#97581] -* Collect additional object store stats for S3 {es-pull}98083[#98083] -* HDFS plugin add replication_factor param {es-pull}94132[#94132] - -Store:: -* Allow Lucene directory implementations to estimate their size {es-pull}97822[#97822] -* Allow `ByteSizeDirectory` to expose their data set sizes {es-pull}98085[#98085] - -TSDB:: -* Add tsdb metrics builtin component template {es-pull}97602[#97602] -* Include more downsampling status statistics {es-pull}96930[#96930] (issue: {es-issue}96760[#96760]) -* `TimeSeriesIndexSearcher` to offload to the provided executor {es-pull}98414[#98414] - -Transform:: -* Support boxplot aggregation in transform {es-pull}96515[#96515] - -[[feature-8.10.0]] -[float] -=== New features - -Application:: -* Enable Query Rules as technical preview {es-pull}97466[#97466] -* [Enterprise Search] Add connectors indices and ent-search pipeline {es-pull}97463[#97463] - -Data streams:: -* Introduce downsampling configuration for data stream lifecycle {es-pull}97041[#97041] - -Search:: -* Introduce executor for concurrent search {es-pull}98204[#98204] - -Security:: -* Beta release for API key based cross-cluster access {es-pull}98307[#98307] - -[[upgrade-8.10.0]] -[float] -=== Upgrades - -Network:: -* Upgrade Netty to 4.1.94.Final {es-pull}97040[#97040] - - diff --git a/docs/reference/release-notes/8.10.1.asciidoc b/docs/reference/release-notes/8.10.1.asciidoc deleted file mode 100644 index 0cb00699eeac7..0000000000000 --- a/docs/reference/release-notes/8.10.1.asciidoc +++ /dev/null @@ -1,30 +0,0 @@ -[[release-notes-8.10.1]] -== {es} version 8.10.1 - -Also see <>. - -[[known-issues-8.10.1]] -[float] -=== Known issues - -include::8.10.0.asciidoc[tag=repositorydata-format-change] - -include::8.7.1.asciidoc[tag=no-preventive-gc-issue] - -[[bug-8.10.1]] -[float] -=== Bug fixes - -Aggregations:: -* Use long in Centroid count {es-pull}99491[#99491] (issue: {es-issue}80153[#80153]) - -Infra/Core:: -* Fix deadlock between Cache.put and Cache.invalidateAll {es-pull}99480[#99480] (issue: {es-issue}99326[#99326]) - -Infra/Node Lifecycle:: -* Fork computation in `TransportGetShutdownStatusAction` {es-pull}99490[#99490] (issue: {es-issue}99487[#99487]) - -Search:: -* Fix PIT when resolving with deleted indices {es-pull}99281[#99281] - - diff --git a/docs/reference/release-notes/8.10.2.asciidoc b/docs/reference/release-notes/8.10.2.asciidoc deleted file mode 100644 index 911a410104a26..0000000000000 --- a/docs/reference/release-notes/8.10.2.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -[[release-notes-8.10.2]] -== {es} version 8.10.2 - -[[known-issues-8.10.2]] -[float] -=== Known issues - -include::8.10.0.asciidoc[tag=repositorydata-format-change] - -include::8.7.1.asciidoc[tag=no-preventive-gc-issue] - -Also see <>. diff --git a/docs/reference/release-notes/8.10.3.asciidoc b/docs/reference/release-notes/8.10.3.asciidoc deleted file mode 100644 index 119930058a42e..0000000000000 --- a/docs/reference/release-notes/8.10.3.asciidoc +++ /dev/null @@ -1,87 +0,0 @@ -[[release-notes-8.10.3]] -== {es} version 8.10.3 - -[[known-issues-8.10.3]] -[float] -=== Known issues - -include::8.10.0.asciidoc[tag=repositorydata-format-change] - -// tag::no-preventive-gc-issue[] -* High Memory Pressure due to a GC change in JDK 21 -+ -This version of Elasticsearch is bundled with JDK 21. In JDK 21 -https://bugs.openjdk.org/browse/JDK-8297639[Preventive GC has been removed]. -This may lead to increased memory pressure and an increased number of CircuitBreakerExceptions when retrieving large -documents under some particular load. (issue: {es-issue}99592[#99592]) -+ -If you needed to explicitly <>, we recommend you avoid to upgrade to this version, as the settings to enable Preventive GC have been removed -from JDK 21. -// end::no-preventive-gc-issue[] - -Also see <>. - -[[bug-8.10.3]] -[float] -=== Bug fixes - -Aggregations:: -* Fix cardinality agg for `const_keyword` {es-pull}99814[#99814] (issue: {es-issue}99776[#99776]) - -Distributed:: -* Skip settings validation during desired nodes updates {es-pull}99946[#99946] - -Highlighting:: -* Implement matches() on `SourceConfirmedTextQuery` {es-pull}100252[#100252] - -ILM+SLM:: -* ILM introduce the `check-ts-end-time-passed` step {es-pull}100179[#100179] (issue: {es-issue}99696[#99696]) -* ILM the delete action waits for a TSDS index time/bounds to lapse {es-pull}100207[#100207] - -Ingest Node:: -* Validate enrich index before completing policy execution {es-pull}100106[#100106] - -Machine Learning:: -* Adding retry logic for start model deployment API {es-pull}99673[#99673] -* Using 1 MB chunks for elser model storage {es-pull}99677[#99677] - -Search:: -* Close expired search contexts on SEARCH thread {es-pull}99660[#99660] -* Fix fields API for `geo_point` fields inside other arrays {es-pull}99868[#99868] (issue: {es-issue}99781[#99781]) - -Snapshot/Restore:: -* Support $ and / in restore rename replacements {es-pull}99892[#99892] (issue: {es-issue}99078[#99078]) - -Transform:: -* Do not use PIT in the presence of remote indices in source {es-pull}99803[#99803] -* Ignore "index not found" error when `delete_dest_index` flag is set but the dest index doesn't exist {es-pull}99738[#99738] -* Let `_stats` internally timeout if checkpoint information can not be retrieved {es-pull}99914[#99914] - -Vector Search:: -* Update version range in `jvm.options` for the Panama Vector API {es-pull}99846[#99846] - -[[enhancement-8.10.3]] -[float] -=== Enhancements - -Authorization:: -* Add manage permission for fleet managed threat intel indices {es-pull}99231[#99231] - -Highlighting:: -* Implement matches() on `SourceConfirmedTextQuery` {es-pull}100134[#100134] - -Ingest Node:: -* Show a concrete error when the enrich index does not exist rather than a NullPointerException {es-pull}99604[#99604] - -Search:: -* Add checks in term and terms queries that input terms are not too long {es-pull}99818[#99818] (issue: {es-issue}99802[#99802]) - -[[upgrade-8.10.3]] -[float] -=== Upgrades - -Packaging:: -* Upgrade bundled JDK to Java 21 {es-pull}99724[#99724] - - diff --git a/docs/reference/release-notes/8.10.4.asciidoc b/docs/reference/release-notes/8.10.4.asciidoc deleted file mode 100644 index 6c49bae1e2150..0000000000000 --- a/docs/reference/release-notes/8.10.4.asciidoc +++ /dev/null @@ -1,45 +0,0 @@ -[[release-notes-8.10.4]] -== {es} version 8.10.4 - -[[known-issues-8.10.4]] -[float] -=== Known issues - -* Snapshot-based downgrades -+ -The snapshot repository format changed in a manner that prevents earlier -versions of Elasticsearch from reading the repository contents if it contains -snapshots from this version and the last cluster to write to this repository was -in the 8.10 series. This will prevent you from reverting an upgrade to the 8.10 -series by restoring a snapshot taken before the upgrade. -+ -Snapshot repositories written by clusters running versions 8.11.0 and later are -compatible with all earlier versions. Moreover, clusters running version 8.11.0 -or later will also automatically repair the repository format the first time -they write to the repository to take or delete a snapshot, making it so that all -earlier versions can read its contents again. -+ -If you wish to downgrade to a version prior to 8.10.0, take or delete a snapshot -using a cluster running version 8.11.0 or later to repair the repository format -first. If you cannot repair the repository in this way, first delete all the -snapshots in the repository taken with version 8.10.0 or later using a cluster -running version 8.10.4. - -include::8.10.3.asciidoc[tag=no-preventive-gc-issue] - -Also see <>. - -[[bug-8.10.4]] -[float] -=== Bug fixes - -Search:: -* Search of remote clusters with no shards results in successful status {es-pull}100354[#100354] - -Snapshot/Restore:: -* Improve `RepositoryData` BwC {es-pull}100401[#100401] - -Transform:: -* Shutdown the task immediately when `force` == `true` {es-pull}100203[#100203] - - diff --git a/docs/reference/release-notes/8.11.0.asciidoc b/docs/reference/release-notes/8.11.0.asciidoc deleted file mode 100644 index acb27dc180727..0000000000000 --- a/docs/reference/release-notes/8.11.0.asciidoc +++ /dev/null @@ -1,342 +0,0 @@ -[[release-notes-8.11.0]] -== {es} version 8.11.0 - -Also see <>. - -[[breaking-8.11.0]] -[float] -=== Breaking changes - -Infra/Core:: -* Remove `transport_versions` from cluster state API {es-pull}99223[#99223] - -[[known-issues-8.11.0]] -[float] -=== Known issues -include::8.10.3.asciidoc[tag=no-preventive-gc-issue] - -[[bug-8.11.0]] -[float] -=== Bug fixes - -Aggregations:: -* Adjust `DateHistogram's` bucket accounting to be iteratively {es-pull}101012[#101012] -* Allow parsing on non-string routing fields {es-pull}97729[#97729] -* Support runtime fields in synthetic source {es-pull}99796[#99796] (issue: {es-issue}98287[#98287]) - -Allocation:: -* Consider node shutdown in `DataTierAllocationDecider` {es-pull}98824[#98824] (issue: {es-issue}97207[#97207]) - -Application:: -* Align look-back with client-side cache {es-pull}101264[#101264] -* Increase K/V look-back time interval {es-pull}101205[#101205] -* Provide stable resampling {es-pull}101255[#101255] -* [Profiling] Tighten resource creation check {es-pull}99873[#99873] - -Authorization:: -* Allow `enrich_user` to read/view enrich indices {es-pull}100707[#100707] -* Grant editor and viewer access to profiling {es-pull}100594[#100594] - -CCR:: -* CCR: Use local cluster state request {es-pull}100323[#100323] - -CRUD:: -* Change `GetFromTranslog` to indices action {es-pull}99300[#99300] -* Wait for cluster to recover before resolving index template {es-pull}99797[#99797] - -Cluster Coordination:: -* Reset `GatewayService` flags before reroute {es-pull}98653[#98653] (issue: {es-issue}98606[#98606]) - -Data streams:: -* DSL waits for the tsdb time boundaries to lapse {es-pull}100470[#100470] (issue: {es-issue}99696[#99696]) -* Propagate cancellation in `DataTiersUsageTransportAction` {es-pull}100253[#100253] -* [DSL] skip deleting indices that have in-progress downsampling operations {es-pull}101495[#101495] - -Downsampling:: -* Make downsample target index replicas configurable {es-pull}99712[#99712] - -ES|QL:: -* "params" correctly parses the values including an optional "type" {es-pull}99310[#99310] (issue: {es-issue}99294[#99294]) -* Account for an exception being thrown when building a `BytesRefArrayBlock` {es-pull}99726[#99726] (issue: {es-issue}99472[#99472]) -* Add arithmetic operators {es-pull}98628[#98628] -* Add identity check in Block equality {es-pull}100377[#100377] (issue: {es-issue}100374[#100374]) -* Adds Enrich implicit `match_fields` to `field_caps` call {es-pull}101456[#101456] (issue: {es-issue}101328[#101328]) -* Better management of not stored TEXT fiels with synthetic source {es-pull}99695[#99695] -* Continue resolving attributes for Eval {es-pull}99601[#99601] (issue: {es-issue}99576[#99576]) -* Create a Vector when needed for IN {es-pull}99382[#99382] (issue: {es-issue}99347[#99347]) -* ESQL: Fix unreleased block in topn {es-pull}101648[#101648] (issue: {es-issue}101588[#101588]) -* ESQL: check type before casting {es-pull}101492[#101492] (issue: {es-issue}101489[#101489]) -* Fix NPE when aggregating literals {es-pull}99827[#99827] -* Fix escaping of backslash in LIKE operator {es-pull}101120[#101120] (issue: {es-issue}101106[#101106]) -* Fix eval of functions on foldable literals {es-pull}101438[#101438] (issue: {es-issue}101425[#101425]) -* Fix non-null value being returned for unsupported data types in `ValueSources` {es-pull}100656[#100656] (issue: {es-issue}100048[#100048]) -* Graceful handling of non-bool condition in the filter {es-pull}100645[#100645] (issues: {es-issue}100049[#100049], {es-issue}100409[#100409]) -* Handle queries with non-existing enrich policies and no field {es-pull}100647[#100647] (issue: {es-issue}100593[#100593]) -* Implement serialization of `InvalidMappedField` {es-pull}98972[#98972] (issue: {es-issue}98851[#98851]) -* Improve verifier error for incorrect agg declaration {es-pull}100650[#100650] (issue: {es-issue}100641[#100641]) -* Limit how many bytes `concat()` can process {es-pull}100360[#100360] -* Make DISSECT parameter `append_separator` case insensitive {es-pull}101358[#101358] (issue: {es-issue}101138[#101138]) -* Page shouldn't close a block twice {es-pull}100370[#100370] (issues: {es-issue}100356[#100356], {es-issue}100365[#100365]) -* Preserve intermediate aggregation output in local relation {es-pull}100866[#100866] (issue: {es-issue}100807[#100807]) -* Properly handle multi-values in fold() and date math {es-pull}100766[#100766] (issue: {es-issue}100497[#100497]) -* Remove aliasing inside Eval {es-pull}100238[#100238] (issue: {es-issue}100174[#100174]) -* Resilience to non-indexed fields {es-pull}99588[#99588] (issue: {es-issue}99506[#99506]) -* Skip synthetic attributes when planning the physical fragment {es-pull}99188[#99188] (issue: {es-issue}99170[#99170]) -* Support date and time intervals as input params {es-pull}101001[#101001] (issue: {es-issue}99570[#99570]) -* Support queries that don't return underlying fields {es-pull}98759[#98759] (issue: {es-issue}98404[#98404]) -* Use exact attributes for data source extraction {es-pull}99874[#99874] (issue: {es-issue}99183[#99183]) -* `mv_expand` pushes down limit and project and keep the limit after it untouched {es-pull}100782[#100782] (issues: {es-issue}99971[#99971], {es-issue}100774[#100774]) -* support metric tsdb fields while querying index patterns {es-pull}100351[#100351] (issue: {es-issue}100144[#100144]) - -Geo:: -* Use `NamedWritable` to enable `GeoBoundingBox` serialisation {es-pull}99163[#99163] (issue: {es-issue}99089[#99089]) - -Health:: -* Fix NPE in `StableMasterHealthIndicatorService` {es-pull}98635[#98635] -* Health report infrastructure doesn't trip the circuit breakers {es-pull}101629[#101629] -* Propagate cancellation in `GetHealthAction` {es-pull}100273[#100273] - -Highlighting:: -* Correctly handle `ScriptScoreQuery` in plain highlighter {es-pull}99804[#99804] (issue: {es-issue}99700[#99700]) -* Disable `weight_matches` when kNN query is present {es-pull}101713[#101713] - -ILM+SLM:: -* Compute SLM retention from `RepositoryData` {es-pull}100092[#100092] (issue: {es-issue}99953[#99953]) -* `WaitForSnapshotStep` verifies if the index belongs to the latest snapshot of that SLM policy {es-pull}100911[#100911] - -Infra/Core:: -* Add `java.net.NetPermission` to APM module's permissions {es-pull}99474[#99474] -* Don't update system index mappings in mixed clusters {es-pull}101778[#101778] (issues: {es-issue}101331[#101331], {es-issue}99778[#99778]) -* Revert "Kibana system index does not allow user templates to affect it" {es-pull}98888[#98888] -* Specify correct current `IndexVersion` after 8.10 release {es-pull}98574[#98574] (issue: {es-issue}98555[#98555]) -* Tracing: Use `doPriv` when working with spans, use `SpanId` {es-pull}100232[#100232] - -Infra/Scripting:: -* Improve painless error wrapping {es-pull}100872[#100872] - -Ingest Node:: -* Improving tika handling {es-pull}101486[#101486] -* Update enrich execution to only set index false on fields that support it {es-pull}98038[#98038] (issue: {es-issue}98019[#98019]) - -Machine Learning:: -* Avoid risk of OOM in datafeeds when memory is constrained {es-pull}98915[#98915] (issue: {es-issue}89769[#89769]) -* Fix for inference requests being sent to every node with a model allocation. If there are more nodes than items in the original request then empty requests were sent. {es-pull}100388[#100388] (issue: {es-issue}100180[#100180]) -* Preserve order of inference results when calling the _infer API with multiple inputs on a model deployment with more than one allocation the output results order was not guaranteed to match the input order. The fix ensures the output order matches the input order. {es-pull}100143[#100143] -* Remove noisy 'Could not find trained model' message {es-pull}100760[#100760] -* Safely drain deployment request queues before allowing node to shutdown {es-pull}98406[#98406] -* Use the correct writable name for model assignment metadata in mixed version clusters. Prevents a node failure due to IllegalArgumentException Unknown NamedWriteable [trained_model_assignment] {es-pull}100886[#100886] -* Wait to gracefully stop deployments until alternative allocation exists {es-pull}99107[#99107] - -Mapping:: -* Automatically disable `ignore_malformed` on datastream `@timestamp` fields {es-pull}99346[#99346] -* Correct behaviour of `ContentPath::remove()` {es-pull}98332[#98332] (issue: {es-issue}98327[#98327]) -* Fix merges of mappings with `subobjects: false` for composable index templates {es-pull}97317[#97317] (issue: {es-issue}96768[#96768]) -* Percolator to support parsing script score query with params {es-pull}101051[#101051] (issue: {es-issue}97377[#97377]) - -Network:: -* Do not report failure after connections are made {es-pull}99117[#99117] - -Percolator:: -* Fix percolator query for stored queries that expand on wildcard field names {es-pull}98878[#98878] - -Query Languages:: -* Preserve subfields for unsupported types {es-pull}100875[#100875] (issue: {es-issue}100869[#100869]) - -Recovery:: -* Fix interruption of `markAllocationIdAsInSync` {es-pull}100610[#100610] (issues: {es-issue}96578[#96578], {es-issue}100589[#100589]) - -Search:: -* Consistent scores for multi-term `SourceConfirmedTestQuery` {es-pull}100846[#100846] (issue: {es-issue}98712[#98712]) -* Fix UnsignedLong field range query gt "0" can get the result equal to 0 {es-pull}98843[#98843] -* Fix `advanceExact` for doc values from sources {es-pull}99685[#99685] -* Fork response-sending in `OpenPointInTimeAction` {es-pull}99222[#99222] -* [CI] `SearchResponseTests#testSerialization` failing resolved {es-pull}100020[#100020] (issue: {es-issue}100005[#100005]) -* fix fuzzy query rewrite parameter not work {es-pull}97642[#97642] - -Security:: -* Fix NullPointerException in RotableSecret {es-pull}100779[#100779] (issue: {es-issue}99759[#99759]) - -Snapshot/Restore:: -* Fix race condition in `SnapshotsService` {es-pull}101652[#101652] -* Fix snapshot double finalization {es-pull}101497[#101497] -* Fix thread context in `getRepositoryData` {es-pull}99627[#99627] -* Frozen index input clone copy cache file {es-pull}98930[#98930] -* Make S3 anti-contention delay configurable {es-pull}101245[#101245] -* More robust timeout for repo analysis {es-pull}101184[#101184] (issue: {es-issue}101182[#101182]) -* Register `repository_s3` settings {es-pull}101344[#101344] -* Reinstate `RepositoryData` BwC {es-pull}100447[#100447] - -TSDB:: -* Don't ignore empty index template that have no template definition {es-pull}98840[#98840] (issue: {es-issue}98834[#98834]) -* Fix painless execute api and tsdb issue {es-pull}101212[#101212] (issue: {es-issue}101072[#101072]) -* Make tsdb settings public in Serverless {es-pull}99567[#99567] (issue: {es-issue}99563[#99563]) - -Transform:: -* Fix possible NPE when getting transform stats for failed transforms {es-pull}98061[#98061] (issue: {es-issue}98052[#98052]) -* Ignore `IndexNotFound` error when refreshing destination index {es-pull}101627[#101627] -* Make Transform Feature Reset really wait for all the tasks {es-pull}100624[#100624] -* Make tasks that calculate checkpoints cancellable {es-pull}100808[#100808] - -Watcher:: -* Treating watcher webhook response header names as case-insensitive {es-pull}99717[#99717] - -[[deprecation-8.11.0]] -[float] -=== Deprecations - -Rollup:: -* Rollup functionality is now deprecated {es-pull}101265[#101265] - -[[enhancement-8.11.0]] -[float] -=== Enhancements - -Aggregations:: -* Disable `FilterByFilterAggregator` through `ClusterSettings` {es-pull}99417[#99417] (issue: {es-issue}99335[#99335]) -* Represent histogram value count as long {es-pull}99912[#99912] (issue: {es-issue}99820[#99820]) -* Skip `DisiPriorityQueue` on single filter agg {es-pull}99215[#99215] (issue: {es-issue}99202[#99202]) -* Use a competitive iterator in `FiltersAggregator` {es-pull}98360[#98360] (issue: {es-issue}97544[#97544]) - -Allocation:: -* Report a node's "roles" setting in the /_cluster/allocation/explain response {es-pull}98550[#98550] (issue: {es-issue}97859[#97859]) - -Application:: -* Add flamegraph API {es-pull}99091[#99091] -* [Profiling] Allow to customize the ILM policy {es-pull}99909[#99909] -* [Profiling] Allow to wait until resources created {es-pull}99655[#99655] - -Audit:: -* Reduce verbosity of the bulk indexing audit log {es-pull}98470[#98470] - -Authentication:: -* Allow native users/roles to be disabled via setting {es-pull}98654[#98654] - -CAT APIs:: -* Add 'dataset' size to cat indices and cat shards {es-pull}98622[#98622] (issue: {es-issue}95092[#95092]) - -Data streams:: -* Allow explain data stream lifecycle to accept a data stream {es-pull}98811[#98811] - -ES|QL:: -* Add `CEIL` function {es-pull}98847[#98847] -* Add ability to perform date math {es-pull}98870[#98870] (issue: {es-issue}98402[#98402]) -* Add support for TEXT fields in comparison operators and SORT {es-pull}98528[#98528] (issue: {es-issue}98642[#98642]) -* Compact topn {es-pull}99316[#99316] -* Date math for negatives {es-pull}99711[#99711] -* Enable arithmetics for durations and periods {es-pull}99432[#99432] (issue: {es-issue}99293[#99293]) -* Enhance SHOW FUNCTIONS command {es-pull}99736[#99736] (issue: {es-issue}99507[#99507]) -* Improve log messages {es-pull}99470[#99470] -* Log execution time consistently {es-pull}99286[#99286] -* Log query and execution time {es-pull}99058[#99058] -* Log start and end of queries {es-pull}99746[#99746] -* Lower the implicit limit, if none is user-provided {es-pull}99816[#99816] (issue: {es-issue}99458[#99458]) -* Make settings dynamic {es-pull}101516[#101516] -* Mark counter fields as unsupported {es-pull}99054[#99054] -* Remove the swapped-args check for date_xxx() {es-pull}101362[#101362] (issue: {es-issue}99562[#99562]) -* Serialize the source in expressions {es-pull}99956[#99956] -* Simple check if all blocks get released {es-pull}100199[#100199] -* Support unsigned long in sqrt and log10 {es-pull}98711[#98711] -* Use DEBUG log level to report execution steps {es-pull}99303[#99303] - -Engine:: -* Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers {es-pull}94607[#94607] - -Health:: -* Avoiding the use of nodes that are no longer in the cluster when computing master stability {es-pull}98809[#98809] (issue: {es-issue}98636[#98636]) -* When a primary is inactive but this is considered expected, the same applies for the replica of this shard. {es-pull}99995[#99995] (issue: {es-issue}99951[#99951]) - -Infra/Core:: -* APM Metering API {es-pull}99832[#99832] -* Update the elastic-apm-agent version {es-pull}100064[#100064] -* Use mappings version to retrieve system index mappings at creation time {es-pull}99555[#99555] - -Infra/Node Lifecycle:: -* Add links to docs from failing bootstrap checks {es-pull}99644[#99644] (issue: {es-issue}99614[#99614]) -* Chunk `SingleNodeShutdownStatus` and `ShutdownShardMigrationStatus` (and related action) response {es-pull}99798[#99798] (issue: {es-issue}99678[#99678]) - -Infra/REST API:: -* Add `IndexVersion` to node info {es-pull}99515[#99515] -* Add component info versions to node info in a pluggable way {es-pull}99631[#99631] -* Return a 410 (Gone) status code for unavailable API endpoints {es-pull}97397[#97397] - -Machine Learning:: -* Add new _inference API {es-pull}99224[#99224] -* Adding an option for trained models to be platform specific {es-pull}99584[#99584] -* Log warnings for jobs unassigned for a long time {es-pull}100154[#100154] -* Simplify the Inference Ingest Processor configuration {es-pull}100205[#100205] - -Mapping:: -* Automatically flatten objects when subobjects:false {es-pull}97972[#97972] (issue: {es-issue}88934[#88934]) -* Explicit parsing object capabilities of `FieldMappers` {es-pull}98684[#98684] (issue: {es-issue}98537[#98537]) -* Reintroduce `sparse_vector` mapping {es-pull}98996[#98996] - -Network:: -* Chunk the cluster allocation explain response {es-pull}99641[#99641] (issue: {es-issue}97803[#97803]) - -Recovery:: -* Wait for cluster state in recovery {es-pull}99193[#99193] - -Search:: -* Add additional counters to `_clusters` response for all Cluster search states {es-pull}99566[#99566] (issue: {es-issue}98927[#98927]) -* Adding support for exist queries to `sparse_vector` fields {es-pull}99775[#99775] (issue: {es-issue}99319[#99319]) -* Make `_index` optional for pinned query docs {es-pull}97450[#97450] -* Reduce copying when creating scroll/PIT ids {es-pull}99219[#99219] -* Refactor `SearchResponseClusters` to use CHM {es-pull}100129[#100129] (issue: {es-issue}99101[#99101]) -* Support cluster/details for CCS minimize_roundtrips=false {es-pull}98457[#98457] - -Security:: -* Support rotatating the JWT shared secret {es-pull}99278[#99278] - -Snapshot/Restore:: -* Remove shard data files when they fail to write for snapshot {es-pull}99694[#99694] - -Stats:: -* Prune unnecessary information from TransportNodesInfoAction.NodeInfoRequest {es-pull}99938[#99938] (issue: {es-issue}99744[#99744]) - -TSDB:: -* Add `index.look_back_time` setting for tsdb data streams {es-pull}98518[#98518] (issue: {es-issue}98463[#98463]) -* Improve time-series error and documentation {es-pull}100018[#100018] -* Trim stored fields for `_id` field in tsdb {es-pull}97409[#97409] - -Transform:: -* Add accessors required to recreate `TransformStats` object from the fields {es-pull}98844[#98844] - -Vector Search:: -* Add new max_inner_product vector similarity function {es-pull}99527[#99527] -* Adds `nested` support for indexed `dense_vector` fields {es-pull}99763[#99763] -* Dense vector field types are indexed by default {es-pull}98268[#98268] -* Increase the max vector dims to 4096 {es-pull}99682[#99682] - -[[feature-8.11.0]] -[float] -=== New features - -Analysis:: -* Add support for Persian language stemmer {es-pull}99106[#99106] (issue: {es-issue}98911[#98911]) - -Application:: -* Automatically map float arrays of lengths 128 - 2048 as dense_vector {es-pull}98512[#98512] (issue: {es-issue}97532[#97532]) - -Data streams:: -* GA the data stream lifecycle {es-pull}100187[#100187] -* GET `_data_stream` displays both ILM and DSL information {es-pull}99947[#99947] - -ES|QL:: -* Integrate Elasticsearch Query Language, ES|QL {es-pull}98309[#98309] -* LEAST and GREATEST functions {es-pull}98630[#98630] -* LEFT function {es-pull}98942[#98942] -* LTRIM, RTRIM and fix unicode whitespace {es-pull}98590[#98590] -* RIGHT function {es-pull}98974[#98974] -* TopN sorting with min and max for multi-value fields {es-pull}98337[#98337] - -[[upgrade-8.11.0]] -[float] -=== Upgrades - -Packaging:: -* Update bundled JDK to 21.0.1 {es-pull}101133[#101133] - -Search:: -* Upgrade main to Lucene 9.8.0 {es-pull}100138[#100138] - - diff --git a/docs/reference/release-notes/8.11.1.asciidoc b/docs/reference/release-notes/8.11.1.asciidoc deleted file mode 100644 index b1dbc4a95c963..0000000000000 --- a/docs/reference/release-notes/8.11.1.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -[[release-notes-8.11.1]] -== {es} version 8.11.1 - -Also see <>. - -[[known-issues-8.11.1]] -[float] -=== Known issues -include::8.10.3.asciidoc[tag=no-preventive-gc-issue] - -[[bug-8.11.1]] -[float] -=== Bug fixes - -Allocation:: -* Avoid negative `DesiredBalanceStats#lastConvergedIndex` {es-pull}101998[#101998] - -Authentication:: -* Fix memory leak from JWT cache (and fix the usage of the JWT auth cache) {es-pull}101799[#101799] - -Machine Learning:: -* Fix inference timeout from the Inference Ingest Processor {es-pull}101971[#101971] - -Mapping:: -* Fix incorrect dynamic mapping for non-numeric-value arrays #101965 {es-pull}101967[#101967] - -Network:: -* Fail listener on exception in `TcpTransport#openConnection` {es-pull}101907[#101907] (issue: {es-issue}100510[#100510]) - -Search:: -* Dry up `AsyncTaskIndexService` memory management and fix inefficient circuit breaker use {es-pull}101892[#101892] - -Snapshot/Restore:: -* Respect regional AWS STS endpoints {es-pull}101705[#101705] (issue: {es-issue}89175[#89175]) - -[[enhancement-8.11.1]] -[float] -=== Enhancements - -Machine Learning:: -* Add inference counts by model to the machine learning usage stats {es-pull}101915[#101915] - - diff --git a/docs/reference/release-notes/8.11.2.asciidoc b/docs/reference/release-notes/8.11.2.asciidoc deleted file mode 100644 index 75987ce6139a6..0000000000000 --- a/docs/reference/release-notes/8.11.2.asciidoc +++ /dev/null @@ -1,89 +0,0 @@ -[[release-notes-8.11.2]] -== {es} version 8.11.2 - -Also see <>. - -[[known-issues-8.11.2]] -[float] -=== Known issues -include::8.10.3.asciidoc[tag=no-preventive-gc-issue] - -[float] -[[security-updates-8.11.2]] -=== Security updates - -* The 8.11.2 patch release contains a fix for a potential security vulnerability. https://discuss.elastic.co/c/announcements/security-announcements/31[Please see our security advisory for more details]. - -[[bug-8.11.2]] -[float] -=== Bug fixes - -Allocation:: -* Improve failure handling in `ContinuousComputation` {es-pull}102281[#102281] - -Application:: -* Default `run_ml_inference` should be true {es-pull}102151[#102151] -* [Query Rules] Fix bug where combining the same metadata with text/numeric values leads to error {es-pull}102891[#102891] (issue: {es-issue}102827[#102827]) - -Cluster Coordination:: -* Synchronize Coordinator#onClusterStateApplied {es-pull}100986[#100986] (issue: {es-issue}99023[#99023]) - -Data streams:: -* [Usage API] Count all the data streams that have lifecycle {es-pull}102259[#102259] - -ES|QL:: -* ES|QL: Fix drop of renamed grouping {es-pull}102282[#102282] (issue: {es-issue}102121[#102121]) -* ES|QL: Fix layout management for Project {es-pull}102399[#102399] (issue: {es-issue}102120[#102120]) -* Fix DISSECT with empty patterns {es-pull}102580[#102580] (issue: {es-issue}102577[#102577]) -* Fix leaking blocks in TopN {es-pull}102715[#102715] (issue: {es-issue}102646[#102646]) -* Fix leaking blocks in `BlockUtils` {es-pull}102716[#102716] -* Fix memory tracking in TopN.Row {es-pull}102831[#102831] (issues: {es-issue}100640[#100640], {es-issue}102784[#102784], {es-issue}102790[#102790], {es-issue}102683[#102683]) - -ILM+SLM:: -* [ILM] Fix downsample to skip already downsampled indices {es-pull}102250[#102250] (issue: {es-issue}102249[#102249]) - -Infra/Circuit Breakers:: -* Add more logging to the real memory circuit breaker and lower minimum interval {es-pull}102396[#102396] - -Ingest Node:: -* Better processor stat merge {es-pull}102821[#102821] - -Machine Learning:: -* Ensure datafeed previews with no start or end time don't search the cold or frozen tiers {es-pull}102492[#102492] -* Recreate the Elasticsearch private temporary directory if it doesn't exist when an ML job is opened {es-pull}102599[#102599] - -Mapping:: -* Fix dense_vector cluster stats indexed_vector_dim_min/max values {es-pull}102467[#102467] (issue: {es-issue}102416[#102416]) - -Search:: -* Allow mismatched sort-by field types if there are no docs to sort {es-pull}102779[#102779] - -Security:: -* Fix double-completion in `SecurityUsageTransportAction` {es-pull}102114[#102114] (issue: {es-issue}102111[#102111]) - -Snapshot/Restore:: -* Set region for the STS client via privileged calls in AWS SDK {es-pull}102230[#102230] (issue: {es-issue}102173[#102173]) -* Simplify `BlobStoreRepository` idle check {es-pull}102057[#102057] (issue: {es-issue}101948[#101948]) - -Transform:: -* Ensure transform updates only modify the expected transform task {es-pull}102934[#102934] (issue: {es-issue}102933[#102933]) -* Exclude stack traces from transform audit messages and health {es-pull}102240[#102240] - -[[enhancement-8.11.2]] -[float] -=== Enhancements - -Machine Learning:: -* Add inference counts by model to the machine learning usage stats {es-pull}101915[#101915] - -Security:: -* Upgrade xmlsec to 2.3.4 {es-pull}102220[#102220] - -[[upgrade-8.11.2]] -[float] -=== Upgrades - -Snapshot/Restore:: -* Upgrade reactor netty http version {es-pull}102311[#102311] - - diff --git a/docs/reference/release-notes/8.11.3.asciidoc b/docs/reference/release-notes/8.11.3.asciidoc deleted file mode 100644 index ddeb50dad1f75..0000000000000 --- a/docs/reference/release-notes/8.11.3.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -[[release-notes-8.11.3]] -== {es} version 8.11.3 - -coming[8.11.3] - -Also see <>. - -[[bug-8.11.3]] -[float] -=== Bug fixes - -Application:: -* Use latest version of entsearch ingestion pipeline {es-pull}103087[#103087] - -ES|QL:: -* Allow match field in enrich fields {es-pull}102734[#102734] -* Collect warnings in compute service {es-pull}103031[#103031] (issues: {es-issue}100163[#100163], {es-issue}103028[#103028], {es-issue}102871[#102871], {es-issue}102982[#102982]) - -ILM+SLM:: -* [ILM] More resilient when a policy is added to searchable snapshot {es-pull}102741[#102741] (issue: {es-issue}101958[#101958]) - -Mapping:: -* Ensure `dynamicMapping` updates are handled in insertion order {es-pull}103047[#103047] - -Transform:: -* Ensure transform `_schedule_now` API only triggers the expected transform task {es-pull}102958[#102958] (issue: {es-issue}102956[#102956]) - - diff --git a/docs/reference/release-notes/8.11.4.asciidoc b/docs/reference/release-notes/8.11.4.asciidoc deleted file mode 100644 index 0fd57c97b1a89..0000000000000 --- a/docs/reference/release-notes/8.11.4.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -[[release-notes-8.11.4]] -== {es} version 8.11.4 - -Also see <>. - -[[bug-8.11.4]] -[float] -=== Bug fixes - -EQL:: -* Fix NPE on missing event queries {es-pull}103611[#103611] (issue: {es-issue}103608[#103608]) - -ES|QL:: -* Fix now in millis for ESQL search contexts {es-pull}103474[#103474] (issue: {es-issue}103455[#103455]) -* Fix the transport version of `PlanStreamOutput` {es-pull}103758[#103758] -* `AsyncOperator#isFinished` must never return true on failure {es-pull}104029[#104029] - -Infra/Scripting:: -* Wrap painless explain error {es-pull}103151[#103151] (issue: {es-issue}103018[#103018]) - -Mapping:: -* Revert change {es-pull}103865[#103865] - -Snapshot/Restore:: -* Decref `SharedBytes.IO` after read is done not before {es-pull}102848[#102848] -* Restore `SharedBytes.IO` refcounting on reads & writes {es-pull}102843[#102843] - -Watcher:: -* Fix: Watcher REST API `GET /_watcher/settings` now includes product header {es-pull}103003[#103003] (issue: {es-issue}102928[#102928]) - - diff --git a/docs/reference/release-notes/8.12.0.asciidoc b/docs/reference/release-notes/8.12.0.asciidoc deleted file mode 100644 index bd0ae032ef0b9..0000000000000 --- a/docs/reference/release-notes/8.12.0.asciidoc +++ /dev/null @@ -1,435 +0,0 @@ -[[release-notes-8.12.0]] -== {es} version 8.12.0 - -Also see <>. - -[[known-issues-8.12.0]] -[float] -=== Known issues - -* `int8_hnsw` vector index format may fail to merge segments and prevent from indexing documents (issue: {es-issue}104617[#104617]) -+ -When using `int8_hnsw` and the default `confidence_interval` (or any `confidence_interval` less than `1.0`) and when -there are deleted documents in the segments, quantiles may fail to build and prevent merging. -+ -This issue is fixed in 8.12.1. - -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -[[breaking-8.12.0]] -[float] -=== Breaking changes -There are no breaking changes in 8.12 - -[[notable-8.12.0]] -[float] -=== Notable changes -There are notable changes in 8.12 that you need to be aware of but that we do not consider breaking, items that we may consider as notable changes are - -* Changes to features that are in Technical Preview. -* Changes to log formats. -* Changes to non-public APIs. -* Behaviour changes that repair critical bugs. - -Authorization:: -* Fixed JWT principal from claims {es-pull}101333[#101333] - -ES|QL:: -* [ES|QL] pow function always returns double {es-pull}102183[#102183] (issue: {es-issue}99055[#99055]) - -Infra/Plugins:: -* Remove Plugin.createComponents method in favour of overload with a PluginServices object {es-pull}101457[#101457] - -[[bug-8.12.0]] -[float] -=== Bug fixes - -Aggregations:: -* Adjust Histogram's bucket accounting to be iteratively {es-pull}102172[#102172] -* Aggs error codes part 1 {es-pull}99963[#99963] -* Skip global ordinals loading if query does not match after rewrite {es-pull}102844[#102844] -* Trigger parent circuit breaker when building scorers in filters aggregation {es-pull}102511[#102511] -* Unwrap `ExecutionException` when loading from cache in `AbstractIndexOrdinalsFieldData` {es-pull}102476[#102476] - -Application:: -* [Connector API] Fix bug with nullable tooltip field in parser {es-pull}103427[#103427] -* [Connectors API] Fix `ClassCastException` when creating a new sync job {es-pull}103508[#103508] -* [Connectors API] Fix bug with missing TEXT `DisplayType` enum {es-pull}103430[#103430] -* [Connectors API] Handle nullable fields correctly in the `ConnectorSyncJob` parser {es-pull}103183[#103183] -* [Profiling] Query in parallel only if beneficial {es-pull}103061[#103061] -* [Search Applications] Return 400 response when template rendering produces invalid JSON {es-pull}101474[#101474] - -Authentication:: -* Fall through malformed JWTs to subsequent realms in the chain {es-pull}101660[#101660] (issue: {es-issue}101367[#101367]) - -Authorization:: -* Fix cache invalidation on privilege modification {es-pull}102193[#102193] - -Data streams:: -* Use dataset size instead of on-disk size for data stream stats {es-pull}103342[#103342] - -Distributed:: -* Active shards message corrected for search shards {es-pull}102808[#102808] (issue: {es-issue}101896[#101896]) -* Dispatch `ClusterStateAction#buildResponse` to executor {es-pull}103435[#103435] -* Fix listeners in `SharedBlobCacheService.readMultiRegions` {es-pull}101727[#101727] - -Downsampling:: -* Copy counter field properties to downsampled index {es-pull}103580[#103580] (issue: {es-issue}103569[#103569]) -* Fix downsample api by returning a failure in case one or more downsample persistent tasks failed {es-pull}103615[#103615] - -EQL:: -* Cover head/tail commands edge cases and data types coverage {es-pull}101859[#101859] (issue: {es-issue}101724[#101724]) -* Fix NPE on missing event queries {es-pull}103611[#103611] (issue: {es-issue}103608[#103608]) -* Samples should check if the aggregations result is empty or null {es-pull}103574[#103574] - -ES|QL:: -* ESQL: Fix `to_degrees()` returning infinity {es-pull}103209[#103209] (issue: {es-issue}102987[#102987]) -* ESQL: Fix planning of MV_EXPAND with foldable expressions {es-pull}101385[#101385] (issue: {es-issue}101118[#101118]) -* ESQL: Fix rare bug with empty string {es-pull}102350[#102350] (issue: {es-issue}101969[#101969]) -* ESQL: Fix resolution of MV_EXPAND after KEEP * {es-pull}103339[#103339] (issue: {es-issue}103331[#103331]) -* ESQL: Fix single value query {es-pull}102317[#102317] (issue: {es-issue}102298[#102298]) -* ESQL: Improve local folding of aggregates {es-pull}103670[#103670] -* ESQL: Improve pushdown of certain filters {es-pull}103671[#103671] -* ESQL: Narrow catch in convert functions {es-pull}101788[#101788] (issue: {es-issue}100820[#100820]) -* ESQL: Update the use of some user-caused exceptions {es-pull}104046[#104046] -* ESQL: remove `time_zone` request parameter {es-pull}102767[#102767] (issue: {es-issue}102159[#102159]) -* ES|QL: Fix NPE on single value detection {es-pull}103150[#103150] (issue: {es-issue}103141[#103141]) -* ES|QL: Improve resolution error management in `mv_expand` {es-pull}102967[#102967] (issue: {es-issue}102964[#102964]) -* Fix layout for MV_EXPAND {es-pull}102916[#102916] (issue: {es-issue}102912[#102912]) -* Fix now in millis for ESQL search contexts {es-pull}103474[#103474] (issue: {es-issue}103455[#103455]) -* Fix planning of duplicate aggs {es-pull}102165[#102165] (issue: {es-issue}102083[#102083]) -* Fix the transport version of `PlanStreamOutput` {es-pull}103758[#103758] -* `AsyncOperator#isFinished` must never return true on failure {es-pull}104029[#104029] - -Engine:: -* Fix `lastUnsafeSegmentGenerationForGets` for realtime get {es-pull}101700[#101700] - -Geo:: -* Fix geo tile bounding boxes to be consistent with arithmetic method {es-pull}100826[#100826] (issues: {es-issue}92611[#92611], {es-issue}95574[#95574]) - -ILM+SLM:: -* Collect data tiers usage stats more efficiently {es-pull}102140[#102140] (issue: {es-issue}100230[#100230]) - -Indices APIs:: -* Fix template simulate setting application ordering {es-pull}103024[#103024] (issue: {es-issue}103008[#103008]) - -Infra/Core:: -* Cache component versions {es-pull}103408[#103408] (issue: {es-issue}102103[#102103]) -* Fix metric gauge creation model {es-pull}100609[#100609] - -Infra/Node Lifecycle:: -* Wait for reroute before acking put-shutdown {es-pull}103251[#103251] - -Infra/Plugins:: -* Making classname optional in Transport protocol {es-pull}99702[#99702] (issue: {es-issue}98584[#98584]) - -Infra/Scripting:: -* Make IPAddress writeable {es-pull}101093[#101093] (issue: {es-issue}101082[#101082]) -* Wrap painless explain error {es-pull}103151[#103151] (issue: {es-issue}103018[#103018]) - -Infra/Settings:: -* Report full stack trace for non-state file settings transforms {es-pull}101346[#101346] - -Ingest Node:: -* Sending an index name to `DocumentParsingObserver` that is not ever null {es-pull}100862[#100862] - -License:: -* Error log when license verification fails locally {es-pull}102919[#102919] - -Machine Learning:: -* Catch exceptions during `pytorch_inference` startup {es-pull}103873[#103873] -* Exclude quantiles when fetching model snapshots where possible {es-pull}103530[#103530] -* Fix `frequent_item_sets` aggregation on empty index {es-pull}103116[#103116] (issue: {es-issue}103067[#103067]) -* If trained model download task is in progress, wait for it to finish before executing start trained model deployment {es-pull}102944[#102944] -* Persist data counts on job close before results index refresh {es-pull}101147[#101147] -* Preserve response headers in Datafeed preview {es-pull}103923[#103923] -* Prevent attempts to access non-existent node information during rebalancing {es-pull}103361[#103361] -* Prevent resource over-subscription in model allocation planner {es-pull}100392[#100392] -* Start a new trace context before loading a trained model {es-pull}103124[#103124] -* Wait for the model results on graceful shutdown {es-pull}103591[#103591] (issue: {es-issue}103414[#103414]) - -Mapping:: -* Revert change {es-pull}103865[#103865] - -Monitoring:: -* [Monitoring] Dont get cluster state until recovery {es-pull}100565[#100565] - -Network:: -* Ensure the correct `threadContext` for `RemoteClusterNodesAction` {es-pull}101050[#101050] - -Ranking:: -* Add an additional tiebreaker to RRF {es-pull}101847[#101847] (issue: {es-issue}101232[#101232]) - -Reindex:: -* Allow prefix index naming while reindexing from remote {es-pull}96968[#96968] (issue: {es-issue}89120[#89120]) - -Search:: -* Add JIT compiler excludes for `computeCommonPrefixLengthAndBuildHistogram` {es-pull}103112[#103112] -* Check that scripts produce correct json in render template action {es-pull}101518[#101518] (issue: {es-issue}101477[#101477]) -* Fix NPE & empty result handling in `CountOnlyQueryPhaseResultConsumer` {es-pull}103203[#103203] -* Fix format string in `OldLuceneVersions` {es-pull}103185[#103185] -* Handle timeout on standalone rewrite calls {es-pull}103546[#103546] -* Introduce Elasticsearch `PostingFormat` based on Lucene 90 positing format using PFOR {es-pull}103601[#103601] (issue: {es-issue}103002[#103002]) -* Restore inter-segment search concurrency with synthetic source is enabled {es-pull}103690[#103690] -* Support complex datemath expressions in index and index alias names {es-pull}100646[#100646] - -Snapshot/Restore:: -* Decref `SharedBytes.IO` after read is done not before {es-pull}102848[#102848] -* More consistent logging messages for snapshot deletion {es-pull}101024[#101024] -* Reroute on shard snapshot completion {es-pull}101585[#101585] (issue: {es-issue}101514[#101514]) -* Restore `SharedBytes.IO` refcounting on reads & writes {es-pull}102843[#102843] - -TSDB:: -* Throw when wrapping rate agg in `DeferableBucketAggregator` {es-pull}101032[#101032] - -Transform:: -* Add an assertion to the testTransformFeatureReset test case {es-pull}100287[#100287] -* Consider search context missing exceptions as recoverable {es-pull}102602[#102602] -* Consider task cancelled exceptions as recoverable {es-pull}100828[#100828] -* Fix NPE that is thrown by `_update` API {es-pull}104051[#104051] (issue: {es-issue}104048[#104048]) -* Log stacktrace together with log message in order to help debugging {es-pull}101607[#101607] -* Split comma-separated source index strings into separate indices {es-pull}102811[#102811] (issue: {es-issue}99564[#99564]) - -Vector Search:: -* Disallow vectors whose magnitudes will not fit in a float {es-pull}100519[#100519] - -Watcher:: -* Correctly logging watcher history write failures {es-pull}101802[#101802] -* Fix: Watcher REST API `GET /_watcher/settings` now includes product header {es-pull}103003[#103003] (issue: {es-issue}102928[#102928]) - -[[enhancement-8.12.0]] -[float] -=== Enhancements - -Aggregations:: -* Check the real memory circuit breaker when building global ordinals {es-pull}102462[#102462] -* Disable concurrency for sampler and diversified sampler {es-pull}102832[#102832] -* Disable parallelism for composite agg against high cardinality fields {es-pull}102644[#102644] -* Enable concurrency for multi terms agg {es-pull}102710[#102710] -* Enable concurrency for scripted metric agg {es-pull}102461[#102461] -* Enable inter-segment concurrency for terms aggs {es-pull}101390[#101390] -* Export circuit breaker trip count as a counter metric {es-pull}101423[#101423] -* Introduce fielddata cache ttl {es-pull}102682[#102682] -* Status codes for Aggregation errors, part 2 {es-pull}100368[#100368] -* Support keyed histograms {es-pull}101826[#101826] (issue: {es-issue}100242[#100242]) - -Allocation:: -* Add more desired balance stats {es-pull}102065[#102065] -* Add undesired shard count {es-pull}101426[#101426] -* Expose reconciliation metrics via APM {es-pull}102244[#102244] - -Application:: -* Calculate CO2 and emmission and costs {es-pull}101979[#101979] -* Consider duplicate stacktraces in custom index {es-pull}102292[#102292] -* Enable Universal Profiling as Enterprise feature {es-pull}100333[#100333] -* Include totals in flamegraph response {es-pull}101126[#101126] -* Retrieve stacktrace events from a custom index {es-pull}102020[#102020] -* [Profiling] Notify early about task cancellation {es-pull}102740[#102740] -* [Profiling] Report in status API if docs exist {es-pull}102735[#102735] - -Authentication:: -* Add ldap user metadata mappings for full name and email {es-pull}102925[#102925] -* Add manage_enrich cluster privilege to kibana_system role {es-pull}101682[#101682] - -Authorization:: -* Remove `auto_configure` privilege for profiling {es-pull}101026[#101026] -* Use `BulkRequest` to store Application Privileges {es-pull}102056[#102056] -* Use non-deprecated SAML callback URL in SAML smoketests {es-pull}99983[#99983] (issue: {es-issue}99986[#99986]) -* Use non-deprecated SAML callback URL in tests {es-pull}99983[#99983] (issue: {es-issue}99985[#99985]) - -CAT APIs:: -* Expose roles by default in cat allocation API {es-pull}101753[#101753] - -CRUD:: -* Cache resolved index for mgets {es-pull}101311[#101311] - -Data streams:: -* Introduce new endpoint to expose data stream lifecycle stats {es-pull}101845[#101845] -* Switch logs data streams to search all fields by default {es-pull}102456[#102456] (issue: {es-issue}99872[#99872]) - -Distributed:: -* Add support for configuring proxy scheme in S3 client settings and EC2 discovery plugin {es-pull}102495[#102495] (issue: {es-issue}101873[#101873]) -* Introduce a `StreamOutput` that counts how many bytes are written to the stream {es-pull}102906[#102906] -* Push s3 requests count via metrics API {es-pull}100383[#100383] -* Record operation purpose for s3 stats collection {es-pull}100236[#100236] - -EQL:: -* Add error logging for *QL {es-pull}101057[#101057] -* Use the eql query filter for the open-pit request {es-pull}103212[#103212] - -ES|QL:: -* ESQL: Add `profile` option {es-pull}102713[#102713] -* ESQL: Alias duplicated aggregations in a stats {es-pull}100642[#100642] (issue: {es-issue}100544[#100544]) -* ESQL: Load more than one field at once {es-pull}102192[#102192] -* ESQL: Load stored fields sequentially {es-pull}102727[#102727] -* ESQL: Load text field from parent keyword field {es-pull}102490[#102490] (issue: {es-issue}102473[#102473]) -* ESQL: Make blocks ref counted {es-pull}100408[#100408] -* ESQL: Make fieldcaps calls lighter {es-pull}102510[#102510] (issues: {es-issue}101763[#101763], {es-issue}102393[#102393]) -* ESQL: More tracking in `BlockHash` impls {es-pull}101488[#101488] -* ESQL: New telemetry commands {es-pull}102937[#102937] -* ESQL: Share constant null Blocks {es-pull}102673[#102673] -* ESQL: Short circuit loading empty doc values {es-pull}102434[#102434] -* ESQL: Support the `_source` metadata field {es-pull}102391[#102391] -* ESQL: Track blocks emitted from lucene {es-pull}101396[#101396] -* ESQL: Track memory from values loaded from lucene {es-pull}101383[#101383] -* Fast path for reading single doc with ordinals {es-pull}102902[#102902] -* Introduce local block factory {es-pull}102901[#102901] -* Load different way {es-pull}101235[#101235] -* Track ESQL enrich memory {es-pull}102184[#102184] -* Track blocks in `AsyncOperator` {es-pull}102188[#102188] -* Track blocks of intermediate state of aggs {es-pull}102562[#102562] -* Track blocks when hashing single multi-valued field {es-pull}102612[#102612] -* Track pages in ESQL enrich request/response {es-pull}102190[#102190] - -Engine:: -* Add static node settings to set default values for max merged segment sizes {es-pull}102208[#102208] - -Geo:: -* Add runtime field of type `geo_shape` {es-pull}100492[#100492] (issue: {es-issue}61299[#61299]) - -Health:: -* Add message field to `HealthPeriodicLogger` and `S3RequestRetryStats` {es-pull}101989[#101989] -* Add non-green indicator names to `HealthPeriodicLogger` message {es-pull}102245[#102245] - -ILM+SLM:: -* Health Report API should not return RED for unassigned cold/frozen shards when data is available {es-pull}100776[#100776] -* Switch fleet's built-in ILM policies to use .actions.rollover.max_primary_shard_size {es-pull}99984[#99984] (issue: {es-issue}99983[#99983]) - -Indices APIs:: -* Add executed pipelines to bulk api response {es-pull}100031[#100031] -* Add support for marking component templates as deprecated {es-pull}101148[#101148] (issue: {es-issue}100992[#100992]) -* Allowing non-dynamic index settings to be updated by automatically unassigning shards {es-pull}101723[#101723] -* Rename component templates and pipelines according to the new naming conventions {es-pull}99975[#99975] -* Run `TransportGetAliasesAction` on local node {es-pull}101815[#101815] - -Infra/CLI:: -* Set `ActiveProcessorCount` when `node.processors` is set {es-pull}101846[#101846] - -Infra/Core:: -* Add apm api for asynchronous counters (always increasing) {es-pull}102598[#102598] -* Log errors in `RestResponse` regardless of `error_trace` parameter {es-pull}101066[#101066] (issue: {es-issue}100884[#100884]) - -Infra/Logging:: -* Add status code to `rest.suppressed` log output {es-pull}100990[#100990] - -Ingest Node:: -* Deprecate the unused `elasticsearch_version` field of enrich policy json {es-pull}103013[#103013] -* Optimize `MurmurHash3` {es-pull}101202[#101202] - -Machine Learning:: -* Accept a single or multiple inputs to `_inference` {es-pull}102075[#102075] -* Add basic telelemetry for the inference feature {es-pull}102877[#102877] -* Add internal inference action for ml models an services {es-pull}102731[#102731] -* Add prefix strings option to trained models {es-pull}102089[#102089] -* Estimate the memory required to deploy trained models more accurately {es-pull}98874[#98874] -* Improve stability of spike and dip detection for the change point aggregation {es-pull}102637[#102637] -* Include ML processor limits in `_ml/info` response {es-pull}101392[#101392] -* Read scores from downloaded vocabulary for XLM Roberta tokenizers {es-pull}101868[#101868] -* Support for GET all models and by task type in the `_inference` API {es-pull}102806[#102806] - -Mapping:: -* Improve analyzer reload log message {es-pull}102273[#102273] - -Monitoring:: -* Add memory utilization Kibana metric to the monitoring index templates {es-pull}102810[#102810] -* Added `beat.stats.libbeat.pipeline.queue.max_events` {es-pull}102570[#102570] - -Network:: -* Record more detailed HTTP stats {es-pull}99852[#99852] - -Search:: -* Add metrics to the shared blob cache {es-pull}101577[#101577] -* Add support for Serbian Language Analyzer {es-pull}100921[#100921] -* Add support for `index_filter` to open pit {es-pull}102388[#102388] (issue: {es-issue}99740[#99740]) -* Added metric for cache eviction of entries with non zero frequency {es-pull}100570[#100570] -* Disable inter-segment concurrency when sorting by field {es-pull}101535[#101535] -* Enable query phase parallelism within a single shard {es-pull}101230[#101230] (issue: {es-issue}80693[#80693]) -* Node stats as metrics {es-pull}102248[#102248] -* Optimize `_count` type API requests {es-pull}102888[#102888] - -Security:: -* Expose the `invalidation` field in Get/Query `ApiKey` APIs {es-pull}102472[#102472] -* Make `api_key.delete.interval` a dynamic setting {es-pull}102680[#102680] - -Snapshot/Restore:: -* Fail S3 repository analysis on partial reads {es-pull}102840[#102840] -* Parallelize stale index deletion {es-pull}100316[#100316] (issue: {es-issue}61513[#61513]) -* Repo analysis of uncontended register behaviour {es-pull}101185[#101185] -* Repo analysis: allow configuration of register ops {es-pull}102051[#102051] -* Repo analysis: verify empty register {es-pull}102048[#102048] - -Stats:: -* Introduce includeShardsStats in the stats request to indicate that we only fetch a summary {es-pull}100466[#100466] (issue: {es-issue}99744[#99744]) -* Set includeShardsStats = false in NodesStatsRequest where the caller does not use shards-level statistics {es-pull}100938[#100938] - -Store:: -* Add methods for adding generation listeners with primary term {es-pull}100899[#100899] -* Allow executing multiple periodic flushes while they are being made durable {es-pull}102571[#102571] -* Pass shard's primary term to Engine#addSegmentGenerationListener {es-pull}99752[#99752] - -Transform:: -* Implement exponential backoff for transform state persistence retrying {es-pull}102512[#102512] (issue: {es-issue}102528[#102528]) -* Make tasks that calculate checkpoints time out {es-pull}101055[#101055] -* Pass source query to `_field_caps` (as `index_filter`) when deducing destination index mappings for better performance {es-pull}102379[#102379] -* Pass transform source query as `index_filter` to `open_point_in_time` request {es-pull}102447[#102447] (issue: {es-issue}101049[#101049]) -* Skip shards that don't match the source query during checkpointing {es-pull}102138[#102138] - -Vector Search:: -* Add vector_operation_count in profile output for knn searches {es-pull}102032[#102032] -* Make cosine similarity faster by storing magnitude and normalizing vectors {es-pull}99445[#99445] - -[[feature-8.12.0]] -[float] -=== New features - -Application:: -* Enable Connectors API as technical preview {es-pull}102994[#102994] -* [Behavioral Analytics] Analytics collections use Data Stream Lifecycle (DSL) instead of Index Lifecycle Management (ILM) for data retention management. Behavioral analytics has traditionally used ILM to manage data retention. Starting with 8.12.0, this will change. Analytics collections created prior to 8.12.0 will continue to use their existing ILM policies, but new analytics collections will be managed using DSL. {es-pull}100033[#100033] - -Authentication:: -* Patterns support for allowed subjects by the JWT realm {es-pull}102426[#102426] - -Cluster Coordination:: -* Add a node feature join barrier. This prevents nodes from joining clusters that do not have all the features already present in the cluster. This ensures that once a features is supported by all the nodes in a cluster, that feature will never then not be supported in the future. This is the corresponding functionality for the version join barrier, but for features - {es-pull}101609[#101609] - -Data streams:: -* Add ability to create a data stream failure store {es-pull}99134[#99134] - -ES|QL:: -* ESQL: emit warnings from single-value functions processing multi-values {es-pull}102417[#102417] (issue: {es-issue}98743[#98743]) -* GEO_POINT and CARTESIAN_POINT type support {es-pull}102177[#102177] - -Infra/Core:: -* Create new cluster state API for querying features present on a cluster {es-pull}100974[#100974] - -Ingest Node:: -* Adding a simulate ingest api {es-pull}101409[#101409] - -Security:: -* Allow granting API keys with JWT as the access_token {es-pull}101904[#101904] - -Vector Search:: -* Add byte quantization for float vectors in HNSW {es-pull}102093[#102093] -* Make knn search a query {es-pull}98916[#98916] - -[[regression-8.12.0]] -[float] -=== Regressions - -Infra/Core:: -* Revert non-semantic `NodeInfo` {es-pull}102636[#102636] - -[[upgrade-8.12.0]] -[float] -=== Upgrades - -Search:: -* Upgrade to Lucene 9.9.1 {es-pull}103549[#103549] - - diff --git a/docs/reference/release-notes/8.12.1.asciidoc b/docs/reference/release-notes/8.12.1.asciidoc deleted file mode 100644 index 8ebe5cbac3852..0000000000000 --- a/docs/reference/release-notes/8.12.1.asciidoc +++ /dev/null @@ -1,83 +0,0 @@ -[[release-notes-8.12.1]] -== {es} version 8.12.1 - -Also see <>. - -[[known-issues-8.12.1]] -[float] -=== Known issues -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -[[bug-8.12.1]] -[float] -=== Bug fixes - -Allocation:: -* Improve `CANNOT_REBALANCE_CAN_ALLOCATE` explanation {es-pull}104904[#104904] - -Application:: -* [Connector API] Fix bug in configuration validation parser {es-pull}104198[#104198] -* [Connector API] Fix bug when triggering a sync job via API {es-pull}104802[#104802] -* [Profiling] Query in parallel on content nodes {es-pull}104600[#104600] - -Data streams:: -* Data streams fix failure store delete {es-pull}104281[#104281] -* Fix _alias/ returning non-matching data streams {es-pull}104145[#104145] (issue: {es-issue}96589[#96589]) - -Downsampling:: -* Downsampling supports `date_histogram` with tz {es-pull}103511[#103511] (issue: {es-issue}101309[#101309]) - -ES|QL:: -* Avoid execute ESQL planning on refresh thread {es-pull}104591[#104591] -* ESQL: Allow grouping by null blocks {es-pull}104523[#104523] -* ESQL: Fix `SearchStats#count(String)` to count values not rows {es-pull}104891[#104891] (issue: {es-issue}104795[#104795]) -* Limit concurrent shards per node for ESQL {es-pull}104832[#104832] (issue: {es-issue}103666[#103666]) -* Reduce the number of Evals `ReplaceMissingFieldWithNull` creates {es-pull}104586[#104586] (issue: {es-issue}104583[#104583]) - -Infra/Resiliency:: -* Limit nesting depth in Exception XContent {es-pull}103741[#103741] - -Ingest Node:: -* Better handling of async processor failures {es-pull}104289[#104289] (issue: {es-issue}101921[#101921]) -* Ingest correctly handle upsert operations and drop processors together {es-pull}104585[#104585] (issue: {es-issue}36746[#36746]) - -Machine Learning:: -* Add retry logic for 500 and 503 errors for OpenAI {es-pull}103819[#103819] -* Avoid possible datafeed infinite loop with filtering aggregations {es-pull}104722[#104722] (issue: {es-issue}104699[#104699]) -* [LTR] `FieldValueExtrator` - Checking if fetched values is empty {es-pull}104314[#104314] - -Network:: -* Fix lost headers with chunked responses {es-pull}104808[#104808] - -Search:: -* Don't throw error for remote shards that open PIT filtered out {es-pull}104288[#104288] (issue: {es-issue}102596[#102596]) - -Snapshot/Restore:: -* Fix deleting index during snapshot finalization {es-pull}103817[#103817] (issue: {es-issue}101029[#101029]) - -TSDB:: -* Fix `routing_path` when template has multiple `path_match` and multi-fields {es-pull}104418[#104418] (issue: {es-issue}104400[#104400]) - -Transform:: -* Fix bug when `latest` transform is used together with `from` parameter {es-pull}104606[#104606] (issue: {es-issue}104543[#104543]) - -[[deprecation-8.12.1]] -[float] -=== Deprecations - -Machine Learning:: -* Deprecate machine learning on Intel macOS {es-pull}104087[#104087] - -[[upgrade-8.12.1]] -[float] -=== Upgrades - -Search:: -* [8.12.1] Upgrade to Lucene 9.9.2 {es-pull}104761[#104761] (issue: {es-issue}104617[#104617]) - - diff --git a/docs/reference/release-notes/8.12.2.asciidoc b/docs/reference/release-notes/8.12.2.asciidoc deleted file mode 100644 index 44202ee8226eb..0000000000000 --- a/docs/reference/release-notes/8.12.2.asciidoc +++ /dev/null @@ -1,68 +0,0 @@ -[[release-notes-8.12.2]] -== {es} version 8.12.2 - -Also see <>. - -[[known-issues-8.12.2]] -[float] -=== Known issues -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -[[bug-8.12.2]] -[float] -=== Bug fixes - -Application:: -* Fix bug in `rule_query` where `text_expansion` errored because it was not rewritten {es-pull}105365[#105365] -* [Connectors API] Fix bug with crawler configuration parsing and `sync_now` flag {es-pull}105024[#105024] - -Authentication:: -* Validate settings before reloading JWT shared secret {es-pull}105070[#105070] - -Downsampling:: -* Downsampling better handle if source index isn't allocated and fix bug in retrieving last processed tsid {es-pull}105228[#105228] - -ES|QL:: -* ESQL: Push CIDR_MATCH to Lucene if possible {es-pull}105061[#105061] (issue: {es-issue}105042[#105042]) -* ES|QL: Fix exception handling on `date_parse` with wrong date pattern {es-pull}105048[#105048] (issue: {es-issue}104124[#104124]) - -Indices APIs:: -* Always show `composed_of` field for composable index templates {es-pull}105315[#105315] (issue: {es-issue}104627[#104627]) - -Ingest Node:: -* Backport stable `ThreadPool` constructor from `LogstashInternalBridge` {es-pull}105165[#105165] -* Harden index mapping parameter check in enrich runner {es-pull}105096[#105096] - -Machine Learning:: -* Fix handling of `ml.config_version` node attribute for nodes with machine learning disabled {es-pull}105066[#105066] -* Fix handling surrogate pairs in the XLM Roberta tokenizer {es-pull}105183[#105183] (issues: {es-issue}104626[#104626], {es-issue}104981[#104981]) -* Inference service should reject tasks during shutdown {es-pull}105213[#105213] - -Network:: -* Close `currentChunkedWrite` on client cancel {es-pull}105258[#105258] -* Fix leaked HTTP response sent after close {es-pull}105293[#105293] (issue: {es-issue}104651[#104651]) -* Fix race in HTTP response shutdown handling {es-pull}105306[#105306] - -Search:: -* Field-caps should read fields from up-to-dated shards {es-pull}105153[#105153] (issue: {es-issue}104809[#104809]) - -Snapshot/Restore:: -* Finalize all snapshots completed by shard snapshot updates {es-pull}105245[#105245] (issue: {es-issue}104939[#104939]) - -Transform:: -* Do not log warning when triggering an `ABORTING` transform {es-pull}105234[#105234] (issue: {es-issue}105233[#105233]) -* Make `_reset` action stop transforms without force first {es-pull}104870[#104870] (issues: {es-issue}100596[#100596], {es-issue}104825[#104825]) - -[[enhancement-8.12.2]] -[float] -=== Enhancements - -IdentityProvider:: -* Include user's privileges actions in IdP plugin `_has_privileges` request {es-pull}104026[#104026] - - diff --git a/docs/reference/release-notes/8.13.0.asciidoc b/docs/reference/release-notes/8.13.0.asciidoc deleted file mode 100644 index 75e2341f33766..0000000000000 --- a/docs/reference/release-notes/8.13.0.asciidoc +++ /dev/null @@ -1,472 +0,0 @@ -[[release-notes-8.13.0]] -== {es} version 8.13.0 - -Also see <>. - -[[known-issues-8.13.0]] -[float] -=== Known issues - -* Searches involving nodes upgraded to 8.13.0 and a coordinator node that is running on version - 8.12 or earlier can produce duplicate buckets when running `date_histogram` or `histogram` - aggregations. This can happen during a rolling upgrade to 8.13 or while running cross-cluster - searches. (issue: {es-issue}108181[#108181]). - -* Due to a bug in the bundled JDK 22 nodes might crash abruptly under high memory pressure. - We recommend <> asap to mitigate the issue. - -* Nodes upgraded to 8.13.0 fail to load downsampling persistent tasks. This prevents them from joining the cluster, blocking its upgrade (issue: {es-issue}106880[#106880]) -+ -This affects clusters running version 8.10 or later, with an active downsampling -https://www.elastic.co/guide/en/elasticsearch/reference/current/downsampling-ilm.html[configuration] -or a configuration that was activated at some point since upgrading to version 8.10 or later. - -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. -In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` -process to reclaim memory. This can cause inference requests to fail. -Elasticsearch will automatically restart the `pytorch_inference` process -after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) - -[[breaking-8.13.0]] -[float] -=== Breaking changes - -ES|QL:: -* ESQL: Grammar - FROM METADATA no longer requires [] {es-pull}105221[#105221] -* ES|QL: remove PROJECT keyword from the grammar {es-pull}105064[#105064] -* [ESQL] Remove is_nan, is_finite, and `is_infinite` {es-pull}104091[#104091] - -TSDB:: -* Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. {es-pull}103898[#103898] -* Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours. {es-pull}103434[#103434] - -[[bug-8.13.0]] -[float] -=== Bug fixes - -Aggregations:: -* Disable parallel collection for terms aggregation with `min_doc_count` equals to 0 {es-pull}106156[#106156] -* `GlobalOrdCardinalityAggregator` should use `HyperLogLogPlusPlus` instead of `HyperLogLogPlusPlusSparse` {es-pull}105546[#105546] - -Allocation:: -* Fix disk computation when initializing new shards {es-pull}102879[#102879] -* Fix disk computation when initializing unassigned shards in desired balance computation {es-pull}102207[#102207] - -Application:: -* Fix Search Applications bug where deleting an alias before deleting an application intermittently caused errors {es-pull}106329[#106329] -* Use search to determine if cluster contains data {es-pull}103920[#103920] -* [Connector API] Bugfix: support list type in filtering advenced snippet value {es-pull}105633[#105633] -* [Connector API] Fix default ordering in `SyncJob` list endpoint {es-pull}105945[#105945] -* [Connector API] Fix serialisation of script params in connector index service {es-pull}106060[#106060] - -Authentication:: -* Execute SAML authentication on the generic threadpool {es-pull}105232[#105232] (issue: {es-issue}104962[#104962]) - -Authorization:: -* Adjust interception of requests for specific shard IDs {es-pull}101656[#101656] - -Client:: -* Validate settings in `ReloadSecureSettings` API {es-pull}103176[#103176] - -Data streams:: -* Apm-data: fix `@custom` component templates {es-pull}104182[#104182] -* Avoid false-positive matches on intermediate objects in `ecs@mappings` {es-pull}105440[#105440] (issue: {es-issue}102794[#102794]) -* Execute lazy rollover with an internal dedicated user #104732 {es-pull}104905[#104905] (issue: {es-issue}104732[#104732]) -* Fix write index resolution when an alias is pointing to a TSDS {es-pull}104440[#104440] (issue: {es-issue}104189[#104189]) -* x-pack/plugin/core: add `match_mapping_type` to `ecs@mappings` dynamic templates {es-pull}103035[#103035] - -Distributed:: -* Fix logger Strings.format calls {es-pull}104573[#104573] -* Request indexing memory pressure in APM node metrics publisher {es-pull}103520[#103520] - -ES|QL:: -* ESQL: Add single value checks on LIKE/RLIKE pushdown {es-pull}103807[#103807] (issue: {es-issue}103806[#103806]) -* ESQL: Correct out-of-range filter pushdowns {es-pull}99961[#99961] (issue: {es-issue}99960[#99960]) -* ESQL: Fix Analyzer to not interpret escaped * as a pattern {es-pull}105325[#105325] (issue: {es-issue}104955[#104955]) -* ESQL: Fix a bug loading unindexed text fields {es-pull}104553[#104553] -* ESQL: Fix bug in grammar that allowed spaces inside id pattern {es-pull}105476[#105476] (issue: {es-issue}105441[#105441]) -* ESQL: Fix replacement of nested expressions in aggs with multiple parameters {es-pull}104718[#104718] (issue: {es-issue}104706[#104706]) -* ESQL: Fix wrong attribute shadowing in pushdown rules {es-pull}105650[#105650] (issue: {es-issue}105434[#105434]) -* ESQL: Improve pushdown of certain filters {es-pull}103538[#103538] (issue: {es-issue}103536[#103536]) -* ESQL: allow `null` in date math {es-pull}103610[#103610] (issue: {es-issue}103085[#103085]) -* ESQL: make `cidr_match` foldable {es-pull}105403[#105403] (issue: {es-issue}105376[#105376]) -* ES|QL: Disable optimizations that rely on Expression.nullable() {es-pull}105691[#105691] -* ES|QL: Improve type validation in aggs for UNSIGNED_LONG better support for VERSION {es-pull}104911[#104911] (issue: {es-issue}102961[#102961]) -* ES|QL: better management of exact subfields for TEXT fields {es-pull}103510[#103510] (issue: {es-issue}99899[#99899]) -* Fix error on sorting unsortable `geo_point` and `cartesian_point` {es-pull}106351[#106351] (issue: {es-issue}106007[#106007]) -* For empty mappings use a `LocalRelation` {es-pull}105081[#105081] (issue: {es-issue}104809[#104809]) -* Resume driver when failing to fetch pages {es-pull}106392[#106392] (issue: {es-issue}106262[#106262]) -* Review KEEP logic to prevent duplicate column names {es-pull}103316[#103316] -* `ProjectOperator` should not retain references to released blocks {es-pull}105848[#105848] - -Engine:: -* Consider currently refreshing data in the memory usage of refresh {es-pull}104122[#104122] -* Release `TranslogSnapshot` buffer after iteration {es-pull}106398[#106398] (issue: {es-issue}106390[#106390]) - -Health:: -* Make Health API more resilient to multi-version clusters {es-pull}105789[#105789] (issue: {es-issue}90183[#90183]) -* Stop the periodic health logger when es is stopping {es-pull}105272[#105272] - -ILM+SLM:: -* Remove `hashCode` and `equals` from `OperationModeUpdateTask` {es-pull}104265[#104265] (issue: {es-issue}100871[#100871]) -* [ILM] Delete step deletes data stream with only one index {es-pull}105772[#105772] - -Indices APIs:: -* Fix `require_alias` implicit true value on presence {es-pull}104099[#104099] (issue: {es-issue}103945[#103945]) - -Infra/CLI:: -* Fix server cli to always pass through exit code {es-pull}104943[#104943] - -Infra/Core:: -* Do not enable APM agent 'instrument', it's not required for manual tracing {es-pull}105055[#105055] -* Fix bogus assertion tripped by force-executed tasks {es-pull}104581[#104581] (issue: {es-issue}104580[#104580]) -* Metrics: Allow `AsyncCounters` to switch providers {es-pull}103025[#103025] -* Metrics: Handle null observations in observers {es-pull}103091[#103091] - -Infra/Node Lifecycle:: -* Close rather than stop `HttpServerTransport` on shutdown {es-pull}102759[#102759] (issue: {es-issue}102501[#102501]) - -Ingest Node:: -* Add stable `ThreadPool` constructor to `LogstashInternalBridge` {es-pull}105163[#105163] -* Adding `executedPipelines` to the `IngestDocument` copy constructor {es-pull}105427[#105427] -* Revert "x-pack/plugin/apm-data: download geoip DB on pipeline creation" {es-pull}104505[#104505] -* X-pack/plugin/apm-data: fix `@custom` pipeline support {es-pull}104113[#104113] - -Machine Learning:: -* Allow GET inference models by user a with read only permission {es-pull}105346[#105346] -* Avoid computing `currentInferenceProcessors` on every cluster state {es-pull}106057[#106057] -* Catch all the potential exceptions in the ingest processor code {es-pull}105391[#105391] -* Changed system auditor to use levels {es-pull}105429[#105429] -* During ML maintenance, reset jobs in the reset state without a corresponding task {es-pull}106062[#106062] -* Fix `categorize_text` aggregation nested under empty buckets {es-pull}105987[#105987] (issue: {es-issue}105836[#105836]) -* Fix resetting a job if the original reset task no longer exists. {es-pull}106020[#106020] -* Retry updates to model snapshot ID on job config {es-pull}104077[#104077] -* The OpenAI model parameter should be in service settings not task settings. Move the configuration field to service settings {es-pull}105458[#105458] -* Undeploy elser when inference model deleted {es-pull}104230[#104230] - -Mapping:: -* Fix parsing of flattened fields within subobjects: false {es-pull}105373[#105373] - -Network:: -* Fix use-after-free at event-loop shutdown {es-pull}105486[#105486] - -Search:: -* Correct profiled rewrite time for knn with a pre-filter {es-pull}104150[#104150] -* Force execution of `SearchService.Reaper` {es-pull}106544[#106544] (issue: {es-issue}106543[#106543]) -* Move `TransportTermsEnumAction` coordination off transport threads {es-pull}104408[#104408] -* Remove `SearchException` usages without a proper status code {es-pull}105150[#105150] -* Require the name field for `inner_hits` for collapse {es-pull}104666[#104666] -* add validation on _id field when upsert new doc {es-pull}103399[#103399] (issue: {es-issue}102981[#102981]) - -Security:: -* Revert "Validate settings in `ReloadSecureSettings` API" {es-pull}103310[#103310] - -Snapshot/Restore:: -* Do not record s3 http request time when it is not available {es-pull}105103[#105103] -* `URLRepository` should not block shutdown {es-pull}105588[#105588] - -TLS:: -* Respect --pass option in certutil csr mode {es-pull}106105[#106105] - -Transform:: -* Fix `_reset` API when called with `force=true` on a failed transform {es-pull}106574[#106574] (issue: {es-issue}106573[#106573]) -* Fix a bug where destination index aliases are not set up for an unattended transform {es-pull}105499[#105499] -* Remove duplicate checkpoint audits {es-pull}105164[#105164] (issue: {es-issue}105106[#105106]) -* Return results in order {es-pull}105089[#105089] (issue: {es-issue}104847[#104847]) -* Use deduced mappings for determining proper fields' format even if `deduce_mappings==false` {es-pull}103682[#103682] (issue: {es-issue}103115[#103115]) - -Vector Search:: -* Fix bug when nested knn pre-filter might match nested docs {es-pull}105994[#105994] - -Watcher:: -* Handling exceptions on watcher reload {es-pull}105442[#105442] (issue: {es-issue}69842[#69842]) - -[[deprecation-8.13.0]] -[float] -=== Deprecations - -Distributed:: -* `DesiredNode:` deprecate `node_version` field and make it optional (unused) in current parser {es-pull}104209[#104209] - -Infra/Core:: -* Deprecate `client.type` {es-pull}104574[#104574] - -[[enhancement-8.13.0]] -[float] -=== Enhancements - -Aggregations:: -* Add index mapping parameter for `counted_keyword` {es-pull}103646[#103646] -* Introduce an `AggregatorReducer` to reduce the footprint of aggregations in the coordinating node {es-pull}105207[#105207] -* Release resources in `BestBucketsDeferringCollector` earlier {es-pull}104893[#104893] -* Support sampling in `counted_terms` aggregation {es-pull}103846[#103846] - -Allocation:: -* Account for reserved disk size {es-pull}103903[#103903] -* Derive expected replica size from primary {es-pull}102078[#102078] - -Application:: -* Add serverless scopes for Connector APIs {es-pull}104063[#104063] -* [Connector API] Change required privileges to indices:data/read(write) {es-pull}105289[#105289] -* [Connector API] Implement update `index_name` action {es-pull}104648[#104648] -* [Connector API] Support filtering by name, index name in list action {es-pull}105131[#105131] -* [Connector API] Support filtering connectors by service type and a query {es-pull}105178[#105178] -* [Connector API] Support updating configuration values only {es-pull}105249[#105249] -* [Connectors API] Add new field `api_key_secret_id` to Connector {es-pull}104982[#104982] -* [Connectors API] Implement connector status update action {es-pull}104750[#104750] -* [Connectors API] Implement update native action endpoint {es-pull}104654[#104654] -* [Connectors API] Implement update service type action {es-pull}104643[#104643] -* [Connectors API] Relax strict response parsing for get/list operations {es-pull}104909[#104909] -* [Profiling] Extract properties faster from source {es-pull}104356[#104356] -* [Profiling] Mark all templates as managed {es-pull}103783[#103783] -* [Profiling] Speed up processing of stacktraces {es-pull}104674[#104674] -* [Profiling] Support downsampling of generic events {es-pull}104730[#104730] -* [Profiling] Use shard request cache consistently {es-pull}103643[#103643] - -Authentication:: -* Expose API key authentication metrics {es-pull}103178[#103178] -* Expose realms authentication metrics {es-pull}104200[#104200] -* Expose service account authentication metrics {es-pull}104043[#104043] -* Expose token authentication metrics {es-pull}104142[#104142] -* Hot-reloadable LDAP bind password {es-pull}104320[#104320] -* Support of `match` for the Query API Key API {es-pull}104594[#104594] - -Authorization:: -* [Security Solution] Allow write permission for `kibana_system` role on endpoint response index {es-pull}103555[#103555] - -CRUD:: -* Avoid wrapping searchers multiple times in mget {es-pull}104227[#104227] (issue: {es-issue}85069[#85069]) - -Client:: -* Add rest spec for Query User API {es-pull}104529[#104529] - -Cluster Coordination:: -* Add troubleshooting docs link to `PeerFinder` logs {es-pull}104787[#104787] -* Report current master in `PeerFinder` {es-pull}104396[#104396] - -Data streams:: -* Introduce lazy rollover for mapping updates in data streams {es-pull}103309[#103309] (issue: {es-issue}89346[#89346]) -* Use new `ignore_dynamic_beyond_limit` in logs and metric data streams {es-pull}105180[#105180] -* X-pack/plugin/apm-data: add dynamic setting for enabling template registry {es-pull}104386[#104386] (issue: {es-issue}104385[#104385]) -* X-pack/plugin/core: rename `double_metrics` template {es-pull}103033[#103033] -* x-pack/plugin/apm-data: Add a new field transaction.profiler_stack_trace_ids to traces-apm@mappings.yaml {es-pull}105223[#105223] -* x-pack/plugin/apm-data: Map some APM fields as flattened and fix error.grouping_name script {es-pull}103032[#103032] -* x-pack/plugin/core: make automatic rollovers lazy {es-pull}105273[#105273] (issue: {es-issue}104083[#104083]) - -Discovery-Plugins:: -* Set read timeout for fetching IMDSv2 token {es-pull}104407[#104407] (issue: {es-issue}104244[#104244]) - -Downsampling:: -* Support patch transport version from 8.12 {es-pull}104406[#104406] - -ES|QL:: -* Add ES|QL async delete API {es-pull}103628[#103628] -* Avoid humongous blocks {es-pull}103340[#103340] -* ESQL: Add TO_UPPER and TO_LOWER functions {es-pull}104309[#104309] -* ESQL: Add option to drop null fields {es-pull}102428[#102428] -* ESQL: Add plan consistency verification after each optimizer {es-pull}105371[#105371] -* ESQL: Check field exists before load from `_source` {es-pull}103632[#103632] -* ESQL: Delay finding field load infrastructure {es-pull}103821[#103821] -* ESQL: Expand shallow copy with vecs {es-pull}103681[#103681] (issue: {es-issue}100528[#100528]) -* ESQL: Extend STATS command to support aggregate expressions {es-pull}104958[#104958] -* ESQL: Infer not null for aggregated fields {es-pull}103673[#103673] (issue: {es-issue}102787[#102787]) -* ESQL: Nested expressions inside stats command {es-pull}104387[#104387] (issue: {es-issue}99828[#99828]) -* ESQL: Pre-allocate rows in TopNOperator {es-pull}104796[#104796] -* ESQL: Referencing expressions that contain backticks requires <>. {es-pull}100740[#100740] (issue: {es-issue}100312[#100312]) -* ESQL: Simpify IS NULL/IS NOT NULL evaluation {es-pull}103099[#103099] (issue: {es-issue}103097[#103097]) -* ESQL: Speed up reading many nulls {es-pull}105088[#105088] -* ESQL: Support loading shapes from source into WKB blocks {es-pull}104269[#104269] -* ESQL: Track the rest of `DocVector` {es-pull}103727[#103727] -* ESQL: `MV_FIRST` and `MV_LAST` {es-pull}103928[#103928] -* ESQL: add `date_diff` function {es-pull}104118[#104118] (issue: {es-issue}101942[#101942]) -* ESQL: push down "[text_field] is not null" {es-pull}105593[#105593] -* ES|QL Async Query API {es-pull}103398[#103398] -* Prepare enrich plan to support multi clusters {es-pull}104355[#104355] -* Reading points from source to reduce precision loss {es-pull}103698[#103698] -* Remove deprecated Block APIs {es-pull}103592[#103592] -* Reserve bytes before serializing page {es-pull}105269[#105269] -* Support ST_CENTROID over spatial points {es-pull}104218[#104218] (issue: {es-issue}104656[#104656]) -* Support cross clusters query in ESQL {es-pull}101640[#101640] -* Support enrich ANY mode in cross clusters query {es-pull}104840[#104840] -* Support enrich coordinator mode {es-pull}104936[#104936] -* Support enrich remote mode {es-pull}104993[#104993] - -Geo:: -* Add support for Well Known Binary (WKB) in the fields API for spatial fields {es-pull}103461[#103461] -* Add the possibility to transform WKT to WKB directly {es-pull}104030[#104030] - -Health:: -* Add APM metrics to `HealthPeriodicLogger` {es-pull}102765[#102765] -* Extend `repository_integrity` health indicator for unknown and invalid repos {es-pull}104614[#104614] (issue: {es-issue}103784[#103784]) - -ILM+SLM:: -* Add "step":"ERROR" to ILM explain response for missing policy {es-pull}103720[#103720] (issue: {es-issue}99030[#99030]) -* Add default rollover conditions to ILM explain API response {es-pull}104721[#104721] (issue: {es-issue}103395[#103395]) -* ILM/SLM history policies forcemerge in hot and dsl configuration {es-pull}103190[#103190] - -Infra/CLI:: -* Add replay diagnostic dir to system jvm options {es-pull}103535[#103535] - -Infra/Circuit Breakers:: -* Lower G1 minimum full GC interval {es-pull}105259[#105259] - -Infra/Core:: -* Adding threadpool metrics {es-pull}102371[#102371] -* ES - document observing with rejections {es-pull}104859[#104859] -* Thread pool metrics {es-pull}104500[#104500] - -Infra/Metrics:: -* Modify name of threadpool metric for rejected {es-pull}105015[#105015] - -Infra/Node Lifecycle:: -* Wait for async searches to finish when shutting down {es-pull}101487[#101487] - -Infra/Transport API:: -* Make `ParentTaskAssigningClient.getRemoteClusterClient` method also return `ParentTaskAssigningClient` {es-pull}100813[#100813] - -Ingest Node:: -* Adding `ActionRequestLazyBuilder` implementation of `RequestBuilder` {es-pull}104927[#104927] -* Adding a `RequestBuilder` interface {es-pull}104778[#104778] -* Adding a custom exception for problems with the graph of pipelines to be applied to a document {es-pull}105196[#105196] -* Improving the performance of the ingest simulate verbose API {es-pull}105265[#105265] -* Ingest geoip processor cache 'no results' from the database {es-pull}104092[#104092] -* Limiting the number of nested pipelines that can be executed {es-pull}105428[#105428] -* Modifying request builders {es-pull}104636[#104636] - -Java Low Level REST Client:: -* Set thread name used by REST client {es-pull}103160[#103160] - -Machine Learning:: -* Add optional pruning configuration (weighted terms scoring) to text expansion query {es-pull}102862[#102862] -* Add text_embedding inference service with multilingual-e5 and custom eland models {es-pull}104949[#104949] -* Add 3 automatic restarts for `pytorch_inference` processes that stop unexpectedly {es-pull}104433[#104433] -* Add support for Cohere inference service {es-pull}104559[#104559] -* Always test for spikes and dips as well as changes in the change point aggregation {es-pull}103922[#103922] -* Apply windowing and chunking to long documents {es-pull}104363[#104363] -* Automatically download the ELSER model when PUT in `_inference` {es-pull}104334[#104334] -* Better handling of number of allocations in pytorch_inference in the case that hardware_concurrency fails {ml-pull}2607[#2607] -* Change detection aggregation improvements {es-pull}102824[#102824] -* Conditionally send the dimensions field as part of the openai requests {es-pull}105299[#105299] (issue: {es-issue}105005[#105005]) -* Endpoint to find positions of Grok pattern matches {es-pull}104394[#104394] -* Ensure unique IDs between inference models and trained model deployments {es-pull}103996[#103996] -* Expose some ML metrics via APM {es-pull}102584[#102584] -* Make `task_type` optional in `_inference` APIs {es-pull}104483[#104483] -* Update `missingTrainedModel` message to include: you may need to create it {es-pull}104155[#104155] -* Upgrade MKL to version 2024.0 on Linux x86_64 {ml-pull}2619[#2619] -* Upgrade PyTorch to version 2.1.2. {ml-pull}2588[#2588] -* Upgrade zlib to version 1.2.13 on Windows {ml-pull}2588[#2588] -* Use Boost.JSON for JSON processing {ml-pull}2614[#2614] -* Validate inference model ids {es-pull}103669[#103669] - - -Mapping:: -* Add `index.mapping.total_fields.ignore_dynamic_beyond_limit` setting to ignore dynamic fields when field limit is reached {es-pull}96235[#96235] -* Make field limit more predictable {es-pull}102885[#102885] - -Network:: -* Prune unnecessary information from TransportNodesStatsAction.NodeStatsRequest {es-pull}102559[#102559] (issue: {es-issue}100878[#100878]) - -Percolator:: -* Return `matched_queries` in Percolator {es-pull}103084[#103084] (issue: {es-issue}10163[#10163]) - -Query Languages:: -* Introduce Alias.unwrap method {es-pull}104575[#104575] - -Search:: -* Dyamically adjust node metrics cache expire {es-pull}104460[#104460] -* Enhancement: Metrics for Search Took Times using Action Listeners {es-pull}104996[#104996] -* Field caps performance pt2 {es-pull}105941[#105941] -* Field-caps field has value lookup use map instead of looping array {es-pull}105770[#105770] -* Flag in `_field_caps` to return only fields with values in index {es-pull}103651[#103651] -* Include better output in profiling & `toString` for automaton based queries {es-pull}105468[#105468] -* Metrics for search latencies {es-pull}102557[#102557] -* Ref count search response bytes {es-pull}103763[#103763] (issue: {es-issue}102657[#102657]) -* Remove leniency in msearch parsing {es-pull}103232[#103232] -* Resolve Cluster API {es-pull}102726[#102726] -* Reuse number field mapper tests in other modules {es-pull}99142[#99142] (issue: {es-issue}92947[#92947]) -* S3 first byte latency metric {es-pull}102435[#102435] -* Update s3 latency metric to use micros {es-pull}103633[#103633] -* Upgrade to Lucene 9.10.0 {es-pull}105578[#105578] - -Security:: -* Add Query Users API {es-pull}104033[#104033] -* Add `ApiKey` expiration time to audit log {es-pull}103959[#103959] -* Add expiration time to update api key api {es-pull}103453[#103453] -* Add stricter validation for api key expiration time {es-pull}103973[#103973] -* Add support for the `simple_query_string` to the Query API Key API {es-pull}104132[#104132] -* Add support for the `type` parameter, for sorting, to the Query API Key API {es-pull}104625[#104625] -* Aggs support for Query API Key Information API {es-pull}104895[#104895] -* Hot-reloadable remote cluster credentials {es-pull}102798[#102798] - -Snapshot/Restore:: -* Add s3 `HeadObject` request to request stats {es-pull}105105[#105105] -* Expose `OperationPurpose` in S3 access logs using a https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom[custom query-string parameter] {es-pull}105044[#105044] -* Fix blob cache race, decay, time dependency {es-pull}104784[#104784] -* Pause shard snapshots on graceful shutdown {es-pull}101717[#101717] -* Retry indefinitely for s3 indices blob read errors {es-pull}103300[#103300] - -Store:: -* List hidden shard stores by default {es-pull}103710[#103710] - -TLS:: -* 'elasticsearch-certutil cert' now verifies the issuing chain of the generated certificate {es-pull}103948[#103948] - -TSDB:: -* Improve storage efficiency for non-metric fields in TSDB {es-pull}99747[#99747] -* Introduce experimental pass-through field type {es-pull}103648[#103648] -* Nest pass-through objects within objects {es-pull}105062[#105062] -* Restrict usage of certain aggregations when in sort order execution is required {es-pull}104665[#104665] -* Small time series agg improvement {es-pull}106288[#106288] - -Transform:: -* Allow transforms to use PIT with remote clusters again {es-pull}105192[#105192] (issue: {es-issue}104518[#104518]) -* Transforms: Adding basic stats API param {es-pull}104878[#104878] - -Vector Search:: -* Add new int8_flat and flat vector index types {es-pull}104872[#104872] -* Add support for more than one `inner_hit` when searching nested vectors {es-pull}104006[#104006] -* Making `k` and `num_candidates` optional for knn search {es-pull}101209[#101209] (issue: {es-issue}97533[#97533]) - -[[feature-8.13.0]] -[float] -=== New features - -Data streams:: -* Add `require_data_stream` parameter to indexing requests to enforce indexing operations target a data stream {es-pull}101872[#101872] (issue: {es-issue}97032[#97032]) -* Redirect failed ingest node operations to a failure store when available {es-pull}103481[#103481] - -ES|QL:: -* ESQL: Introduce mode setting for ENRICH {es-pull}103949[#103949] -* ESQL: add =~ operator (case insensitive equality) {es-pull}103656[#103656] - -Health:: -* Create a DSL health indicator as part of the health API {es-pull}103130[#103130] - -Infra/Core:: -* Add gradle tasks and code to modify and access mappings between version ids and release versions {es-pull}103627[#103627] - -Mapping:: -* Add `unmatch_mapping_type`, and support array of types {es-pull}103171[#103171] (issues: {es-issue}102807[#102807], {es-issue}102795[#102795]) - -Search:: -* Added Duplicate Word Check Feature to Analysis Nori {es-pull}103325[#103325] (issue: {es-issue}103321[#103321]) -* [Synonyms] Mark Synonyms as GA {es-pull}103223[#103223] - -[[upgrade-8.13.0]] -[float] -=== Upgrades - -Query Languages:: -* Upgrade ANTLR4 to 4.13.1 {es-pull}105334[#105334] (issue: {es-issue}102953[#102953]) - -Search:: -* Upgrade to Lucene 9.9.0 {es-pull}102782[#102782] -* Upgrade to Lucene 9.9.1 {es-pull}103387[#103387] -* Upgrade to Lucene 9.9.2 {es-pull}104753[#104753] diff --git a/docs/reference/release-notes/8.13.1.asciidoc b/docs/reference/release-notes/8.13.1.asciidoc deleted file mode 100644 index c654af3dd5cc0..0000000000000 --- a/docs/reference/release-notes/8.13.1.asciidoc +++ /dev/null @@ -1,53 +0,0 @@ -[[release-notes-8.13.1]] -== {es} version 8.13.1 - -Also see <>. - -[[known-issues-8.13.1]] -[float] -=== Known issues -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. -In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` -process to reclaim memory. This can cause inference requests to fail. -Elasticsearch will automatically restart the `pytorch_inference` process -after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) - -[[bug-8.13.1]] -[float] - -* Searches involving nodes upgraded to 8.13.0 and a coordinator node that is running on version - 8.12 or earlier can produce duplicate buckets when running `date_histogram` or `histogram` - aggregations. This can happen during a rolling upgrade to 8.13 or while running cross-cluster - searches. (issue: {es-issue}108181[#108181]). - -=== Bug fixes - -Aggregations:: -* Add test to exercise reduction of terms aggregation order by key {es-pull}106799[#106799] - -Downsampling:: -* Gate reading of optional string array for bwc {es-pull}106878[#106878] - -Machine Learning:: -* Fix Array out of bounds exception in the XLM Roberta tokenizer {es-pull}106655[#106655] - -Search:: -* Fix concurrency bug in `AbstractStringScriptFieldAutomatonQuery` {es-pull}106678[#106678] (issue: {es-issue}105911[#105911]) -* Fix the background set of significant terms aggregations in case the data is in different shards than the foreground set {es-pull}106564[#106564] - -Transform:: -* Fail checkpoint on missing clusters {es-pull}106793[#106793] (issues: {es-issue}104533[#104533], {es-issue}106790[#106790]) - -[[enhancement-8.13.1]] -[float] -=== Enhancements - -Transform:: -* Raise loglevel of events related to transform lifecycle from DEBUG to INFO {es-pull}106602[#106602] diff --git a/docs/reference/release-notes/8.13.2.asciidoc b/docs/reference/release-notes/8.13.2.asciidoc deleted file mode 100644 index f4540343ca9ea..0000000000000 --- a/docs/reference/release-notes/8.13.2.asciidoc +++ /dev/null @@ -1,54 +0,0 @@ -[[release-notes-8.13.2]] -== {es} version 8.13.2 - -Also see <>. - -[[known-issues-8.13.2]] -[float] -=== Known issues -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. -In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` -process to reclaim memory. This can cause inference requests to fail. -Elasticsearch will automatically restart the `pytorch_inference` process -after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) - -[[bug-8.13.2]] -[float] - -* Searches involving nodes upgraded to 8.13.0 and a coordinator node that is running on version - 8.12 or earlier can produce duplicate buckets when running `date_histogram` or `histogram` - aggregations. This can happen during a rolling upgrade to 8.13 or while running cross-cluster - searches. (issue: {es-issue}108181[#108181]). - -=== Bug fixes - -Aggregations:: -* Address concurrency issue in top hits aggregation {es-pull}106990[#106990] - -Application:: -* [Connector API] Support numeric for configuration select option value type {es-pull}107059[#107059] - -Downsampling:: -* Fix a downsample persistent task assignment bug {es-pull}106247[#106247] -* Fix downsample action request serialization {es-pull}106920[#106920] - -ES|QL:: -* ESQL: Fix fully pruned aggregates {es-pull}106673[#106673] (issue: {es-issue}106427[#106427]) - -Packaging:: -* Downgrade JDK to JDK 21.0.2 {es-pull}107137[#107137] (issue: {es-issue}106987[#106987]) - -[[enhancement-8.13.2]] -[float] -=== Enhancements - -Security:: -* Query API Key Information API support for the `typed_keys` request parameter {es-pull}106873[#106873] (issue: {es-issue}106817[#106817]) -* Query API Keys support for both `aggs` and `aggregations` keywords {es-pull}107054[#107054] (issue: {es-issue}106839[#106839]) diff --git a/docs/reference/release-notes/8.13.3.asciidoc b/docs/reference/release-notes/8.13.3.asciidoc deleted file mode 100644 index f1bb4211f4676..0000000000000 --- a/docs/reference/release-notes/8.13.3.asciidoc +++ /dev/null @@ -1,60 +0,0 @@ -[[release-notes-8.13.3]] -== {es} version 8.13.3 - -Also see <>. - -[[breaking-8.13.3]] -[float] -=== Breaking changes - -SQL:: -* Limit how much space some string functions can use {es-pull}107333[#107333] - -[[known-issues-8.13.3]] -[float] -=== Known issues -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. -In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` -process to reclaim memory. This can cause inference requests to fail. -Elasticsearch will automatically restart the `pytorch_inference` process -after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) - -[[bug-8.13.3]] -[float] -=== Bug fixes - -Data streams:: -* Avoid unintentionally clearing the `DataStream.rolloverOnWrite` flag {es-pull}107122[#107122] - -ES|QL:: -* ESQL: Fix bug when combining projections {es-pull}107131[#107131] (issue: {es-issue}107083[#107083]) -* ESQL: Fix missing refs due to pruning renamed grouping columns {es-pull}107328[#107328] (issues: {es-issue}107083[#107083], {es-issue}107166[#107166]) - -Indices APIs:: -* GET /_all should return hidden indices with visible aliases {es-pull}106975[#106975] - -Mapping:: -* Fix merging component templates with a mix of dotted and nested object mapper definitions {es-pull}106077[#106077] (issue: {es-issue}105482[#105482]) - -Network:: -* Handle exceptions thrown by HTTP header validation {es-pull}107355[#107355] (issue: {es-issue}107338[#107338]) - -Percolator:: -* Percolator named queries: rewrite for matched info {es-pull}107432[#107432] (issue: {es-issue}107176[#107176]) - -Search:: -* Fix `minimized_round_trips` in lookup runtime fields {es-pull}107785[#107785] - -[[enhancement-8.13.3]] -[float] -=== Enhancements - -ES|QL:: -* ESQL: Introduce language versioning to REST API {es-pull}106824[#106824] diff --git a/docs/reference/release-notes/8.13.4.asciidoc b/docs/reference/release-notes/8.13.4.asciidoc deleted file mode 100644 index 446aae048945b..0000000000000 --- a/docs/reference/release-notes/8.13.4.asciidoc +++ /dev/null @@ -1,36 +0,0 @@ -[[release-notes-8.13.4]] -== {es} version 8.13.4 - -Also see <>. - -[[known-issues-8.13.4]] -[float] -=== Known issues -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. -In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` -process to reclaim memory. This can cause inference requests to fail. -Elasticsearch will automatically restart the `pytorch_inference` process -after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) - -[[bug-8.13.4]] -[float] -=== Bug fixes - -Aggregations:: -* Fix Bucket ordering for partial reduction in date histogram and histogram aggregation {es-pull}108184[#108184] (issue: {es-issue}108181[#108181]) - -ES|QL:: -* Fix `BlockHash` `DirectEncoder` {es-pull}108283[#108283] (issue: {es-issue}108268[#108268]) - -Snapshot/Restore:: -* Ensure necessary security context for s3 bulk deletions {es-pull}108280[#108280] (issue: {es-issue}108049[#108049]) - -TSDB:: -* Fix tsdb codec when doc-values spread in two blocks {es-pull}108276[#108276] diff --git a/docs/reference/release-notes/8.14.0.asciidoc b/docs/reference/release-notes/8.14.0.asciidoc deleted file mode 100644 index c2fee6ecaa07a..0000000000000 --- a/docs/reference/release-notes/8.14.0.asciidoc +++ /dev/null @@ -1,364 +0,0 @@ -[[release-notes-8.14.0]] -== {es} version 8.14.0 - -Also see <>. - -[[breaking-8.14.0]] -[float] -=== Breaking changes - -Security:: -* Prevent DLS/FLS if `replication` is assigned {es-pull}108600[#108600] -* Apply stricter Document Level Security (DLS) rules for the validate query API with the rewrite parameter {es-pull}105709[#105709] -* Apply stricter Document Level Security (DLS) rules for terms aggregations when min_doc_count is set to 0 {es-pull}105714[#105714] - -[[known-issues-8.14.0]] -[float] -=== Known issues -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. -In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` -process to reclaim memory. This can cause inference requests to fail. -Elasticsearch will automatically restart the `pytorch_inference` process -after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) - -[[bug-8.14.0]] -[float] -=== Bug fixes - -Aggregations:: -* Cross check livedocs for terms aggs when index access control list is non-null {es-pull}105714[#105714] -* ESQL: Enable VALUES agg for datetime {es-pull}107016[#107016] -* Fix IOOBE in TTest aggregation when using filters {es-pull}109034[#109034] -* Validate stats formatting in standard `InternalStats` constructor {es-pull}107678[#107678] (issue: {es-issue}107671[#107671]) - -Application:: -* [Bugfix] Connector API - fix status serialisation issue in termquery {es-pull}108365[#108365] -* [Connector API] Fix bug with filtering validation toXContent {es-pull}107467[#107467] -* [Connector API] Fix bug with parsing *_doc_count nullable fields {es-pull}108854[#108854] -* [Connector API] Fix bug with with wrong target index for access control sync {es-pull}109097[#109097] - -Authorization:: -* Users with monitor privileges can access async_search/status endpoint even when setting keep_alive {es-pull}107383[#107383] - -CAT APIs:: -* Fix numeric sorts in `_cat/nodes` {es-pull}106189[#106189] (issue: {es-issue}48070[#48070]) - -CCR:: -* Add ?master_timeout query parameter to ccr apis {es-pull}105168[#105168] - -CRUD:: -* Fix `noop_update_total` is not being updated when using the `_bulk` {es-pull}105745[#105745] (issue: {es-issue}105742[#105742]) -* Use correct system index bulk executor {es-pull}106150[#106150] - -Cluster Coordination:: -* Fix support for infinite `?master_timeout` {es-pull}107050[#107050] - -Data streams:: -* Add non-indexed fields to ecs templates {es-pull}106714[#106714] -* Fix bulk NPE when retrying failure redirect after cluster block {es-pull}107598[#107598] -* Improve error message when rolling over DS alias {es-pull}106708[#106708] (issue: {es-issue}106137[#106137]) -* Only skip deleting a downsampled index if downsampling is in progress as part of DSL retention {es-pull}109020[#109020] - -Downsampling:: -* Fix downsample action request serialization {es-pull}106919[#106919] (issue: {es-issue}106917[#106917]) - -EQL:: -* Use #addWithoutBreaking when adding a negative number of bytes to the circuit breaker in `SequenceMatcher` {es-pull}107655[#107655] - -ES|QL:: -* ESQL: Allow reusing BUCKET grouping expressions in aggs {es-pull}107578[#107578] -* ESQL: Disable quoting in FROM command {es-pull}108431[#108431] -* ESQL: Fix MV_DEDUPE when using data from an index {es-pull}107577[#107577] (issue: {es-issue}104745[#104745]) -* ESQL: Fix error message when failing to resolve aggregate groupings {es-pull}108101[#108101] (issue: {es-issue}108053[#108053]) -* ESQL: Fix treating all fields as MV in COUNT pushdown {es-pull}106720[#106720] -* ESQL: Re-enable logical dependency check {es-pull}105860[#105860] -* ESQL: median, count and `count_distinct` over constants {es-pull}107414[#107414] (issues: {es-issue}105248[#105248], {es-issue}104900[#104900]) -* ES|QL fix no-length substring with supplementary (4-byte) character {es-pull}107183[#107183] -* ES|QL: Fix usage of IN operator with TEXT fields {es-pull}106654[#106654] (issue: {es-issue}105379[#105379]) -* ES|QL: Improve support for TEXT fields in functions {es-pull}106810[#106810] -* Fix docs generation of signatures for variadic functions {es-pull}107865[#107865] -* [ESQL] Mark `date_diff` as requiring all three arguments {es-pull}108834[#108834] (issue: {es-issue}108383[#108383]) - -Health:: -* Don't stop checking if the `HealthNode` persistent task is present {es-pull}105449[#105449] (issue: {es-issue}98926[#98926]) -* Health monitor concurrency fixes {es-pull}105674[#105674] (issue: {es-issue}105065[#105065]) - -Highlighting:: -* Check preTags and postTags params for empty values {es-pull}106396[#106396] (issue: {es-issue}69009[#69009]) -* added fix for inconsistent text trimming in Unified Highlighter {es-pull}99961[#99961] (issue: {es-issue}101803[#101803]) - -Infra/CLI:: -* Workaround G1 bug for JDK 22 and 22.0.1 {es-pull}108571[#108571] - -Infra/Core:: -* Add a check for the same feature being declared regular and historical {es-pull}106285[#106285] -* Fix `AffixSetting.exists` to include secure settings {es-pull}106745[#106745] -* Fix regression in get index settings (human=true) where the version was not displayed in human-readable format {es-pull}107447[#107447] -* Nativeaccess: try to load all located libsystemds {es-pull}108238[#108238] (issue: {es-issue}107878[#107878]) -* Update several references to `IndexVersion.toString` to use `toReleaseVersion` {es-pull}107828[#107828] (issue: {es-issue}107821[#107821]) -* Update several references to `TransportVersion.toString` to use `toReleaseVersion` {es-pull}107902[#107902] - -Infra/Logging:: -* Log when update AffixSetting using addAffixMapUpdateConsumer {es-pull}97072[#97072] - -Infra/Node Lifecycle:: -* Consider `ShardRouting` roles when calculating shard copies in shutdown status {es-pull}106063[#106063] -* Wait indefintely for http connections on shutdown by default {es-pull}106511[#106511] - -Infra/Scripting:: -* Guard against a null scorer in painless execute {es-pull}109048[#109048] (issue: {es-issue}43541[#43541]) -* Painless: Apply true regex limit factor with FIND and MATCH operation {es-pull}105670[#105670] - -Ingest Node:: -* Catching `StackOverflowErrors` from bad regexes in `GsubProcessor` {es-pull}106851[#106851] -* Fix `uri_parts` processor behaviour for missing extensions {es-pull}105689[#105689] (issue: {es-issue}105612[#105612]) -* Remove leading is_ prefix from Enterprise geoip docs {es-pull}108518[#108518] -* Slightly better geoip `databaseType` validation {es-pull}106889[#106889] - -License:: -* Fix lingering license warning header {es-pull}108031[#108031] (issue: {es-issue}107573[#107573]) - -Machine Learning:: -* Fix NPE in ML assignment notifier {es-pull}107312[#107312] -* Fix `startOffset` must be non-negative error in XLMRoBERTa tokenizer {es-pull}107891[#107891] (issue: {es-issue}104626[#104626]) -* Fix the position of spike, dip and distribution changes bucket when the sibling aggregation includes empty buckets {es-pull}106472[#106472] -* Make OpenAI embeddings parser more flexible {es-pull}106808[#106808] - -Mapping:: -* Dedupe terms in terms queries {es-pull}106381[#106381] -* Extend support of `allowedFields` to `getMatchingFieldNames` and `getAllFields` {es-pull}106862[#106862] -* Fix for raw mapping merge of fields named "properties" {es-pull}108867[#108867] (issue: {es-issue}108866[#108866]) -* Handle infinity during synthetic source construction for scaled float field {es-pull}107494[#107494] (issue: {es-issue}107101[#107101]) -* Handle pass-through subfields with deep nesting {es-pull}106767[#106767] -* Wrap "Pattern too complex" exception into an `IllegalArgumentException` {es-pull}109173[#109173] - -Network:: -* Fix HTTP corner-case response leaks {es-pull}105617[#105617] - -Search:: -* Add `internalClusterTest` for and fix leak in `ExpandSearchPhase` {es-pull}108562[#108562] (issue: {es-issue}108369[#108369]) -* Avoid attempting to load the same empty field twice in fetch phase {es-pull}107551[#107551] -* Bugfix: Disable eager loading `BitSetFilterCache` on Indexing Nodes {es-pull}105791[#105791] -* Cross-cluster painless/execute actions should check permissions only on target remote cluster {es-pull}105360[#105360] -* Fix error 500 on invalid `ParentIdQuery` {es-pull}105693[#105693] (issue: {es-issue}105366[#105366]) -* Fix range queries for float/half_float fields when bounds are out of type's range {es-pull}106691[#106691] -* Fixing NPE when requesting [_none_] for `stored_fields` {es-pull}104711[#104711] -* Fork when handling remote field-caps responses {es-pull}107370[#107370] -* Handle parallel calls to `createWeight` when profiling is on {es-pull}108041[#108041] (issues: {es-issue}104131[#104131], {es-issue}104235[#104235]) -* Harden field-caps request dispatcher {es-pull}108736[#108736] -* Replace `UnsupportedOperationException` with `IllegalArgumentException` for non-existing columns {es-pull}107038[#107038] -* Unable to retrieve multiple stored field values {es-pull}106575[#106575] -* Validate `model_id` is required when using the `learning_to_rank` rescorer {es-pull}107743[#107743] - -Security:: -* Disable validate when rewrite parameter is sent and the index access control list is non-null {es-pull}105709[#105709] -* Fix field caps and field level security {es-pull}106731[#106731] - -Snapshot/Restore:: -* Fix double-pausing shard snapshot {es-pull}109148[#109148] (issue: {es-issue}109143[#109143]) -* Treat 404 as empty register in `AzureBlobStore` {es-pull}108900[#108900] (issue: {es-issue}108504[#108504]) -* `SharedBlobCacheService.maybeFetchRegion` should use `computeCacheFileRegionSize` {es-pull}106685[#106685] - -TSDB:: -* Flip dynamic mapping condition when create tsid {es-pull}105636[#105636] - -Transform:: -* Consolidate permissions checks {es-pull}106413[#106413] (issue: {es-issue}105794[#105794]) -* Disable PIT for remote clusters {es-pull}107969[#107969] -* Make force-stopping the transform always remove persistent task from cluster state {es-pull}106989[#106989] (issue: {es-issue}106811[#106811]) -* Only trigger action once per thread {es-pull}107232[#107232] (issue: {es-issue}107215[#107215]) -* [Transform] Auto retry Transform start {es-pull}106243[#106243] - -Vector Search:: -* Fix multithreading copies in lib vec {es-pull}108802[#108802] -* [8.14] Fix multithreading copies in lib vec {es-pull}108810[#108810] - -[[deprecation-8.14.0]] -[float] -=== Deprecations - -Mapping:: -* Deprecate allowing `fields` in scenarios where it is ignored {es-pull}106031[#106031] - -[[enhancement-8.14.0]] -[float] -=== Enhancements - -Aggregations:: -* Add a `PriorityQueue` backed by `BigArrays` {es-pull}106361[#106361] -* All new `shard_seed` parameter for `random_sampler` agg {es-pull}104830[#104830] - -Allocation:: -* Add allocation stats {es-pull}105894[#105894] -* Add index forecasts to /_cat/allocation output {es-pull}97561[#97561] - -Application:: -* [Profiling] Add TopN Functions API {es-pull}106860[#106860] -* [Profiling] Allow to override index settings {es-pull}106172[#106172] -* [Profiling] Speed up serialization of flamegraph {es-pull}105779[#105779] - -Authentication:: -* Support Profile Activate with JWTs with client authn {es-pull}105439[#105439] (issue: {es-issue}105342[#105342]) - -Authorization:: -* Allow users to get status of own async search tasks {es-pull}106638[#106638] -* [Security Solution] Add `read` permission for third party agent indices for `kibana_system` {es-pull}107046[#107046] - -Data streams:: -* Add data stream lifecycle to kibana reporting template {es-pull}106259[#106259] - -ES|QL:: -* Add ES|QL Locate function {es-pull}106899[#106899] (issue: {es-issue}106818[#106818]) -* Add ES|QL signum function {es-pull}106866[#106866] -* Add status for enrich operator {es-pull}106036[#106036] -* Add two new OGC functions ST_X and ST_Y {es-pull}105768[#105768] -* Adjust array resizing in block builder {es-pull}106934[#106934] -* Bulk loading enrich fields in ESQL {es-pull}106796[#106796] -* ENRICH support for TEXT fields {es-pull}106435[#106435] (issue: {es-issue}105384[#105384]) -* ESQL: Add timers to many status results {es-pull}105421[#105421] -* ESQL: Allow grouping key inside stats expressions {es-pull}106579[#106579] -* ESQL: Introduce expression validation phase {es-pull}105477[#105477] (issue: {es-issue}105425[#105425]) -* ESQL: Log queries at debug level {es-pull}108257[#108257] -* ESQL: Regex improvements {es-pull}106429[#106429] -* ESQL: Sum of constants {es-pull}105454[#105454] -* ESQL: Support ST_DISJOINT {es-pull}107007[#107007] -* ESQL: Support partially folding CASE {es-pull}106094[#106094] -* ESQL: Use faster field caps {es-pull}105067[#105067] -* ESQL: extend BUCKET with spans {es-pull}107272[#107272] -* ESQL: perform a reduction on the data node {es-pull}106516[#106516] -* Expand support for ENRICH to full set supported by ES ingest processors {es-pull}106186[#106186] (issue: {es-issue}106162[#106162]) -* Introduce ordinal bytesref block {es-pull}106852[#106852] (issue: {es-issue}106387[#106387]) -* Leverage ordinals in enrich lookup {es-pull}107449[#107449] -* Serialize big array blocks {es-pull}106373[#106373] -* Serialize big array vectors {es-pull}106327[#106327] -* Specialize serialization for `ArrayVectors` {es-pull}105893[#105893] -* Specialize serialization of array blocks {es-pull}106102[#106102] -* Speed up serialization of `BytesRefArray` {es-pull}106053[#106053] -* Support ST_CONTAINS and ST_WITHIN {es-pull}106503[#106503] -* Support ST_INTERSECTS between geometry column and other geometry or string {es-pull}104907[#104907] (issue: {es-issue}104874[#104874]) - -Engine:: -* Add metric for calculating index flush time excluding waiting on locks {es-pull}107196[#107196] - -Highlighting:: -* Enable 'encoder' and 'tags_schema' highlighting settings at field level {es-pull}107224[#107224] (issue: {es-issue}94028[#94028]) - -ILM+SLM:: -* Add a flag to re-enable writes on the final index after an ILM shrink action. {es-pull}107121[#107121] (issue: {es-issue}106599[#106599]) - -Indices APIs:: -* Wait forever for `IndexTemplateRegistry` asset installation {es-pull}105985[#105985] - -Infra/CLI:: -* Enhance search tier GC options {es-pull}106526[#106526] -* Increase KDF iteration count in `KeyStoreWrapper` {es-pull}107107[#107107] - -Infra/Core:: -* Add pluggable `BuildVersion` in `NodeMetadata` {es-pull}105757[#105757] - -Infra/Metrics:: -* Infrastructure for metering the update requests {es-pull}105063[#105063] -* `DocumentParsingObserver` to accept an `indexName` to allow skipping system indices {es-pull}107041[#107041] - -Infra/Scripting:: -* String sha512() painless function {es-pull}99048[#99048] (issue: {es-issue}97691[#97691]) - -Ingest Node:: -* Add support for the 'Anonymous IP' database to the geoip processor {es-pull}107287[#107287] (issue: {es-issue}90789[#90789]) -* Add support for the 'Enterprise' database to the geoip processor {es-pull}107377[#107377] -* Adding `cache_stats` to geoip stats API {es-pull}107334[#107334] -* Support data streams in enrich policy indices {es-pull}107291[#107291] (issue: {es-issue}98836[#98836]) - -Machine Learning:: -* Add GET `_inference` for all inference endpoints {es-pull}107517[#107517] -* Added a timeout parameter to the inference API {es-pull}107242[#107242] -* Enable retrying on 500 error response from Cohere text embedding API {es-pull}105797[#105797] - -Mapping:: -* Make int8_hnsw our default index for new dense-vector fields {es-pull}106836[#106836] - -Ranking:: -* Add retrievers using the parser-only approach {es-pull}105470[#105470] - -Search:: -* Add Lucene spanish plural stemmer {es-pull}106952[#106952] -* Add `modelId` and `modelText` to `KnnVectorQueryBuilder` {es-pull}106068[#106068] -* Add a SIMD (Neon) optimised vector distance function for int8 {es-pull}106133[#106133] -* Add transport version for search load autoscaling {es-pull}106377[#106377] -* CCS with `minimize_roundtrips` performs incremental merges of each `SearchResponse` {es-pull}105781[#105781] -* Track ongoing search tasks {es-pull}107129[#107129] - -Security:: -* Invalidating cross cluster API keys requires `manage_security` {es-pull}107411[#107411] -* Show owner `realm_type` for returned API keys {es-pull}105629[#105629] - -Snapshot/Restore:: -* Add setting for max connections to S3 {es-pull}107533[#107533] -* Distinguish different snapshot failures by log level {es-pull}105622[#105622] - -Stats:: -* (API+) CAT Nodes alias for shard header to match CAT Allocation {es-pull}105847[#105847] -* Add total size in bytes to doc stats {es-pull}106840[#106840] (issue: {es-issue}97670[#97670]) - -TSDB:: -* Improve short-circuiting downsample execution {es-pull}106563[#106563] -* Support non-keyword dimensions as routing fields in TSDB {es-pull}105501[#105501] -* Text fields are stored by default in TSDB indices {es-pull}106338[#106338] (issue: {es-issue}97039[#97039]) - -Transform:: -* Check node shutdown before fail {es-pull}107358[#107358] (issue: {es-issue}100891[#100891]) -* Do not log error on node restart when the transform is already failed {es-pull}106171[#106171] (issue: {es-issue}106168[#106168]) - -[[feature-8.14.0]] -[float] -=== New features - -Application:: -* Allow `typed_keys` for search application Search API {es-pull}108007[#108007] -* [Connector API] Support cleaning up sync jobs when deleting a connector {es-pull}107253[#107253] - -ES|QL:: -* ESQL: Values aggregation function {es-pull}106065[#106065] (issue: {es-issue}103600[#103600]) -* ESQL: allow sorting by expressions and not only regular fields {es-pull}107158[#107158] -* Support ES|QL requests through the `NodeClient::execute` {es-pull}106244[#106244] - -Indices APIs:: -* Add granular error list to alias action response {es-pull}106514[#106514] (issue: {es-issue}94478[#94478]) - -Machine Learning:: -* Add Cohere rerank to `_inference` service {es-pull}106378[#106378] -* Add support for Azure OpenAI embeddings to inference service {es-pull}107178[#107178] -* Create default word based chunker {es-pull}107303[#107303] -* Text structure endpoints to determine the structure of a list of messages and of an indexed field {es-pull}105660[#105660] - -Mapping:: -* Flatten object mappings when subobjects is false {es-pull}103542[#103542] (issues: {es-issue}99860[#99860], {es-issue}103497[#103497]) - -Security:: -* Get and Query API Key with profile uid {es-pull}106531[#106531] - -Vector Search:: -* Adding support for hex-encoded byte vectors on knn-search {es-pull}105393[#105393] - -[[upgrade-8.14.0]] -[float] -=== Upgrades - -Infra/Core:: -* Upgrade jna to 5.12.1 {es-pull}105717[#105717] - -Ingest Node:: -* Updating the tika version to 2.9.1 in the ingest attachment plugin {es-pull}106315[#106315] - -Network:: -* Upgrade to Netty 4.1.107 {es-pull}105517[#105517] - -Packaging:: -* Update bundled JDK to Java 22 (again) {es-pull}108654[#108654] diff --git a/docs/reference/release-notes/8.14.1.asciidoc b/docs/reference/release-notes/8.14.1.asciidoc deleted file mode 100644 index de3ecd210b488..0000000000000 --- a/docs/reference/release-notes/8.14.1.asciidoc +++ /dev/null @@ -1,50 +0,0 @@ -[[release-notes-8.14.1]] -== {es} version 8.14.1 - - -Also see <>. - -[[known-issues-8.14.1]] -[float] -=== Known issues -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. -In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` -process to reclaim memory. This can cause inference requests to fail. -Elasticsearch will automatically restart the `pytorch_inference` process -after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) - -[[bug-8.14.1]] -[float] -=== Bug fixes - -Authorization:: -* Fix task cancellation authz on fulfilling cluster {es-pull}109357[#109357] - -Infra/Core:: -* Guard systemd library lookup from unreadable directories {es-pull}108931[#108931] - -Machine Learning:: -* Reset retryable index requests after failures {es-pull}109320[#109320] - -Network:: -* Fix task cancellation on remote cluster when original request fails {es-pull}109440[#109440] - -Transform:: -* Reset max page size to settings value {es-pull}109532[#109532] (issue: {es-issue}109308[#109308]) - -Vector Search:: -* Correct how hex strings are handled when dynamically updating vector dims {es-pull}109423[#109423] - -[[enhancement-8.14.1]] -[float] -=== Enhancements - -Infra/Settings:: -* Add remove index setting command {es-pull}109276[#109276] diff --git a/docs/reference/release-notes/8.14.2.asciidoc b/docs/reference/release-notes/8.14.2.asciidoc deleted file mode 100644 index f3f0651508dca..0000000000000 --- a/docs/reference/release-notes/8.14.2.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -[[release-notes-8.14.2]] -== {es} version 8.14.2 - -Also see <>. - -[[known-issues-8.14.2]] -[float] -=== Known issues -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. -In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` -process to reclaim memory. This can cause inference requests to fail. -Elasticsearch will automatically restart the `pytorch_inference` process -after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) - -[[bug-8.14.2]] -[float] -=== Bug fixes - -Data streams:: -* Ensure a lazy rollover request will rollover the target data stream once. {es-pull}109636[#109636] -* [Data streams] Fix the description of the lazy rollover task {es-pull}109629[#109629] - -ES|QL:: -* Fix ESQL cancellation for exchange requests {es-pull}109695[#109695] -* Fix equals and hashcode for `SingleValueQuery.LuceneQuery` {es-pull}110035[#110035] -* Force execute inactive sink reaper {es-pull}109632[#109632] - -Infra/Scripting:: -* Check array size before returning array item in script doc values {es-pull}109824[#109824] (issue: {es-issue}104998[#104998]) - -Infra/Settings:: -* Guard file settings readiness on file settings support {es-pull}109500[#109500] - -Machine Learning:: -* Fix IndexOutOfBoundsException during inference {es-pull}109533[#109533] - -Mapping:: -* Re-define `index.mapper.dynamic` setting in 8.x for a better 7.x to 8.x upgrade if this setting is used. {es-pull}109341[#109341] - -Ranking:: -* Fix for from parameter when using `sub_searches` and rank {es-pull}106253[#106253] (issue: {es-issue}99011[#99011]) - -Search:: -* Add hexstring support byte painless scorers {es-pull}109492[#109492] -* Fix automatic tracking of collapse with `docvalue_fields` {es-pull}110103[#110103] diff --git a/docs/reference/release-notes/8.14.3.asciidoc b/docs/reference/release-notes/8.14.3.asciidoc deleted file mode 100644 index 17c53faa4a37f..0000000000000 --- a/docs/reference/release-notes/8.14.3.asciidoc +++ /dev/null @@ -1,32 +0,0 @@ -[[release-notes-8.14.3]] -== {es} version 8.14.3 - -Also see <>. - -[[known-issues-8.14.3]] -[float] -=== Known issues -* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, -information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. -If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. -To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. This issue is fixed in 8.15.0. - -[[bug-8.14.3]] -[float] -=== Bug fixes - -Cluster Coordination:: -* Ensure tasks preserve versions in `MasterService` {es-pull}109850[#109850] - -ES|QL:: -* Introduce compute listener {es-pull}110400[#110400] - -Mapping:: -* Automatically adjust `ignore_malformed` only for the @timestamp {es-pull}109948[#109948] - -TSDB:: -* Disallow index.time_series.end_time setting from being set or updated in normal indices {es-pull}110268[#110268] (issue: {es-issue}110265[#110265]) - - diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc deleted file mode 100644 index 80c86c7079f0c..0000000000000 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ /dev/null @@ -1,558 +0,0 @@ -[[release-notes-8.15.0]] -== {es} version 8.15.0 - -Also see <>. - - -[[known-issues-8.15.0]] -[float] -=== Known issues -* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. -In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` -process to reclaim memory. This can cause inference requests to fail. -Elasticsearch will automatically restart the `pytorch_inference` process -after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) - -* Pipeline aggregations under `time_series` and `categorize_text` aggregations are never -returned (issue: {es-issue}111679[#111679]) - -* Elasticsearch will not start on Windows machines if -[`bootstrap.memory_lock` is set to `true`](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#bootstrap-memory_lock). -Either downgrade to an earlier version, upgrade to 8.15.1, or else follow the -recommendation in the manual to entirely disable swap instead of using the -memory lock feature (issue: {es-issue}111847[#111847]) - -* The `took` field of the response to the <> API is incorrect and may be rather large. Clients which -<> assume that this value will be within a particular range (e.g. that it fits into a 32-bit -signed integer) may encounter errors (issue: {es-issue}111854[#111854]) - -* Elasticsearch will not start if custom role mappings are configured using the -`xpack.security.authc.realms.*.files.role_mapping` configuration option. As a workaround, custom role mappings -can be configured using the https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html[REST API] (issue: {es-issue}112503[#112503]) - -* ES|QL queries can lead to node crashes due to Out Of Memory errors when: -** Multiple indices match the query pattern -** These indices have many conflicting field mappings -** Many of those fields are included in the request -These issues deplete heap memory, increasing the likelihood of OOM errors. (issue: {es-issue}111964[#111964], {es-issue}111358[#111358]). -In Kibana, you might indirectly execute these queries when using Discover, or adding a Field Statistics panel to a dashboard. -+ -To work around this issue, you have a number of options: -** Downgrade to an earlier version -** Upgrade to 8.15.2 upon release -** Follow the instructions to -<> -** Change the default data view in Discover to a smaller set of indices and/or one with fewer mapping conflicts. - -* Synthetic source bug. Synthetic source may fail generating the _source at runtime, causing failures in get APIs or -partial failures in the search APIs. The result is that for the affected documents the _source can't be retrieved. -There is no workaround and the only option to is to upgrade to 8.15.2 when released. -+ -If you use synthetic source then you may be affected by this bug if the following is true: -** If you have more fields then the `index.mapping.total_fields.limit` setting allows. -** If you use dynamic mappings and the `index.mapping.total_fields.ignore_dynamic_beyond_limit` setting is enabled. - -[[breaking-8.15.0]] -[float] -=== Breaking changes - -Cluster Coordination:: -* Interpret `?timeout=-1` as infinite ack timeout {es-pull}107675[#107675] - -Inference API:: -* Replace `model_id` with `inference_id` in GET inference API {es-pull}111366[#111366] - -Rollup:: -* Disallow new rollup jobs in clusters with no rollup usage {es-pull}108624[#108624] (issue: {es-issue}108381[#108381]) - -Search:: -* Change `skip_unavailable` remote cluster setting default value to true {es-pull}105792[#105792] - -[[bug-8.15.0]] -[float] -=== Bug fixes - -Aggregations:: -* Don't sample calls to `ReduceContext#consumeBucketsAndMaybeBreak` ins `InternalDateHistogram` and `InternalHistogram` during reduction {es-pull}110186[#110186] -* Fix `ClassCastException` in Significant Terms {es-pull}108429[#108429] (issue: {es-issue}108427[#108427]) -* Run terms concurrently when cardinality is only lower than shard size {es-pull}110369[#110369] (issue: {es-issue}105505[#105505]) - -Allocation:: -* Fix misc trappy allocation API timeouts {es-pull}109241[#109241] -* Fix trappy timeout in allocation explain API {es-pull}109240[#109240] - -Analysis:: -* Correct positioning for unique token filter {es-pull}109395[#109395] - -Authentication:: -* Add comma before charset parameter in WWW-Authenticate response header {es-pull}110906[#110906] -* Avoid NPE if `users_roles` file does not exist {es-pull}109606[#109606] -* Improve security-crypto threadpool overflow handling {es-pull}111369[#111369] - -Authorization:: -* Fix trailing slash in `security.put_privileges` specification {es-pull}110177[#110177] -* Fixes cluster state-based role mappings not recovered from disk {es-pull}109167[#109167] -* Handle unmatching remote cluster wildcards properly for `IndicesRequest.SingleIndexNoWildcards` requests {es-pull}109185[#109185] - -Autoscaling:: -* Expose `?master_timeout` in autoscaling APIs {es-pull}108759[#108759] - -CRUD:: -* Update checkpoints after post-replication actions, even on failure {es-pull}109908[#109908] - -Cluster Coordination:: -* Deserialize publish requests on generic thread-pool {es-pull}108814[#108814] (issue: {es-issue}106352[#106352]) -* Fail cluster state API if blocked {es-pull}109618[#109618] (issue: {es-issue}107503[#107503]) -* Use `scheduleUnlessShuttingDown` in `LeaderChecker` {es-pull}108643[#108643] (issue: {es-issue}108642[#108642]) - -Data streams:: -* Apm-data: set concrete values for `metricset.interval` {es-pull}109043[#109043] -* Ecs@mappings: reduce scope for `ecs_geo_point` {es-pull}108349[#108349] (issue: {es-issue}108338[#108338]) -* Include component templates in retention validaiton {es-pull}109779[#109779] - -Distributed:: -* Associate restore snapshot task to parent mount task {es-pull}108705[#108705] (issue: {es-issue}105830[#105830]) -* Don't detect `PlainActionFuture` deadlock on concurrent complete {es-pull}110361[#110361] (issues: {es-issue}110181[#110181], {es-issue}110360[#110360]) -* Handle nullable `DocsStats` and `StoresStats` {es-pull}109196[#109196] - -Downsampling:: -* Support flattened fields and multi-fields as dimensions in downsampling {es-pull}110066[#110066] (issue: {es-issue}99297[#99297]) - -ES|QL:: -* ESQL: Change "substring" function to not return null on empty string {es-pull}109174[#109174] -* ESQL: Fix Join references {es-pull}109989[#109989] -* ESQL: Fix LOOKUP attribute shadowing {es-pull}109807[#109807] (issue: {es-issue}109392[#109392]) -* ESQL: Fix Max doubles bug with negatives and add tests for Max and Min {es-pull}110586[#110586] -* ESQL: Fix `IpPrefix` function not handling correctly `ByteRefs` {es-pull}109205[#109205] (issue: {es-issue}109198[#109198]) -* ESQL: Fix equals `hashCode` for functions {es-pull}107947[#107947] (issue: {es-issue}104393[#104393]) -* ESQL: Fix variable shadowing when pushing down past Project {es-pull}108360[#108360] (issue: {es-issue}108008[#108008]) -* ESQL: Validate unique plan attribute names {es-pull}110488[#110488] (issue: {es-issue}110541[#110541]) -* ESQL: change from quoting from backtick to quote {es-pull}108395[#108395] -* ESQL: make named params objects truly per request {es-pull}110046[#110046] (issue: {es-issue}110028[#110028]) -* ES|QL: Fix DISSECT that overwrites input {es-pull}110201[#110201] (issue: {es-issue}110184[#110184]) -* ES|QL: limit query depth to 500 levels {es-pull}108089[#108089] (issue: {es-issue}107752[#107752]) -* ES|QL: reduce max expression depth to 400 {es-pull}111186[#111186] (issue: {es-issue}109846[#109846]) -* Fix ST_DISTANCE Lucene push-down for complex predicates {es-pull}110391[#110391] (issue: {es-issue}110349[#110349]) -* Fix `ClassCastException` with MV_EXPAND on missing field {es-pull}110096[#110096] (issue: {es-issue}109974[#109974]) -* Fix bug in union-types with type-casting in grouping key of STATS {es-pull}110476[#110476] (issues: {es-issue}109922[#109922], {es-issue}110477[#110477]) -* Fix for union-types for multiple columns with the same name {es-pull}110793[#110793] (issues: {es-issue}110490[#110490], {es-issue}109916[#109916]) -* [ESQL] Count_distinct(_source) should return a 400 {es-pull}110824[#110824] -* [ESQL] Fix parsing of large magnitude negative numbers {es-pull}110665[#110665] (issue: {es-issue}104323[#104323]) -* [ESQL] Migrate `SimplifyComparisonArithmetics` optimization {es-pull}109256[#109256] (issues: {es-issue}108388[#108388], {es-issue}108743[#108743]) - -Engine:: -* Async close of `IndexShard` {es-pull}108145[#108145] - -Highlighting:: -* Fix issue with returning incomplete fragment for plain highlighter {es-pull}110707[#110707] - -ILM+SLM:: -* Allow `read_slm` to call GET /_slm/status {es-pull}108333[#108333] - -Indices APIs:: -* Create a new `NodeRequest` for every `NodesDataTiersUsageTransport` use {es-pull}108379[#108379] - -Infra/Core:: -* Add a cluster listener to fix missing node features after upgrading from a version prior to 8.13 {es-pull}110710[#110710] (issue: {es-issue}109254[#109254]) -* Add bounds checking to parsing ISO8601 timezone offset values {es-pull}108672[#108672] -* Fix native preallocate to actually run {es-pull}110851[#110851] -* Ignore additional cpu.stat fields {es-pull}108019[#108019] (issue: {es-issue}107983[#107983]) -* Specify parse index when error occurs on multiple datetime parses {es-pull}108607[#108607] - -Infra/Metrics:: -* Provide document size reporter with `MapperService` {es-pull}109794[#109794] - -Infra/Node Lifecycle:: -* Expose `?master_timeout` on get-shutdown API {es-pull}108886[#108886] -* Fix serialization of put-shutdown request {es-pull}107862[#107862] (issue: {es-issue}107857[#107857]) -* Support wait indefinitely for search tasks to complete on node shutdown {es-pull}107426[#107426] - -Infra/REST API:: -* Add some missing timeout params to REST API specs {es-pull}108761[#108761] -* Consider `error_trace` supported by all endpoints {es-pull}109613[#109613] (issue: {es-issue}109612[#109612]) - -Ingest Node:: -* Fix Dissect with leading non-ascii characters {es-pull}111184[#111184] -* Fix enrich policy runner exception handling on empty segments response {es-pull}111290[#111290] -* GeoIP tasks should wait longer for master {es-pull}108410[#108410] -* Removing the use of Stream::peek from `GeoIpDownloader::cleanDatabases` {es-pull}110666[#110666] -* Simulate should succeed if `ignore_missing_pipeline` {es-pull}108106[#108106] (issue: {es-issue}107314[#107314]) - -Machine Learning:: -* Allow deletion of the ELSER inference service when reference in ingest {es-pull}108146[#108146] -* Avoid `InferenceRunner` deadlock {es-pull}109551[#109551] -* Correctly handle duplicate model ids for the `_cat` trained models api and usage statistics {es-pull}109126[#109126] -* Do not use global ordinals strategy if the leaf reader context cannot be obtained {es-pull}108459[#108459] -* Fix NPE in trained model assignment updater {es-pull}108942[#108942] -* Fix serialising inference delete response {es-pull}109384[#109384] -* Fix "stack use after scope" memory error {ml-pull}2673[#2673] -* Fix trailing slash in `ml.get_categories` specification {es-pull}110146[#110146] -* Handle any exception thrown by inference {ml-pull}2680[#2680] -* Increase response size limit for batched requests {es-pull}110112[#110112] -* Offload request to generic threadpool {es-pull}109104[#109104] (issue: {es-issue}109100[#109100]) -* Propagate accurate deployment timeout {es-pull}109534[#109534] (issue: {es-issue}109407[#109407]) -* Refactor TextEmbeddingResults to use primitives rather than objects {es-pull}108161[#108161] -* Require question to be non-null in `QuestionAnsweringConfig` {es-pull}107972[#107972] -* Start Trained Model Deployment API request query params now override body params {es-pull}109487[#109487] -* Suppress deprecation warnings from ingest pipelines when deleting trained model {es-pull}108679[#108679] (issue: {es-issue}105004[#105004]) -* Use default translog durability on AD results index {es-pull}108999[#108999] -* Use the multi node routing action for internal inference services {es-pull}109358[#109358] -* [Inference API] Extract optional long instead of integer in `RateLimitSettings#of` {es-pull}108602[#108602] -* [Inference API] Fix serialization for inference delete endpoint response {es-pull}110431[#110431] -* [Inference API] Replace `model_id` with `inference_id` in inference API except when stored {es-pull}111366[#111366] - -Mapping:: -* Fix off by one error when handling null values in range fields {es-pull}107977[#107977] (issue: {es-issue}107282[#107282]) -* Limit number of synonym rules that can be created {es-pull}109981[#109981] (issue: {es-issue}108785[#108785]) -* Propagate mapper builder context flags across nested mapper builder context creation {es-pull}109963[#109963] -* `DenseVectorFieldMapper` fixed typo {es-pull}108065[#108065] - -Network:: -* Use proper executor for failing requests when connection closes {es-pull}109236[#109236] (issue: {es-issue}109225[#109225]) -* `NoSuchRemoteClusterException` should not be thrown when a remote is configured {es-pull}107435[#107435] (issue: {es-issue}107381[#107381]) - -Packaging:: -* Adding override for lintian false positive on `libvec.so` {es-pull}108521[#108521] (issue: {es-issue}108514[#108514]) - -Ranking:: -* Fix score count validation in reranker response {es-pull}111424[#111424] (issue: {es-issue}111202[#111202]) - -Rollup:: -* Fix trailing slash in two rollup specifications {es-pull}110176[#110176] - -Search:: -* Adding score from `RankDoc` to `SearchHit` {es-pull}108870[#108870] -* Better handling of multiple rescorers clauses with LTR {es-pull}109071[#109071] -* Correct query profiling for conjunctions {es-pull}108122[#108122] (issue: {es-issue}108116[#108116]) -* Fix `DecayFunctions'` `toString` {es-pull}107415[#107415] (issue: {es-issue}100870[#100870]) -* Fix leak in collapsing search results {es-pull}110927[#110927] -* Fork freeing search/scroll contexts to GENERIC pool {es-pull}109481[#109481] - -Security:: -* Add permission to secure access to certain config files {es-pull}107827[#107827] -* Add permission to secure access to certain config files specified by settings {es-pull}108895[#108895] -* Fix trappy timeouts in security settings APIs {es-pull}109233[#109233] - -Snapshot/Restore:: -* Stricter failure handling in multi-repo get-snapshots request handling {es-pull}107191[#107191] - -TSDB:: -* Sort time series indices by time range in `GetDataStreams` API {es-pull}107967[#107967] (issue: {es-issue}102088[#102088]) - -Transform:: -* Always pick the user `maxPageSize` value {es-pull}109876[#109876] (issue: {es-issue}109844[#109844]) -* Exit gracefully when deleted {es-pull}107917[#107917] (issue: {es-issue}107266[#107266]) -* Fix NPE during destination index creation {es-pull}108891[#108891] (issue: {es-issue}108890[#108890]) -* Forward `indexServiceSafe` exception to listener {es-pull}108517[#108517] (issue: {es-issue}108418[#108418]) -* Halt Indexer on Stop/Abort API {es-pull}107792[#107792] -* Handle `IndexNotFoundException` {es-pull}108394[#108394] (issue: {es-issue}107263[#107263]) -* Prevent concurrent jobs during cleanup {es-pull}109047[#109047] -* Redirect `VersionConflict` to reset code {es-pull}108070[#108070] -* Reset max page size to settings value {es-pull}109449[#109449] (issue: {es-issue}109308[#109308]) - -Vector Search:: -* Ensure vector similarity correctly limits `inner_hits` returned for nested kNN {es-pull}111363[#111363] (issue: {es-issue}111093[#111093]) -* Ensure we return non-negative scores when scoring scalar dot-products {es-pull}108522[#108522] - -Watcher:: -* Avoiding running watch jobs in TickerScheduleTriggerEngine if it is paused {es-pull}110061[#110061] (issue: {es-issue}105933[#105933]) - -[[deprecation-8.15.0]] -[float] -=== Deprecations - -ILM+SLM:: -* Deprecate using slm privileges to access ilm {es-pull}110540[#110540] - -Infra/Settings:: -* `ParseHeapRatioOrDeprecatedByteSizeValue` for `indices.breaker.total.limit` {es-pull}110236[#110236] - -Machine Learning:: -* Deprecate `text_expansion` and `weighted_tokens` queries {es-pull}109880[#109880] - -[[enhancement-8.15.0]] -[float] -=== Enhancements - -Aggregations:: -* Aggs: Scripted metric allow list {es-pull}109444[#109444] -* Enable inter-segment concurrency for low cardinality numeric terms aggs {es-pull}108306[#108306] -* Increase size of big arrays only when there is an actual value in the aggregators {es-pull}107764[#107764] -* Increase size of big arrays only when there is an actual value in the aggregators (Analytics module) {es-pull}107813[#107813] -* Optimise `BinaryRangeAggregator` for single value fields {es-pull}108016[#108016] -* Optimise cardinality aggregations for single value fields {es-pull}107892[#107892] -* Optimise composite aggregations for single value fields {es-pull}107897[#107897] -* Optimise few metric aggregations for single value fields {es-pull}107832[#107832] -* Optimise histogram aggregations for single value fields {es-pull}107893[#107893] -* Optimise multiterms aggregation for single value fields {es-pull}107937[#107937] -* Optimise terms aggregations for single value fields {es-pull}107930[#107930] -* Speed up collecting zero document string terms {es-pull}110922[#110922] - -Allocation:: -* Log shard movements {es-pull}105829[#105829] -* Support effective watermark thresholds in node stats API {es-pull}107244[#107244] (issue: {es-issue}106676[#106676]) - -Application:: -* Add Create or update query rule API call {es-pull}109042[#109042] -* Rename rule query and add support for multiple rulesets {es-pull}108831[#108831] -* Support multiple associated groups for TopN {es-pull}108409[#108409] (issue: {es-issue}108018[#108018]) -* [Connector API] Change `UpdateConnectorFiltering` API to have better defaults {es-pull}108612[#108612] - -Authentication:: -* Expose API Key cache metrics {es-pull}109078[#109078] - -Authorization:: -* Cluster state role mapper file settings service {es-pull}107886[#107886] -* Cluster-state based Security role mapper {es-pull}107410[#107410] -* Introduce role description field {es-pull}107088[#107088] -* [Osquery] Extend `kibana_system` role with an access to new `osquery_manager` index {es-pull}108849[#108849] - -Data streams:: -* Add metrics@custom component template to metrics-*-* index template {es-pull}109540[#109540] (issue: {es-issue}109475[#109475]) -* Apm-data: enable plugin by default {es-pull}108860[#108860] -* Apm-data: ignore malformed fields, and too many dynamic fields {es-pull}108444[#108444] -* Apm-data: improve default pipeline performance {es-pull}108396[#108396] (issue: {es-issue}108290[#108290]) -* Apm-data: improve indexing resilience {es-pull}108227[#108227] -* Apm-data: increase priority above Fleet templates {es-pull}108885[#108885] -* Apm-data: increase version for templates {es-pull}108340[#108340] -* Apm-data: set codec: best_compression for logs-apm.* data streams {es-pull}108862[#108862] -* Remove `default_field: message` from metrics index templates {es-pull}110651[#110651] - -Distributed:: -* Add `wait_for_completion` parameter to delete snapshot request {es-pull}109462[#109462] (issue: {es-issue}101300[#101300]) -* Improve mechanism for extracting the result of a `PlainActionFuture` {es-pull}110019[#110019] (issue: {es-issue}108125[#108125]) - -ES|QL:: -* Add `BlockHash` for 3 `BytesRefs` {es-pull}108165[#108165] -* Allow `LuceneSourceOperator` to early terminate {es-pull}108820[#108820] -* Check if `CsvTests` required capabilities exist {es-pull}108684[#108684] -* ESQL: Add aggregates node level reduction {es-pull}107876[#107876] -* ESQL: Add more time span units {es-pull}108300[#108300] -* ESQL: Implement LOOKUP, an "inline" enrich {es-pull}107987[#107987] (issue: {es-issue}107306[#107306]) -* ESQL: Renamed `TopList` to Top {es-pull}110347[#110347] -* ESQL: Union Types Support {es-pull}107545[#107545] (issue: {es-issue}100603[#100603]) -* ESQL: add REPEAT string function {es-pull}109220[#109220] -* ES|QL Add primitive float support to the Compute Engine {es-pull}109746[#109746] (issue: {es-issue}109178[#109178]) -* ES|QL Add primitive float variants of all aggregators to the compute engine {es-pull}109781[#109781] -* ES|QL: vectorize eval {es-pull}109332[#109332] -* Optimize ST_DISTANCE filtering with Lucene circle intersection query {es-pull}110102[#110102] (issue: {es-issue}109972[#109972]) -* Optimize for single value in ordinals grouping {es-pull}108118[#108118] -* Rewrite away type converting functions that do not convert types {es-pull}108713[#108713] (issue: {es-issue}107716[#107716]) -* ST_DISTANCE Function {es-pull}108764[#108764] (issue: {es-issue}108212[#108212]) -* Support metrics counter types in ESQL {es-pull}107877[#107877] -* [ESQL] CBRT function {es-pull}108574[#108574] -* [ES|QL] Convert string to datetime when the other size of an arithmetic operator is `date_period` or `time_duration` {es-pull}108455[#108455] -* [ES|QL] Support Named and Positional Parameters in `EsqlQueryRequest` {es-pull}108421[#108421] (issue: {es-issue}107029[#107029]) -* [ES|QL] `weighted_avg` {es-pull}109993[#109993] - -Engine:: -* Drop shards close timeout when stopping node. {es-pull}107978[#107978] (issue: {es-issue}107938[#107938]) -* Update translog `writeLocation` for `flushListener` after commit {es-pull}109603[#109603] - -Geo:: -* Optimize `GeoBounds` and `GeoCentroid` aggregations for single value fields {es-pull}107663[#107663] - -Health:: -* Log details of non-green indicators in `HealthPeriodicLogger` {es-pull}108266[#108266] - -Highlighting:: -* Unified Highlighter to support matched_fields {es-pull}107640[#107640] (issue: {es-issue}5172[#5172]) - -Infra/Core:: -* Add allocation explain output for THROTTLING shards {es-pull}109563[#109563] -* Create custom parser for ISO-8601 datetimes {es-pull}106486[#106486] (issue: {es-issue}102063[#102063]) -* Extend ISO8601 datetime parser to specify forbidden fields, allowing it to be used on more formats {es-pull}108606[#108606] -* add Elastic-internal stable bridge api for use by Logstash {es-pull}108171[#108171] - -Infra/Metrics:: -* Add auto-sharding APM metrics {es-pull}107593[#107593] -* Add request metric to `RestController` to track success/failure (by status code) {es-pull}109957[#109957] -* Allow RA metrics to be reported upon parsing completed or accumulated {es-pull}108726[#108726] -* Provide the `DocumentSizeReporter` with index mode {es-pull}108947[#108947] -* Return noop instance `DocSizeObserver` for updates with scripts {es-pull}108856[#108856] - -Ingest Node:: -* Add `continent_code` support to the geoip processor {es-pull}108780[#108780] (issue: {es-issue}85820[#85820]) -* Add support for the 'Connection Type' database to the geoip processor {es-pull}108683[#108683] -* Add support for the 'Domain' database to the geoip processor {es-pull}108639[#108639] -* Add support for the 'ISP' database to the geoip processor {es-pull}108651[#108651] -* Adding `hits_time_in_millis` and `misses_time_in_millis` to enrich cache stats {es-pull}107579[#107579] -* Adding `user_type` support for the enterprise database for the geoip processor {es-pull}108687[#108687] -* Adding human readable times to geoip stats {es-pull}107647[#107647] -* Include doc size info in ingest stats {es-pull}107240[#107240] (issue: {es-issue}106386[#106386]) -* Make ingest byte stat names more descriptive {es-pull}108786[#108786] -* Return ingest byte stats even when 0-valued {es-pull}108796[#108796] -* Test pipeline run after reroute {es-pull}108693[#108693] - -Logs:: -* Introduce a node setting controlling the activation of the `logs` index mode in logs@settings component template {es-pull}109025[#109025] (issue: {es-issue}108762[#108762]) -* Support index sorting with nested fields {es-pull}110251[#110251] (issue: {es-issue}107349[#107349]) - -Machine Learning:: -* Add Anthropic messages integration to Inference API {es-pull}109893[#109893] -* Add `sparse_vector` query {es-pull}108254[#108254] -* Add model download progress to the download task status {es-pull}107676[#107676] -* Add rate limiting support for the Inference API {es-pull}107706[#107706] -* Add the rerank task to the Elasticsearch internal inference service {es-pull}108452[#108452] -* Default the HF service to cosine similarity {es-pull}109967[#109967] -* GA the update trained model action {es-pull}108868[#108868] -* Handle the "JSON memory allocator bytes" field {es-pull}109653[#109653] -* Inference Processor: skip inference when all fields are missing {es-pull}108131[#108131] -* Log 'No statistics at.. ' message as a warning {ml-pull}2684[#2684] -* Optimise frequent item sets aggregation for single value fields {es-pull}108130[#108130] -* Sentence Chunker {es-pull}110334[#110334] -* [Inference API] Add Amazon Bedrock Support to Inference API {es-pull}110248[#110248] -* [Inference API] Add Mistral Embeddings Support to Inference API {es-pull}109194[#109194] -* [Inference API] Check for related pipelines on delete inference endpoint {es-pull}109123[#109123] - -Mapping:: -* Add ignored field values to synthetic source {es-pull}107567[#107567] -* Apply FLS to the contents of `IgnoredSourceFieldMapper` {es-pull}109931[#109931] -* Binary field enables doc values by default for index mode with synthe… {es-pull}107739[#107739] (issue: {es-issue}107554[#107554]) -* Feature/annotated text store defaults {es-pull}107922[#107922] (issue: {es-issue}107734[#107734]) -* Handle `ignore_above` in synthetic source for flattened fields {es-pull}110214[#110214] -* Opt in keyword field into fallback synthetic source if needed {es-pull}110016[#110016] -* Opt in number fields into fallback synthetic source when doc values a… {es-pull}110160[#110160] -* Reflect latest changes in synthetic source documentation {es-pull}109501[#109501] -* Store source for fields in objects with `dynamic` override {es-pull}108911[#108911] -* Store source for nested objects {es-pull}108818[#108818] -* Support synthetic source for `geo_point` when `ignore_malformed` is used {es-pull}109651[#109651] -* Support synthetic source for `scaled_float` and `unsigned_long` when `ignore_malformed` is used {es-pull}109506[#109506] -* Support synthetic source for date fields when `ignore_malformed` is used {es-pull}109410[#109410] -* Support synthetic source together with `ignore_malformed` in histogram fields {es-pull}109882[#109882] -* Track source for arrays of objects {es-pull}108417[#108417] (issue: {es-issue}90708[#90708]) -* Track synthetic source for disabled objects {es-pull}108051[#108051] - -Network:: -* Detect long-running tasks on network threads {es-pull}109204[#109204] - -Ranking:: -* Enabling profiling for `RankBuilders` and adding tests for RRF {es-pull}109470[#109470] - -Relevance:: -* [Query Rules] Add API calls to get or delete individual query rules within a ruleset {es-pull}109554[#109554] -* [Query Rules] Require Enterprise License for Query Rules {es-pull}109634[#109634] - -Search:: -* Add AVX-512 optimised vector distance functions for int7 on x64 {es-pull}109084[#109084] -* Add `SparseVectorStats` {es-pull}108793[#108793] -* Add `_name` support for top level `knn` clauses {es-pull}107645[#107645] (issues: {es-issue}106254[#106254], {es-issue}107448[#107448]) -* Add a SIMD (AVX2) optimised vector distance function for int7 on x64 {es-pull}108088[#108088] -* Add min/max range of the `event.ingested` field to cluster state for searchable snapshots {es-pull}106252[#106252] -* Add per-field KNN vector format to Index Segments API {es-pull}107216[#107216] -* Add support for hiragana_uppercase & katakana_uppercase token filters in kuromoji analysis plugin {es-pull}106553[#106553] -* Adding support for explain in rrf {es-pull}108682[#108682] -* Allow rescorer with field collapsing {es-pull}107779[#107779] (issue: {es-issue}27243[#27243]) -* Cut over stored fields to ZSTD for compression {es-pull}103374[#103374] -* Limit the value in prefix query {es-pull}108537[#108537] (issue: {es-issue}108486[#108486]) -* Make dense vector field type updatable {es-pull}106591[#106591] -* Multivalue Sparse Vector Support {es-pull}109007[#109007] - -Security:: -* Add bulk delete roles API {es-pull}110383[#110383] -* Remote cluster - API key security model - cluster privileges {es-pull}107493[#107493] - -Snapshot/Restore:: -* Denser in-memory representation of `ShardBlobsToDelete` {es-pull}109848[#109848] -* Log repo UUID at generation/registration time {es-pull}109672[#109672] -* Make repository analysis API available to non-operators {es-pull}110179[#110179] (issue: {es-issue}100318[#100318]) -* Track `RequestedRangeNotSatisfiedException` separately in S3 Metrics {es-pull}109657[#109657] - -Stats:: -* DocsStats: Add human readable bytesize {es-pull}109720[#109720] - -TSDB:: -* Optimise `time_series` aggregation for single value fields {es-pull}107990[#107990] -* Support `ignore_above` on keyword dimensions {es-pull}110337[#110337] - -Vector Search:: -* Adding hamming distance function to painless for `dense_vector` fields {es-pull}109359[#109359] -* Support k parameter for knn query {es-pull}110233[#110233] (issue: {es-issue}108473[#108473]) - -[[feature-8.15.0]] -[float] -=== New features - -Aggregations:: -* Opt `scripted_metric` out of parallelization {es-pull}109597[#109597] - -Application:: -* [Connector API] Add claim sync job endpoint {es-pull}109480[#109480] - -ES|QL:: -* ESQL: Add `ip_prefix` function {es-pull}109070[#109070] (issue: {es-issue}99064[#99064]) -* ESQL: Introduce a casting operator, `::` {es-pull}107409[#107409] -* ESQL: `top_list` aggregation {es-pull}109386[#109386] (issue: {es-issue}109213[#109213]) -* ESQL: add Arrow dataframes output format {es-pull}109873[#109873] -* Reapply "ESQL: Expose "_ignored" metadata field" {es-pull}108871[#108871] - -Infra/REST API:: -* Add a capabilities API to check node and cluster capabilities {es-pull}106820[#106820] - -Ingest Node:: -* Directly download commercial ip geolocation databases from providers {es-pull}110844[#110844] -* Mark the Redact processor as Generally Available {es-pull}110395[#110395] - -Logs:: -* Introduce logs index mode as Tech Preview {es-pull}108896[#108896] (issue: {es-issue}108896[#108896]) - -Machine Learning:: -* Add support for Azure AI Studio embeddings and completions to the inference service. {es-pull}108472[#108472] - -Mapping:: -* Add `semantic_text` field type and `semantic` query {es-pull}110338[#110338] -* Add generic fallback implementation for synthetic source {es-pull}108222[#108222] -* Add synthetic source support for `geo_shape` via fallback implementation {es-pull}108881[#108881] -* Add synthetic source support for binary fields {es-pull}107549[#107549] -* Enable fallback synthetic source by default {es-pull}109370[#109370] (issue: {es-issue}106460[#106460]) -* Enable fallback synthetic source for `point` and `shape` {es-pull}109312[#109312] -* Enable fallback synthetic source for `token_count` {es-pull}109044[#109044] -* Implement synthetic source support for annotated text field {es-pull}107735[#107735] -* Implement synthetic source support for range fields {es-pull}107081[#107081] -* Support arrays in fallback synthetic source implementation {es-pull}108878[#108878] -* Support synthetic source for `aggregate_metric_double` when ignore_malf… {es-pull}108746[#108746] - -Ranking:: -* Add text similarity reranker retriever {es-pull}109813[#109813] - -Relevance:: -* Mark Query Rules as GA {es-pull}110004[#110004] - -Search:: -* Add new int4 quantization to dense_vector {es-pull}109317[#109317] -* Adding RankFeature search phase implementation {es-pull}108538[#108538] -* Adding aggregations support for the `_ignored` field {es-pull}101373[#101373] (issue: {es-issue}59946[#59946]) -* Update Lucene version to 9.11 {es-pull}109219[#109219] - -Security:: -* Query Roles API {es-pull}108733[#108733] - -Transform:: -* Introduce _transform/_node_stats API {es-pull}107279[#107279] - -Vector Search:: -* Adds new `bit` `element_type` for `dense_vectors` {es-pull}110059[#110059] - -[[upgrade-8.15.0]] -[float] -=== Upgrades - -Infra/Plugins:: -* Update ASM to 9.7 for plugin scanner {es-pull}108822[#108822] (issue: {es-issue}108776[#108776]) - -Ingest Node:: -* Bump Tika dependency to 2.9.2 {es-pull}108144[#108144] - -Network:: -* Upgrade to Netty 4.1.109 {es-pull}108155[#108155] - -Search:: -* Upgrade to Lucene-9.11.1 {es-pull}110234[#110234] - -Security:: -* Upgrade bouncy castle (non-fips) to 1.78.1 {es-pull}108223[#108223] - -Snapshot/Restore:: -* Bump jackson version in modules:repository-azure {es-pull}109717[#109717] - - diff --git a/docs/reference/release-notes/8.15.1.asciidoc b/docs/reference/release-notes/8.15.1.asciidoc deleted file mode 100644 index 7c48f457e3b4e..0000000000000 --- a/docs/reference/release-notes/8.15.1.asciidoc +++ /dev/null @@ -1,103 +0,0 @@ -[[release-notes-8.15.1]] -== {es} version 8.15.1 - -Also see <>. - -[[known-issues-8.15.1]] -[float] -=== Known issues -* Elasticsearch will not start if custom role mappings are configured using the -`xpack.security.authc.realms.*.files.role_mapping` configuration option. As a workaround, custom role mappings -can be configured using the https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html[REST API] (issue: {es-issue}112503[#112503]) - -* ES|QL queries can lead to node crashes due to Out Of Memory errors when: -** Multiple indices match the query pattern -** These indices have many conflicting field mappings -** Many of those fields are included in the request -These issues deplete heap memory, increasing the likelihood of OOM errors. (issue: {es-issue}111964[#111964], {es-issue}111358[#111358]). -In Kibana, you might indirectly execute these queries when using Discover, or adding a Field Statistics panel to a dashboard. -+ -To work around this issue, you have a number of options: -** Downgrade to an earlier version -** Upgrade to 8.15.2 upon release -** Follow the instructions to -<> -** Change the default data view in Discover to a smaller set of indices and/or one with fewer mapping conflicts. - -* Index Stats, Node Stats and Cluster Stats API can return a null pointer exception if an index contains a `dense_vector` field -but there is an index segment that does not contain any documents with a dense vector field ({es-pull}112720[#112720]). Workarounds: -** If the affected index already contains documents with a dense vector field, force merge the index to a single segment. -** If the affected index does not already contain documents with a dense vector field, index a document with a dense vector field -and then force merge to a single segment. -** If the affected index's `dense_vector` fields are unused, reindex without the `dense_vector` fields. - -* Synthetic source bug. Synthetic source may fail generating the _source at runtime, causing failures in get APIs or -partial failures in the search APIs. The result is that for the affected documents the _source can't be retrieved. -There is no workaround and the only option to is to upgrade to 8.15.2 when released. -+ -If you use synthetic source then you may be affected by this bug if the following is true: -** If you have more fields then the `index.mapping.total_fields.limit` setting allows. -** If you use dynamic mappings and the `index.mapping.total_fields.ignore_dynamic_beyond_limit` setting is enabled. - -[[bug-8.15.1]] -[float] -=== Bug fixes - -Aggregations:: -* Revert "Avoid bucket copies in Aggs" {es-pull}111758[#111758] (issue: {es-issue}111679[#111679]) - -Authorization:: -* Fix DLS over Runtime Fields {es-pull}112260[#112260] (issue: {es-issue}111637[#111637]) - -ES|QL:: -* Avoid losing error message in failure collector {es-pull}111983[#111983] (issue: {es-issue}111894[#111894]) -* Avoid wrapping rejection exception in exchange {es-pull}112178[#112178] (issue: {es-issue}112106[#112106]) -* ESQL: Fix for overzealous validation in case of invalid mapped fields {es-pull}111475[#111475] (issue: {es-issue}111452[#111452]) - -Geo:: -* Add maximum nested depth check to WKT parser {es-pull}111843[#111843] -* Always check `crsType` when folding spatial functions {es-pull}112090[#112090] (issue: {es-issue}112089[#112089]) -* Fix NPE when executing doc value queries over shape geometries with empty segments {es-pull}112139[#112139] - -Indices APIs:: -* Fix template alias parsing livelock {es-pull}112217[#112217] - -Infra/Core:: -* Fix windows memory locking {es-pull}111866[#111866] (issue: {es-issue}111847[#111847]) - -Ingest Node:: -* Fixing incorrect bulk request took time {es-pull}111863[#111863] (issue: {es-issue}111854[#111854]) -* Improve performance of grok pattern cycle detection {es-pull}111947[#111947] - -Logs:: -* Merge multiple ignored source entires for the same field {es-pull}111994[#111994] (issue: {es-issue}111694[#111694]) - -Machine Learning:: -* [Inference API] Move Delete inference checks to threadpool worker {es-pull}111646[#111646] - -Mapping:: -* Check for valid `parentDoc` before retrieving its previous {es-pull}112005[#112005] (issue: {es-issue}111990[#111990]) -* Fix calculation of parent offset for ignored source in some cases {es-pull}112046[#112046] -* Fix synthetic source for empty nested objects {es-pull}111943[#111943] (issue: {es-issue}111811[#111811]) -* No error when `store_array_source` is used without synthetic source {es-pull}111966[#111966] -* Prevent synthetic field loaders accessing stored fields from using stale data {es-pull}112173[#112173] (issue: {es-issue}112156[#112156]) - -Ranking:: -* Properly handle filters on `TextSimilarityRank` retriever {es-pull}111673[#111673] - -Relevance:: -* Semantic reranking should fail whenever inference ID does not exist {es-pull}112038[#112038] (issue: {es-issue}111934[#111934]) -* [Bugfix] Add `accessDeclaredMembers` permission to allow search application templates to parse floats {es-pull}111285[#111285] - -Search:: -* Explain Function Score Query {es-pull}111807[#111807] - -Security:: -* Fix "unexpected field [remote_cluster]" for CCS (RCS 1.0) when using API key that references `remote_cluster` {es-pull}112226[#112226] -* Fix connection timeout for `OpenIdConnectAuthenticator` get Userinfo {es-pull}112230[#112230] - -Vector Search:: -* Fix `NullPointerException` when doing knn search on empty index without dims {es-pull}111756[#111756] (issue: {es-issue}111733[#111733]) -* Speed up dense/sparse vector stats {es-pull}111729[#111729] (issue: {es-issue}111715[#111715]) - - diff --git a/docs/reference/release-notes/8.15.2.asciidoc b/docs/reference/release-notes/8.15.2.asciidoc deleted file mode 100644 index 7dfd8690109b2..0000000000000 --- a/docs/reference/release-notes/8.15.2.asciidoc +++ /dev/null @@ -1,42 +0,0 @@ -[[release-notes-8.15.2]] -== {es} version 8.15.2 - -Also see <>. - -[[bug-8.15.2]] -[float] -=== Bug fixes - -Authorization:: -* Fix remote cluster credential secure settings reload {es-pull}111535[#111535] - -ES|QL:: -* ESQL: Don't mutate the `BoolQueryBuilder` in plan {es-pull}111519[#111519] -* ES|QL: Fix `ResolvedEnrichPolicy` serialization (bwc) in v 8.15 {es-pull}112985[#112985] (issue: {es-issue}112968[#112968]) -* Fix union-types where one index is missing the field {es-pull}111932[#111932] (issue: {es-issue}111912[#111912]) -* Support widening of numeric types in union-types {es-pull}112610[#112610] (issue: {es-issue}111277[#111277]) - -Infra/Core:: -* JSON parse failures should be 4xx codes {es-pull}112703[#112703] -* Json parsing exceptions should not cause 500 errors {es-pull}111548[#111548] (issue: {es-issue}111542[#111542]) -* Make sure file accesses in `DnRoleMapper` are done in stack frames with permissions {es-pull}112400[#112400] - -Ingest Node:: -* Fix missing header in `put_geoip_database` JSON spec {es-pull}112581[#112581] - -Logs:: -* Fix encoding of dynamic arrays in ignored source {es-pull}112713[#112713] - -Mapping:: -* Full coverage of ECS by ecs@mappings when `date_detection` is disabled {es-pull}112444[#112444] (issue: {es-issue}112398[#112398]) - -Search:: -* Fix parsing error in `_terms_enum` API {es-pull}112872[#112872] (issue: {es-issue}94378[#94378]) - -Security:: -* Allowlist `tracestate` header on remote server port {es-pull}112649[#112649] - -Vector Search:: -* Fix NPE in `dense_vector` stats {es-pull}112720[#112720] - - diff --git a/docs/reference/release-notes/8.16.0.asciidoc b/docs/reference/release-notes/8.16.0.asciidoc deleted file mode 100644 index 7b2e7459be968..0000000000000 --- a/docs/reference/release-notes/8.16.0.asciidoc +++ /dev/null @@ -1,8 +0,0 @@ -[[release-notes-8.16.0]] -== {es} version 8.16.0 - -coming[8.16.0] - -Also see <>. - - diff --git a/docs/reference/release-notes/8.17.0.asciidoc b/docs/reference/release-notes/8.17.0.asciidoc deleted file mode 100644 index 59962fd83e9b7..0000000000000 --- a/docs/reference/release-notes/8.17.0.asciidoc +++ /dev/null @@ -1,8 +0,0 @@ -[[release-notes-8.17.0]] -== {es} version 8.17.0 - -coming[8.17.0] - -Also see <>. - - diff --git a/docs/reference/release-notes/8.2.0.asciidoc b/docs/reference/release-notes/8.2.0.asciidoc deleted file mode 100644 index 1305845ff0e0b..0000000000000 --- a/docs/reference/release-notes/8.2.0.asciidoc +++ /dev/null @@ -1,350 +0,0 @@ -[[release-notes-8.2.0]] -== {es} version 8.2.0 - -// Also see <>. -[[known-issues-8.2.0]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[bug-8.2.0]] -[float] -=== Bug fixes - -Aggregations:: -* Don't apply the rewrite-as-range optimization if field is multivalued {es-pull}84535[#84535] (issue: {es-issue}82903[#82903]) -* Fix `AdaptingAggregator` `toString` method {es-pull}86042[#86042] -* Fix: nested top metrics sort on keyword field {es-pull}85058[#85058] -* Fix: use the correct field name when reading data from multi fields {es-pull}84752[#84752] - -Analysis:: -* Add tests for `min_hash` configuration and fix settings names {es-pull}84753[#84753] (issue: {es-issue}84578[#84578]) - -Authorization:: -* Add delete privilege to `kibana_system` for APM {es-pull}85085[#85085] -* Ensure API key can only see itself with `QueryApiKey` API {es-pull}84859[#84859] -* Fix ownership of refresh tokens {es-pull}85010[#85010] -* Grant `kibana_system` role read access to APM data streams {es-pull}85744[#85744] -* Handle role descriptor retrieval for internal users {es-pull}85049[#85049] -* Ignore app priv failures when resolving superuser {es-pull}85519[#85519] - -EQL:: -* Clean any used memory by the sequence matcher and circuit breaker used bytes in case of exception {es-pull}84451[#84451] - -Engine:: -* Increase store ref before snapshotting index commit {es-pull}84776[#84776] - -Geo:: -* Fix fields wildcard support to vector tile search API {es-pull}85595[#85595] (issue: {es-issue}85592[#85592]) - -Highlighting:: -* Fix wildcard highlighting on `match_only_text` {es-pull}85500[#85500] (issue: {es-issue}85493[#85493]) - -ILM+SLM:: -* Fix Stacktraces Taking Memory in ILM Error Step Serialization {es-pull}84266[#84266] -* Invoke initial `AsyncActionStep` for newly created indices {es-pull}84541[#84541] (issue: {es-issue}77269[#77269]) -* Retry clean and create snapshot if it already exists #83694 {es-pull}84829[#84829] (issue: {es-issue}83694[#83694]) -* Skip the shrink step if the number of shards of the shrunk index is the same with the original {es-pull}84434[#84434] (issue: {es-issue}80180[#80180]) - -Indices APIs:: -* Remove existing indices/datastreams/aliases before simulating index template {es-pull}84675[#84675] (issue: {es-issue}84256[#84256]) - -Infra/Core:: -* Fix `NullPointerException` in `SystemIndexMetadataUpgradeService` hidden alias handling {es-pull}84780[#84780] (issue: {es-issue}81411[#81411]) -* Prevent `ThreadContext` header leak when sending response {es-pull}68649[#68649] (issue: {es-issue}68278[#68278]) -* Relax data path deprecations from critical to warn {es-pull}85952[#85952] -* Require and preserve content type for filtered rest requests {es-pull}84914[#84914] (issue: {es-issue}84784[#84784]) -* Return empty version instead of blowing up if we cannot find it {es-pull}85244[#85244] -* Validate index format agreement for system index descriptors {es-pull}85173[#85173] -* Wrap thread creation in `doPrivileged` call {es-pull}85180[#85180] - -Infra/Plugins:: -* Strengthen elasticsearch plugin version check {es-pull}85340[#85340] (issue: {es-issue}85336[#85336]) - -Infra/REST API:: -* Correctly return `_type` field for documents in V7 compatiblity mode {es-pull}84873[#84873] (issue: {es-issue}84173[#84173]) - -Ingest:: -* Mark `GeoIpDownloaderTask` as completed after cancellation {es-pull}84028[#84028] -* `CompoundProcessor` should also catch exceptions when executing a processor {es-pull}84838[#84838] (issue: {es-issue}84781[#84781]) - -License:: -* Fix license downgrade warnings for API Keys and Tokens {es-pull}85276[#85276] (issue: {es-issue}75271[#75271]) - -Machine Learning:: -* Allow retrieving `boolean` fields from `_source` in DFA jobs {es-pull}85672[#85672] -* Avoid multiple queued quantiles documents in renormalizer {es-pull}85555[#85555] (issue: {es-issue}85539[#85539]) -* Disallow new trained model deployments when nodes are different versions {es-pull}85465[#85465] -* Do not fetch source when finding index of last state docs {es-pull}85334[#85334] -* Ensure that inference index primary shards are available before attempting to load model {es-pull}85569[#85569] -* Fix Kibana date format and similar overrides in text structure endpoint {es-pull}84967[#84967] -* Fix race condition when stopping a recently relocated datafeed {es-pull}84636[#84636] -* Fix serialisation of text embedding updates {es-pull}85863[#85863] -* Fixes for multi-line start patterns in text structure endpoint {es-pull}85066[#85066] -* Fixes to making old ML indices hidden {es-pull}85383[#85383] -* Reallocate model deployments on node shutdown events {es-pull}85310[#85310] -* Retry datafeed searches on skipped CCS clusters {es-pull}84052[#84052] (issue: {es-issue}83838[#83838]) -* Return all Datafeeds with GET Anomaly Detector {es-pull}84759[#84759] - -Mapping:: -* Do not fail on duplicated content field filters {es-pull}85382[#85382] -* Runtime fields core-with-mapped tests support tsdb {es-pull}83577[#83577] - -Packaging:: -* Remove use of Cloudflare zlib {es-pull}84680[#84680] - -Rollup:: -* Add support for comma delimited index patterns to rollup job configuration {es-pull}47041[#47041] (issue: {es-issue}45591[#45591]) - -SQL:: -* Add range checks to interval multiplication operation {es-pull}83478[#83478] (issue: {es-issue}83336[#83336]) -* Avoid empty last pages for GROUP BY queries when possible {es-pull}84356[#84356] (issue: {es-issue}75528[#75528]) -* Fix SQLCompatIT.testCursorFromOldNodeFailsOnNewNode {es-pull}85531[#85531] (issue: {es-issue}85520[#85520]) -* Fix issues with format=txt when paging through result sets and in mixed node environments {es-pull}83833[#83833] (issues: {es-issue}83581[#83581], {es-issue}83788[#83788]) -* Improve ROUND and TRUNCATE to better manage Long values and big Doubles {es-pull}85106[#85106] (issues: {es-issue}85105[#85105], {es-issue}49391[#49391]) -* Use exact attributes for script templates from scalar functions {es-pull}84813[#84813] (issue: {es-issue}80551[#80551]) -* `RANDOM()` always evaluates to `NULL` if `` is `NULL` {es-pull}84632[#84632] (issue: {es-issue}84627[#84627]) - -Search:: -* Fix point visitor in `DiskUsage` API {es-pull}84909[#84909] -* Fix skip caching factor with `indices.queries.cache.all_segments` {es-pull}85510[#85510] -* Increase store ref before analyzing disk usage {es-pull}84774[#84774] -* Limit concurrent shard requests in disk usage API {es-pull}84900[#84900] (issue: {es-issue}84779[#84779]) -* Rewrite `match_all` inside `must_not` {es-pull}85999[#85999] -* `DotExpandingXContentParser` to expose the original token location {es-pull}84970[#84970] -* `TransportBroadcastAction` should always set response for each shard {es-pull}84926[#84926] - -Security:: -* `Authentication.token` now uses version from the existing authentication {es-pull}85978[#85978] -* Ensure tokens represent effective user's identity in all cases {es-pull}84263[#84263] - -Snapshot/Restore:: -* Don't fail if there is no symlink for AWS Web Identity Token {es-pull}84697[#84697] -* Expose proxy settings for GCS repositories {es-pull}85785[#85785] (issue: {es-issue}84569[#84569]) -* Fix atomic writes in HDFS {es-pull}85210[#85210] -* Fix leaking listeners bug on frozen tier {es-pull}85239[#85239] -* Fix snapshot status messages on node-left {es-pull}85021[#85021] -* Ignore frozen shared cache file during data folder upgrades {es-pull}85638[#85638] (issue: {es-issue}85603[#85603]) -* [s3-repository] Lookup AWS Region for STS Client from STS endpoint {es-pull}84585[#84585] (issue: {es-issue}83826[#83826]) - -Stats:: -* Discard intermediate results upon cancellation for stats endpoints {es-pull}82685[#82685] (issue: {es-issue}82337[#82337]) - -Transform:: -* Correctly validate permissions when retention policy is configured {es-pull}85413[#85413] (issue: {es-issue}85409[#85409]) - -Watcher:: -* Avoiding watcher validation errors when a data stream points to more than one index {es-pull}85507[#85507] (issue: {es-issue}85508[#85508]) -* Log at WARN level for Watcher cluster state validation errors {es-pull}85632[#85632] -* No longer require master node to install Watcher templates {es-pull}85287[#85287] (issue: {es-issue}85043[#85043]) - -[[enhancement-8.2.0]] -[float] -=== Enhancements - -Aggregations:: -* Aggs: no filter-by-filter if `_doc_count` field {es-pull}84427[#84427] (issue: {es-issue}84048[#84048]) -* Extract agg bounds from queries in FILTER {es-pull}83902[#83902] -* Give Lucene more opportunities to enable the filter-by-filter optimization {es-pull}85322[#85322] -* Improve performance of `date_histogram` when date histogram is in a BoostingQuery {es-pull}83751[#83751] (issues: {es-issue}82384[#82384], {es-issue}75542[#75542]) - -Allocation:: -* Make allocation explanations more actionable {es-pull}83983[#83983] -* Use static empty store files metadata {es-pull}84034[#84034] - -Audit:: -* User Profile - Audit security config change for profile APIs {es-pull}84785[#84785] - -Authentication:: -* Adds domain information to authentication object {es-pull}82639[#82639] -* Improve BWC for persisted authentication headers {es-pull}83913[#83913] (issue: {es-issue}83567[#83567]) -* Warn on SAML attributes with special attribute names {es-pull}85248[#85248] (issue: {es-issue}48613[#48613]) - -Authorization:: -* Add elastic/enterprise-search-server service account {es-pull}83325[#83325] -* Add index privileges for logs-enterprise_search.api-default to the enterprise-search-server service account {es-pull}84965[#84965] -* Add indices permissions to Enterprise Search service account {es-pull}85726[#85726] -* Note restricted indices in access denied message {es-pull}85013[#85013] -* Security global privilege for updating profile data of applications {es-pull}83728[#83728] -* [Osquery] Extend `kibana_system` role with an access to `osquery_manager` indices {es-pull}84279[#84279] - -CRUD:: -* Speed up Reading `RetentionLeases` from the Wire {es-pull}85159[#85159] - -Cluster Coordination:: -* Avoid deserializing cluster states on master {es-pull}58416[#58416] -* Improve logging for connect-back failures {es-pull}84915[#84915] -* Remove intermediate map from master task execution {es-pull}84406[#84406] -* Reuse `JoinTaskExecutor` {es-pull}85325[#85325] -* Speed up `MetadataStateFormat` Writes {es-pull}85138[#85138] - -Data streams:: -* Speed up `DatastreamTimestampFieldMapper#postParse` {es-pull}85270[#85270] - -Discovery-Plugins:: -* Support IMDSv2 for EC2 Discovery {es-pull}84410[#84410] (issue: {es-issue}80398[#80398]) - -Distributed:: -* Add elasticsearch health API {es-pull}83119[#83119] - -Geo:: -* Add `geohex_grid` aggregation to vector tiles API {es-pull}84553[#84553] -* Added buffer pixels to vector tile spec parsing {es-pull}84710[#84710] (issue: {es-issue}84492[#84492]) -* Normalise polygons only when necessary {es-pull}84229[#84229] (issue: {es-issue}35349[#35349]) -* Support GeoJSON for `geo_point` {es-pull}85120[#85120] - -Health:: -* Fix naming in health indicators {es-pull}83587[#83587] -* ILM/SLM health indicator services {es-pull}83440[#83440] -* Introduce dedicated interface for health indicator details {es-pull}83417[#83417] -* Repository integrity health indicator services {es-pull}83445[#83445] -* Shards allocation health indicator services {es-pull}83513[#83513] - -ILM+SLM:: -* Cache ILM policy name on `IndexMetadata` {es-pull}83603[#83603] (issue: {es-issue}83582[#83582]) -* GET _index_template and GET _component_template request support query parameter flat_settings {es-pull}83297[#83297] -* Make rollover cancellable #81763 {es-pull}84584[#84584] (issue: {es-issue}81763[#81763]) -* Rollover add max_primary_shard_docs condition {es-pull}80981[#80981] -* Speed up ILM cluster task execution {es-pull}85405[#85405] (issue: {es-issue}82708[#82708]) - -Indices APIs:: -* Batch add index block cluster state updates {es-pull}84374[#84374] -* Batch close-indices cluster state updates {es-pull}84259[#84259] -* Batch open-indices cluster state updates {es-pull}83760[#83760] -* Remove LegacyCTRAL from `TransportRolloverAction` {es-pull}84166[#84166] - -Infra/Core:: -* Add support for negtive epoch timestamps {es-pull}80208[#80208] (issues: {es-issue}79135[#79135], {es-issue}72123[#72123], {es-issue}40983[#40983]) -* Allow yaml values for dynamic node settings {es-pull}85186[#85186] (issue: {es-issue}65577[#65577]) -* Improve XContent Array Parser {es-pull}84477[#84477] -* Optimize `ImmutableOpenMap.Builder` {es-pull}85184[#85184] -* Provide 'system' attribute when resolving system indices {es-pull}85042[#85042] (issue: {es-issue}82671[#82671]) -* Remove Lucene split packages {es-pull}82132[#82132] (issue: {es-issue}81981[#81981]) -* Simplify reading a list and converting it to a map from stream {es-pull}84183[#84183] -* Speed up CompressedXContent Serialization {es-pull}84802[#84802] -* Update `readMap` to avoid resizing map during reading {es-pull}84045[#84045] - -Infra/Plugins:: -* Warn on slow signature verification {es-pull}84766[#84766] (issue: {es-issue}80480[#80480]) - -Infra/Scripting:: -* Script: Fields API for Dense Vector {es-pull}83550[#83550] - -Ingest:: -* Do not throw exceptions when resolving paths in ingest documents {es-pull}84659[#84659] -* RemoveProcessor updated to support fieldsToKeep {es-pull}83665[#83665] - -Machine Learning:: -* Add ML memory stats API {es-pull}83802[#83802] -* Add support for RoBERTa and BART NLP models {es-pull}84777[#84777] -* Add throughput stats for Trained Model Deployments {es-pull}84628[#84628] -* Improve `zero_shot_classification` tokenization performance {es-pull}84988[#84988] (issue: {es-issue}84820[#84820]) - -Mapping:: -* Check the utf8 length of keyword field is not bigger than 32766 in ES, rather than in Lucene. {es-pull}83738[#83738] (issue: {es-issue}80865[#80865]) -* Make `FieldMapper.Param` Cheaper to Construct {es-pull}85191[#85191] -* Terms enum support for doc value only keyword fields {es-pull}83482[#83482] (issue: {es-issue}83451[#83451]) - -Network:: -* Use Throttling Netty Write Handler on HTTP Path {es-pull}84751[#84751] - -Query Languages:: -* Add `unsigned_long` type support {es-pull}65145[#65145] (issue: {es-issue}63312[#63312]) - -Recovery:: -* Improve failure logging in recovery-from-snapshot {es-pull}84910[#84910] - -Reindex:: -* Use `SecureString` for reindex from remote password {es-pull}85091[#85091] - -SQL:: -* Add leniency option to SQL CLI {es-pull}83795[#83795] (issue: {es-issue}67436[#67436]) -* Forward warning headers to JDBC driver {es-pull}84499[#84499] -* List data streams as VIEWs {es-pull}85168[#85168] (issue: {es-issue}83449[#83449]) -* PIT for `GROUP BY` and `PIVOT` queries {es-pull}84605[#84605] (issue: {es-issue}84349[#84349]) -* Replace scroll cursors with point-in-time and `search_after` {es-pull}83381[#83381] (issues: {es-issue}61873[#61873], {es-issue}80523[#80523]) - -Search:: -* Add filtering to fieldcaps endpoint {es-pull}83636[#83636] (issue: {es-issue}82966[#82966]) -* Group field caps response by index mapping hash {es-pull}83494[#83494] (issues: {es-issue}78665[#78665], {es-issue}82879[#82879]) -* Integrate filtering support for ANN {es-pull}84734[#84734] (issue: {es-issue}81788[#81788]) -* Speed up merging field-caps response {es-pull}83704[#83704] - -Security:: -* Bind host all instead of just _site_ when needed {es-pull}83145[#83145] -* Fleet: Add a new mapping for .fleet-actions-results `action_input_type` field {es-pull}84316[#84316] -* Update X509Certificate principal methods {es-pull}85163[#85163] (issue: {es-issue}81008[#81008]) -* User Profile - Add APIs for enable/disable profile {es-pull}84548[#84548] -* User Profile - Add rest spec files and tests {es-pull}83307[#83307] -* User Profile - More REST spec, tests, API docs {es-pull}84597[#84597] -* User Profile - Update APIs to work with domain {es-pull}83570[#83570] -* User Profile - Update xpack usage output for domains {es-pull}84747[#84747] -* User Profile - capture domain when creating API keys and tokens {es-pull}84547[#84547] -* User Profile: Add feature flag {es-pull}83347[#83347] -* User Profile: Add initial search profile API {es-pull}83191[#83191] -* User Profile: handle racing on creating new profile {es-pull}84208[#84208] - -TSDB:: -* TSDB: Expand `_id` on version conflict {es-pull}84957[#84957] -* TSDB: Reject the nested object fields that are configured time_series_dimension {es-pull}83920[#83920] -* TSDB: routingPath object type check improvement {es-pull}83310[#83310] -* TSDB: shrink `_id` inverted index {es-pull}85008[#85008] - -Watcher:: -* Add list of allowed domains for Watcher email action {es-pull}84894[#84894] (issue: {es-issue}84739[#84739]) - -[[feature-8.2.0]] -[float] -=== New features - -Aggregations:: -* New `random_sampler` aggregation for sampling documents in aggregations {es-pull}84363[#84363] - -Authentication:: -* Add JWT realm support for JWT validation {es-pull}83155[#83155] -* Add smoke test for JWT realm wiring {es-pull}84249[#84249] -* Support mail, name, and dn claims in JWT realms {es-pull}84907[#84907] - -Authorization:: -* API Key APIs with Security Domain {es-pull}84704[#84704] - -Health:: -* Add Health Indicator Plugin {es-pull}83205[#83205] -* Adding impacts block to the health info API response {es-pull}84899[#84899] (issue: {es-issue}84773[#84773]) - -Indices APIs:: -* Adding cat api for component template {es-pull}71274[#71274] (issue: {es-issue}68941[#68941]) - -Infra/Core:: -* Introduce an unauthenticated endpoint for readiness checks {es-pull}84375[#84375] (issue: {es-issue}81168[#81168]) - -Machine Learning:: -* Adds new `change_point` pipeline aggregation {es-pull}83428[#83428] - -Search:: -* Introduce lookup runtime fields {es-pull}82385[#82385] -* Resolve wildcards in disk usage API {es-pull}84832[#84832] - -TSDB:: -* TSDB: Support GET and DELETE and doc versioning {es-pull}82633[#82633] - -[[upgrade-8.2.0]] -[float] -=== Upgrades - -Infra/Core:: -* Upgrade jackson for x-content to 2.13.2 {es-pull}84905[#84905] - -Ingest:: -* Bump commons-compress to 1.21 {es-pull}85581[#85581] - -Network:: -* Upgrade Netty to 4.1.74 {es-pull}84562[#84562] - -Packaging:: -* Upgrade to JDK 18.0.0+36 {es-pull}85376[#85376] (issue: {es-issue}85357[#85357]) - -Search:: -* Upgrade to lucene 9.1.0-snapshot-5b522487ba8 {es-pull}85025[#85025] - - diff --git a/docs/reference/release-notes/8.2.1.asciidoc b/docs/reference/release-notes/8.2.1.asciidoc deleted file mode 100644 index 309488d48d3e7..0000000000000 --- a/docs/reference/release-notes/8.2.1.asciidoc +++ /dev/null @@ -1,73 +0,0 @@ -[[release-notes-8.2.1]] -== {es} version 8.2.1 - -Also see <>. -[[known-issues-8.2.1]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[bug-8.2.1]] -[float] -=== Bug fixes - -Aggregations:: -* Fix `AdaptingAggregator` `toString` method {es-pull}86042[#86042] -* Less complexity in after-key parsing for unmapped fields {es-pull}86359[#86359] (issue: {es-issue}85928[#85928]) - -Authentication:: -* Ensure authentication is wire compatible when setting user {es-pull}86741[#86741] (issue: {es-issue}86716[#86716]) - -Cluster Coordination:: -* Avoid breaking add/clear voting exclusions {es-pull}86657[#86657] - -Geo:: -* Fix bounded hexagonal grids when they contain the bin on one of the poles {es-pull}86460[#86460] -* Fix mvt polygon orientation {es-pull}86555[#86555] (issue: {es-issue}86560[#86560]) - -ILM+SLM:: -* Fix `max_primary_shard_size` resize factor math {es-pull}86897[#86897] -* Reroute after migrating to data tiers routing {es-pull}86574[#86574] (issue: {es-issue}86572[#86572]) - -Infra/Core:: -* Fix `assertDefaultThreadContext` enumerating allowed headers {es-pull}86262[#86262] -* Forward port MDP deprecation info API {es-pull}86103[#86103] -* Make data directories work with symlinks again {es-pull}85878[#85878] (issue: {es-issue}85701[#85701]) -* Set autoexpand replicas on Fleet actions data stream {es-pull}85511[#85511] -* Do not autocreate alias for non-primary system indices {es-pull}85977[#85977] (issue: {es-issue}85072[#85072]) - -Ingest:: -* Don't download geoip databases if geoip system index is blocked {es-pull}86842[#86842] -* Fix NPE when using object field as match field for enrich policy {es-pull}86089[#86089] (issue: {es-issue}86058[#86058]) -* Handle `.geoip_databases` being an alias or a concrete index {es-pull}85792[#85792] (issue: {es-issue}85756[#85756]) - -Machine Learning:: -* Adjust memory overhead for `PyTorch` models {es-pull}86416[#86416] -* Fix `max_model_memory_limit` reported by `_ml/info` when autoscaling is enabled {es-pull}86660[#86660] -* Improve reliability of job stats in larger clusters {es-pull}86305[#86305] -* Make autoscaling and task assignment use same memory staleness definition {es-pull}86632[#86632] (issue: {es-issue}86616[#86616]) -* Fix edge case which could cause the model bounds to inflate after detecting seasonality {ml-pull}2261[#2261] - -Packaging:: -* Fix edge case where user-defined heap settings are ignored {es-pull}86438[#86438] (issue: {es-issue}86431[#86431]) - -Security:: -* Authentication.token now uses version from the existing authentication {es-pull}85978[#85978] - -Snapshot/Restore:: -* Better failure for source-only snapshots of partially/fully mounted indices {es-pull}86207[#86207] -* Check if searchable snapshots cache pre-allocation is successful in Windows {es-pull}86192[#86192] (issue: {es-issue}85725[#85725]) -* Delay searchable snapshot allocation during shutdown {es-pull}86153[#86153] (issue: {es-issue}85052[#85052]) -* Support generating AWS role session name in case it's not provided {es-pull}86255[#86255] - -Stats:: -* Correctly calculate disk usage for frozen data tier telemetry {es-pull}86580[#86580] (issue: {es-issue}86055[#86055]) - -[[upgrade-8.2.1]] -[float] -=== Upgrades - -Packaging:: -* Switch to OpenJDK and upgrade to 18.0.1 {es-pull}86554[#86554] - - diff --git a/docs/reference/release-notes/8.2.2.asciidoc b/docs/reference/release-notes/8.2.2.asciidoc deleted file mode 100644 index 28647c00cbb0c..0000000000000 --- a/docs/reference/release-notes/8.2.2.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -[[release-notes-8.2.2]] -== {es} version 8.2.2 - -Also see <>. -[[known-issues-8.2.2]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[bug-8.2.2]] -[float] -=== Bug fixes - -Audit:: -* Fix audit logging to consistently include port number in `origin.address` {es-pull}86732[#86732] - -CCR:: -* Fix CCR following a datastream with closed indices on the follower corrupting the datastream {es-pull}87076[#87076] (issue: {es-issue}87048[#87048]) - -Geo:: -* Guard for adding null value tags to vector tiles {es-pull}87051[#87051] - -Infra/Core:: -* Adjust osprobe assertion for burst cpu {es-pull}86990[#86990] - -Machine Learning:: -* Fix ML task auditor exception early in cluster lifecycle {es-pull}87023[#87023] (issue: {es-issue}87002[#87002]) -* Adjacency weighting fixes in categorization {ml-pull}2277[#2277] - -[[enhancement-8.2.2]] -[float] -=== Enhancements - -Machine Learning:: -* Make ML native processes work with glibc 2.35 (required for Ubuntu 22.04) {ml-pull}2272[#2272] diff --git a/docs/reference/release-notes/8.2.3.asciidoc b/docs/reference/release-notes/8.2.3.asciidoc deleted file mode 100644 index f4742e38b1e20..0000000000000 --- a/docs/reference/release-notes/8.2.3.asciidoc +++ /dev/null @@ -1,39 +0,0 @@ -[[release-notes-8.2.3]] -== {es} version 8.2.3 - -Also see <>. -[[known-issues-8.2.3]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[bug-8.2.3]] -[float] -=== Bug fixes - -Authorization:: -* Fix resolution of wildcard application privileges {es-pull}87293[#87293] - -CCR:: -* Remove some blocking in CcrRepository {es-pull}87235[#87235] - -Indices APIs:: -* Add Resolve Index API to the "read" permission for an index {es-pull}87052[#87052] (issue: {es-issue}86977[#86977]) - -Infra/Core:: -* Clean up `DeflateCompressor` after exception {es-pull}87163[#87163] (issue: {es-issue}87160[#87160]) - -Security:: -* Security plugin close releasable realms {es-pull}87429[#87429] (issue: {es-issue}86286[#86286]) - -Snapshot/Restore:: -* Fork after calling `getRepositoryData` from `StoreRecovery` {es-pull}87254[#87254] (issue: {es-issue}87237[#87237]) - -[[enhancement-8.2.3]] -[float] -=== Enhancements - -Infra/Core:: -* Force property expansion for security policy {es-pull}87396[#87396] - - diff --git a/docs/reference/release-notes/8.3.0.asciidoc b/docs/reference/release-notes/8.3.0.asciidoc deleted file mode 100644 index 17b5edcbed392..0000000000000 --- a/docs/reference/release-notes/8.3.0.asciidoc +++ /dev/null @@ -1,367 +0,0 @@ -[[release-notes-8.3.0]] -== {es} version 8.3.0 - -Also see <>. -[[known-issues-8.3.0]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[bug-8.3.0]] -[float] -=== Bug fixes - -Aggregations:: -* Allow `serial_diff` under `min_doc_count` aggs {es-pull}86401[#86401] -* Allow bucket paths to specify `_count` within a bucket {es-pull}85720[#85720] -* Fix a bug with flattened fields in terms aggregations {es-pull}87392[#87392] -* Fix flaky `top_metrics` test {es-pull}86582[#86582] (issue: {es-issue}86377[#86377]) -* Fix: check field existence before trying to merge running stats {es-pull}86926[#86926] -* Fix: ordering terms aggregation on top metrics null values {es-pull}85774[#85774] -* Serialize interval in auto date histogram aggregation {es-pull}85473[#85473] - -Audit:: -* Fix audit logging to consistently include port number in `origin.address` {es-pull}86732[#86732] -* Support removing ignore filters for audit logging {es-pull}87675[#87675] (issue: {es-issue}68588[#68588]) - -Authentication:: -* An authorized user can disable a user with same name but different realm {es-pull}86473[#86473] -* Fix clearing of `lastSuccessfulAuthCache` when clear all realm cache API is called {es-pull}86909[#86909] (issue: {es-issue}86650[#86650]) - -Authorization:: -* Fix resolution of wildcard application privileges {es-pull}87293[#87293] - -CAT APIs:: -* Get hidden indices stats in `GET _cat/shards` {es-pull}86601[#86601] (issue: {es-issue}84656[#84656]) - -CCR:: -* Prevent invalid datastream metadata when CCR follows a datastream with closed indices on the follower {es-pull}87076[#87076] (issue: {es-issue}87048[#87048]) -* Remove some blocking in CcrRepository {es-pull}87235[#87235] - -Cluster Coordination:: -* Add `master_timeout` support to voting config exclusions APIs {es-pull}86670[#86670] -* Small fixes to clear voting config excls API {es-pull}87828[#87828] - -Discovery-Plugins:: -* [discovery-gce] Fix initialisation of transport in FIPS mode {es-pull}85817[#85817] (issue: {es-issue}85803[#85803]) - -Distributed:: -* Enforce external id uniqueness during `DesiredNode` construction {es-pull}84227[#84227] - -Engine:: -* Fork to WRITE thread before failing shard in `updateCheckPoints` {es-pull}87458[#87458] (issue: {es-issue}87094[#87094]) -* Removing Blocking Wait for Close in `RecoverySourceHandler` {es-pull}86127[#86127] (issue: {es-issue}85839[#85839]) - -Features:: -* Fix 'autoGeneratedTimestamp should not be set externally' error when retrying IndexRequest {es-pull}86184[#86184] (issue: {es-issue}83927[#83927]) - -Geo:: -* Fix Geotile aggregations on `geo_shapes` for precision 0 {es-pull}87202[#87202] (issue: {es-issue}87201[#87201]) -* Fix `null_value` for array-valued `geo_point` fields {es-pull}85959[#85959] -* Guard for adding null value tags to vector tiles {es-pull}87051[#87051] -* Quantize geo queries to remove true negatives from search results {es-pull}85441[#85441] (issue: {es-issue}40891[#40891]) - -Highlighting:: -* `FastVectorHighlighter` should use `ValueFetchers` to load source data {es-pull}85815[#85815] (issues: {es-issue}75011[#75011], {es-issue}84690[#84690], {es-issue}82458[#82458], {es-issue}80895[#80895]) - -ILM+SLM:: -* Make the ILM Move to Error Step Batched {es-pull}85565[#85565] (issue: {es-issue}81880[#81880]) - -Indices APIs:: -* Make `GetIndexAction` cancellable {es-pull}87681[#87681] - -Infra/Circuit Breakers:: -* Make CBE message creation more robust {es-pull}87881[#87881] - -Infra/Core:: -* Adjust osprobe assertion for burst cpu {es-pull}86990[#86990] -* Clean up `DeflateCompressor` after exception {es-pull}87163[#87163] (issue: {es-issue}87160[#87160]) -* Error on direct creation of non-primary system index {es-pull}86707[#86707] -* Fix null message in output {es-pull}86981[#86981] -* Fix using `FilterOutputStream` without overriding bulk write {es-pull}86304[#86304] -* Hide system indices and their aliases in upgraded clusters {es-pull}87125[#87125] -* Refactor code to avoid JDK bug: JDK-8285835 {es-pull}86614[#86614] - -Infra/Logging:: -* Temporarily provide `SystemPropertiesPropertySource` {es-pull}87149[#87149] - -Infra/Node Lifecycle:: -* Upgrade folders after settings validation {es-pull}87319[#87319] - -Infra/Plugins:: -* Use Windows newlines when listing plugin information on Windows {es-pull}86408[#86408] (issue: {es-issue}86352[#86352]) - -Infra/REST API:: -* Fix min node version before state recovery {es-pull}86482[#86482] - -Infra/Scripting:: -* Allow to sort by script value using `SemVer` semantics {es-pull}85990[#85990] (issues: {es-issue}85989[#85989], {es-issue}82287[#82287]) -* Script: Fix setter shortcut for unbridged setters {es-pull}86868[#86868] -* Script: Load Whitelists as Resource {es-pull}87539[#87539] - -Infra/Settings:: -* Permit removal of archived index settings {es-pull}86107[#86107] - -Ingest:: -* Execute self-reference checks once per pipeline {es-pull}85926[#85926] (issue: {es-issue}85790[#85790]) - -Java Low Level REST Client:: -* Do not retry client requests when failing with `ContentTooLargeException` {es-pull}87248[#87248] (issue: {es-issue}86041[#86041]) - -License:: -* Consistent response for starting basic license {es-pull}86272[#86272] (issue: {es-issue}86244[#86244]) - -Machine Learning:: -* Fix ML task auditor exception early in cluster lifecycle {es-pull}87023[#87023] (issue: {es-issue}87002[#87002]) -* Fix `WordPiece` tokenization of unknown words with known subwords {es-pull}87510[#87510] -* Fix distribution change check for `change_point` aggregation {es-pull}86423[#86423] -* Fixes inference timeout handling bug that throws unexpected `NullPointerException` {es-pull}87533[#87533] -* Correct logic for restart from failover fine tuning hyperparameters for training classification and regression models {ml-pull}2251[#2251] -* Fix possible source of "x = NaN, distribution = class boost::math::normal_distribution<..." log errors training classification and regression models {ml-pull}2249[#2249] -* Fix some bugs affecting decision to stop optimizing hyperparameters for training classification and regression models {ml-pull}2259[#2259] -* Fix cause of "Must provide points at which to evaluate function" log error training classification and regression models {ml-pull}2268[#2268] -* Fix a source of "Discarding sample = nan, weights = ..." log errors for time series anomaly detection {ml-pull}2286[#2286] - -Mapping:: -* Don't run `include_in_parent` when in `copy_to` context {es-pull}87123[#87123] (issue: {es-issue}87036[#87036]) - -Network:: -* Reject `openConnection` attempt while closing {es-pull}86315[#86315] (issue: {es-issue}86249[#86249]) - -Recovery:: -* Fail shard if STARTED after master failover {es-pull}87451[#87451] (issue: {es-issue}87367[#87367]) - -SQL:: -* Fix FORMAT function to comply with Microsoft SQL Server specification {es-pull}86225[#86225] (issue: {es-issue}66560[#66560]) -* Implement binary format support for SQL clear cursor {es-pull}84230[#84230] (issue: {es-issue}53359[#53359]) - -Search:: -* Add status field to Multi Search Template Responses {es-pull}85496[#85496] (issue: {es-issue}83029[#83029]) -* Fields API to allow fetching values when `_source` is disabled {es-pull}87267[#87267] (issue: {es-issue}87072[#87072]) -* Fix `_terms_enum` on unconfigured `constant_keyword` {es-pull}86191[#86191] (issues: {es-issue}86187[#86187], {es-issue}86267[#86267]) -* Fix status code when open point in time without `keep_alive` {es-pull}87011[#87011] (issue: {es-issue}87003[#87003]) -* Handle empty point values in `DiskUsage` API {es-pull}87826[#87826] (issue: {es-issue}87761[#87761]) -* Make sure to rewrite explain query on coordinator {es-pull}87013[#87013] (issue: {es-issue}64281[#64281]) - -Security:: -* Make user and role name constraint consistent with max document ID {es-pull}86728[#86728] (issue: {es-issue}66020[#66020]) -* Security plugin close releasable realms {es-pull}87429[#87429] (issue: {es-issue}86286[#86286]) - -Snapshot/Restore:: -* DONE should mean fully processed in snapshot status {es-pull}86414[#86414] -* Distinguish missing and invalid repositories {es-pull}85551[#85551] (issue: {es-issue}85550[#85550]) -* Fork after calling `getRepositoryData` from `StoreRecovery` {es-pull}87264[#87264] (issue: {es-issue}87237[#87237]) -* Fork after calling `getRepositoryData` from `StoreRecovery` {es-pull}87254[#87254] (issue: {es-issue}87237[#87237]) -* Throw exception on illegal `RepositoryData` updates {es-pull}87654[#87654] -* Upgrade Azure SDK to 12.16.0 {es-pull}86135[#86135] - -Stats:: -* Run `TransportClusterInfoActions` on MANAGEMENT pool {es-pull}87679[#87679] - -TSDB:: -* TSDB: fix the time_series in order collect priority {es-pull}85526[#85526] -* TSDB: fix wrong initial value of tsidOrd in TimeSeriesIndexSearcher {es-pull}85713[#85713] (issue: {es-issue}85711[#85711]) - -Transform:: -* Fix transform `_start` permissions to use stored headers in the config {es-pull}86802[#86802] -* [Transforms] fix bug when unsetting retention policy {es-pull}87711[#87711] - -[[deprecation-8.3.0]] -[float] -=== Deprecations - -Authentication:: -* Configuring a bind DN in an LDAP or Active Directory (AD) realm without a corresponding bind password is deprecated {es-pull}85326[#85326] (issue: {es-issue}47191[#47191]) - -[[enhancement-8.3.0]] -[float] -=== Enhancements - -Aggregations:: -* Improve min and max performance while in a `random_sampler` aggregation {es-pull}85118[#85118] - -Authentication:: -* Support configurable claims in JWT Realm Tokens {es-pull}86533[#86533] -* Warn on user roles disabled due to licensing requirements for document or field level security {es-pull}85393[#85393] (issue: {es-issue}79207[#79207]) -* `TokenService` decode JWTs, change warn to debug {es-pull}86498[#86498] - -Authorization:: -* Add delete privilege to `kibana_system` for Synthetics {es-pull}85844[#85844] -* Authorize painless execute as index action when an index is specified {es-pull}85512[#85512] (issue: {es-issue}86428[#86428]) -* Better error message for run-as denials {es-pull}85501[#85501] (issue: {es-issue}72904[#72904]) -* Improve "Has Privilege" performance for boolean-only response {es-pull}86685[#86685] -* Relax restrictions for role names in roles API {es-pull}86604[#86604] (issue: {es-issue}86480[#86480]) -* [Osquery] Extend `kibana_system` role with an access to osquery_manager… {es-pull}86609[#86609] - -Autoscaling:: -* Add support for CPU ranges in desired nodes {es-pull}86434[#86434] - -Cluster Coordination:: -* Block joins while applier is busy {es-pull}84919[#84919] -* Compute master task batch summary lazily {es-pull}86210[#86210] -* Log `cluster.initial_master_nodes` at startup {es-pull}86101[#86101] -* Reduce resource needs of join validation {es-pull}85380[#85380] (issue: {es-issue}83204[#83204]) -* Report pending joins in `ClusterFormationFailureHelper` {es-pull}85635[#85635] -* Speed up map diffing (2) {es-pull}86375[#86375] - -Discovery-Plugins:: -* Remove redundant jackson dependencies from discovery-azure {es-pull}87898[#87898] - -Distributed:: -* Keep track of desired nodes cluster membership {es-pull}84165[#84165] - -Engine:: -* Cache immutable translog lastModifiedTime {es-pull}82721[#82721] (issue: {es-issue}82720[#82720]) -* Increase `force_merge` threadpool size based on the allocated processors {es-pull}87082[#87082] (issue: {es-issue}84943[#84943]) -* More optimal forced merges when max_num_segments is greater than 1 {es-pull}85065[#85065] - -Geo:: -* Support 'GeoJSON' in CartesianPoint for 'point' {es-pull}85442[#85442] -* Support geo label position as runtime field {es-pull}86154[#86154] -* Support geo label position through REST vector tiles API {es-pull}86458[#86458] (issue: {es-issue}86044[#86044]) - -Health:: -* Add a basic check for tier preference and allocation filter clashing {es-pull}85071[#85071] -* Add preflight checks to Health API to ensure health is obtainable {es-pull}86404[#86404] -* Add tier information on health api migrate tiers user actions {es-pull}87486[#87486] -* Health api add indicator doc links {es-pull}86904[#86904] (issue: {es-issue}86892[#86892]) -* Health api copy editing {es-pull}87010[#87010] -* Return a default user action if no actions could be determined {es-pull}87079[#87079] - -ILM+SLM:: -* Make the ILM and SLM `history_index_enabled` settings dynamic {es-pull}86493[#86493] - -Indices APIs:: -* Batch execute template and pipeline cluster state operations {es-pull}86017[#86017] - -Infra/Core:: -* Add mapping for tags for the elastic agent {es-pull}86298[#86298] -* Expand jar hell to include modules {es-pull}86622[#86622] -* Faster GET _cluster/settings API {es-pull}86405[#86405] (issue: {es-issue}82342[#82342]) -* Faster string writes by saving stream flushes {es-pull}86114[#86114] -* Fleet: Add `start_time` and `minimum_execution_duration` attributes to actions {es-pull}86167[#86167] -* Force property expansion for security policy {es-pull}87396[#87396] -* Refactor array part into a `BytesRefArray` which can be serialized and … {es-pull}85826[#85826] -* Speed up ip v4 parser {es-pull}86253[#86253] -* Use varhandles for primitive type conversion in more places {es-pull}85577[#85577] (issue: {es-issue}78823[#78823]) - -Infra/Scripting:: -* Script: add ability to alias classes in whitelist {es-pull}86899[#86899] - -Ingest:: -* Iteratively execute synchronous ingest processors {es-pull}84250[#84250] (issue: {es-issue}84274[#84274]) -* Skip `ensureNoSelfReferences` check in `IngestService` {es-pull}87337[#87337] - -License:: -* Initialize active realms without logging a message {es-pull}86134[#86134] (issue: {es-issue}81380[#81380]) - -Machine Learning:: -* A text categorization aggregation that works like ML categorization {es-pull}80867[#80867] -* Add new _infer endpoint for all supervised models and deprecate deployment infer api {es-pull}86361[#86361] -* Adds new `question_answering` NLP task for extracting answers to questions from a document {es-pull}85958[#85958] -* Adds start and end params to `_preview` and excludes cold/frozen tiers from unbounded previews {es-pull}86989[#86989] -* Adjust automatic JVM heap sizing for dedicated ML nodes {es-pull}86399[#86399] -* Replace the implementation of the `categorize_text` aggregation {es-pull}85872[#85872] -* Upgrade PyTorch to version 1.11 {ml-pull}2233[#2233], {ml-pull}2235[#2235],{ml-pull}2238[#2238] -* Upgrade zlib to version 1.2.12 on Windows {ml-pull}2253[#2253] -* Upgrade libxml2 to version 2.9.14 on Linux and Windows {ml-pull}2287[#2287] -* Improve time series model stability and anomaly scoring consistency for data - for which many buckets are empty {ml-pull}2267[#2267] -* Address root cause for actual equals typical equals zero anomalies {ml-pull}2270[#2270] -* Better handling of outliers in update immediately after detecting changes in time series {ml-pull}2280[#2280] - -Mapping:: -* Intern field names in Mappers {es-pull}86301[#86301] -* Replace BYTE_BLOCK_SIZE - 2 with indexWriter#MAX_TERM_LENGTH {es-pull}85518[#85518] - -Network:: -* Log node identity at startup {es-pull}85773[#85773] - -Search:: -* GeoBoundingBox query should work on bounding box with equal latitude or longitude {es-pull}85788[#85788] (issue: {es-issue}77717[#77717]) -* Improve error message for search API url parameters {es-pull}86984[#86984] (issue: {es-issue}79719[#79719]) - -Security:: -* Add run-as support for OAuth2 tokens {es-pull}86680[#86680] -* Relax username restrictions for User APIs {es-pull}86398[#86398] (issue: {es-issue}86326[#86326]) -* User Profile - Add hint support to SuggestProfiles API {es-pull}85890[#85890] -* User Profile - Add new action origin and internal user {es-pull}86026[#86026] -* User Profile - Support request cancellation on HTTP disconnect {es-pull}86332[#86332] -* User Profile - add caching for `hasPrivileges` check {es-pull}86543[#86543] - -Snapshot/Restore:: -* Add parameter to exclude indices in a snapshot from response {es-pull}86269[#86269] (issue: {es-issue}82937[#82937]) - -Stats:: -* Add documentation for "io_time_in_millis" {es-pull}84911[#84911] - -TLS:: -* Set `serverAuth` extended key usage for generated certificates and CSRs {es-pull}86311[#86311] (issue: {es-issue}81067[#81067]) - -TSDB:: -* Aggregation Execution Context add timestamp provider {es-pull}85850[#85850] - -Transform:: -* Prefer secondary auth headers for transforms {es-pull}86757[#86757] -* Support `range` aggregation in transform {es-pull}86501[#86501] - -[[feature-8.3.0]] -[float] -=== New features - -Authorization:: -* Has privileges API for profiles {es-pull}85898[#85898] - -Geo:: -* New geo_grid query to be used with geogrid aggregations {es-pull}86596[#86596] (issue: {es-issue}85727[#85727]) - -Health:: -* Add support for `impact_areas` to health impacts {es-pull}85830[#85830] (issue: {es-issue}85829[#85829]) -* Add troubleshooting guides to shards allocation actions {es-pull}87078[#87078] -* Adding potential impacts to remaining health indicators {es-pull}86197[#86197] -* Health api drill down {es-pull}85234[#85234] (issue: {es-issue}84793[#84793]) -* New service to keep track of the master history as seen from each node {es-pull}85941[#85941] -* Sorting impact index names by index priority {es-pull}85347[#85347] - -Mapping:: -* Add support for dots in field names for metrics usecases {es-pull}86166[#86166] (issue: {es-issue}63530[#63530]) -* Synthetic source {es-pull}85649[#85649] - -SQL:: -* SQ: Allow partial results in SQL queries {es-pull}85897[#85897] (issue: {es-issue}33148[#33148]) - -Search:: -* Snapshots as simple archives {es-pull}86261[#86261] (issue: {es-issue}81210[#81210]) - -TSDB:: -* TSDB: Implement downsampling on time-series indices {es-pull}85708[#85708] (issues: {es-issue}69799[#69799], {es-issue}65769[#65769]) - -[[upgrade-8.3.0]] -[float] -=== Upgrades - -Infra/CLI:: -* Upgrade procrun executables to 1.3.1 {es-pull}86710[#86710] - -Infra/Core:: -* Upgrade jackson to 2.13.2 {es-pull}86051[#86051] - -Ingest:: -* Upgrading to tika 2.4 {es-pull}86015[#86015] - -Network:: -* Upgrade to Netty 4.1.76 {es-pull}86252[#86252] - -Packaging:: -* Update Iron Bank base image to 8.6 {es-pull}86796[#86796] - -SQL:: -* Update dependency - JLine - to v 3.21.0 {es-pull}83767[#83767] (issue: {es-issue}83575[#83575]) - -Search:: -* Update to public lucene 9.2.0 release {es-pull}87162[#87162] - -Snapshot/Restore:: -* Upgrade GCS Plugin to 1.118.1 {es-pull}87800[#87800] - - diff --git a/docs/reference/release-notes/8.3.1.asciidoc b/docs/reference/release-notes/8.3.1.asciidoc deleted file mode 100644 index 6ea9d008f7989..0000000000000 --- a/docs/reference/release-notes/8.3.1.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -[[release-notes-8.3.1]] -== {es} version 8.3.1 - -Also see <>. -[[known-issues-8.3.1]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] -[[bug-8.3.1]] -[float] -=== Bug fixes - -Audit:: -* Support removing ignore filters for audit logging {es-pull}87675[#87675] (issue: {es-issue}68588[#68588]) - -Ingest:: -* Don't ignore pipeline for upserts in bulk api {es-pull}87719[#87719] (issue: {es-issue}87131[#87131]) -* Geoip processor should respect the `ignore_missing` in case of missing database {es-pull}87793[#87793] (issue: {es-issue}87345[#87345]) - -Machine Learning:: -* Improve trained model stats API performance {es-pull}87978[#87978] - -Snapshot/Restore:: -* Use the provided SAS token without SDK sanitation that can produce invalid signatures {es-pull}88155[#88155] (issue: {es-issue}88140[#88140]) - -Stats:: -* Run `TransportClusterInfoActions` on MANAGEMENT pool {es-pull}87679[#87679] - -Transform:: -* Execute `_refresh` (with system permissions) separately from `delete by query` (with user permissions) {es-pull}88005[#88005] (issue: {es-issue}88001[#88001]) - -[[enhancement-8.3.1]] -[float] -=== Enhancements - -Discovery-Plugins:: -* Remove redundant jackson dependencies from discovery-azure {es-pull}87898[#87898] - -Performance:: -* Warn about impact of large readahead on search {es-pull}88007[#88007] - - diff --git a/docs/reference/release-notes/8.3.2.asciidoc b/docs/reference/release-notes/8.3.2.asciidoc deleted file mode 100644 index 9e70db95fbc0e..0000000000000 --- a/docs/reference/release-notes/8.3.2.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -[[release-notes-8.3.2]] -== {es} version 8.3.2 - -Also see <>. - -{es} 8.3.2 is a version compatibility release for the {stack}. - -[[known-issues-8.3.2]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - -[[bug-8.3.2]] -[float] -=== Bug fixes - -Geo:: -* Fix potential circuit breaker leak on `InternalGeoGrid` {es-pull}88273[#88273] (issue: {es-issue}88261[#88261]) - -[[feature-8.3.2]] -[float] -=== New features - -Heath:: -* Add user action for the `instance_has_master` indicator {es-pull}87963[#87963] diff --git a/docs/reference/release-notes/8.3.3.asciidoc b/docs/reference/release-notes/8.3.3.asciidoc deleted file mode 100644 index 8dc807ca293cb..0000000000000 --- a/docs/reference/release-notes/8.3.3.asciidoc +++ /dev/null @@ -1,46 +0,0 @@ -[[release-notes-8.3.3]] -== {es} version 8.3.3 - -Also see <>. - -[[known-issues-8.3.3]] -[float] -=== Known issues - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - - -[[bug-8.3.3]] -[float] -=== Bug fixes - -Infra/Core:: -* Add `build_flavor` back to info API REST response {es-pull}88336[#88336] (issue: {es-issue}88318[#88318]) - -Mapping:: -* Enforce max values limit only when running a script {es-pull}88295[#88295] - -Monitoring:: -* Switch cgroup memory fields to keyword {es-pull}88260[#88260] - -Packaging:: -* Fix Docker positional parameter passing {es-pull}88584[#88584] - -Security:: -* Ensure `CreateApiKey` always creates a new document {es-pull}88413[#88413] - -[[enhancement-8.3.3]] -[float] -=== Enhancements - -Security:: -* New setting to close idle connections in OIDC back-channel {es-pull}87773[#87773] - -[[upgrade-8.3.3]] -[float] -=== Upgrades - -Packaging:: -* Upgrade to OpenJDK 18.0.2+9 {es-pull}88675[#88675] (issue: {es-issue}88673[#88673]) - - diff --git a/docs/reference/release-notes/8.4.0.asciidoc b/docs/reference/release-notes/8.4.0.asciidoc deleted file mode 100644 index c3820ac12481a..0000000000000 --- a/docs/reference/release-notes/8.4.0.asciidoc +++ /dev/null @@ -1,356 +0,0 @@ -[[release-notes-8.4.0]] -== {es} version 8.4.0 - -Also see <>. - -[[known-issues-8.4.0]] -[float] -=== Known issues - -// tag::ml-pre-7-datafeeds-known-issue[] -* {ml-cap} {dfeeds} cannot be listed if any are not modified since version 6.x -+ -If you have a {dfeed} that was created in version 5.x or 6.x and has not -been updated since 7.0, it is not possible to list {dfeeds} in -8.4 and 8.5. This means that {anomaly-jobs} cannot be managed using -{kib}. This issue is fixed in 8.6.0. -+ -If you upgrade to 8.4 or 8.5 with such a {dfeed}, you need to -work around the problem by updating each {dfeed}'s authorization information -using https://support.elastic.dev/knowledge/view/b5a879db[these steps]. -// end::ml-pre-7-datafeeds-known-issue[] - -// tag::file-based-settings-deadlock-known-issue[] -* Orchestrators which use a `settings.json` file to configure Elasticsearch may -encounter deadlocks during master elections (issue: {es-issue}92812[#92812]) -+ -To resolve the deadlock, remove the `settings.json` file and restart the -affected node. -// end::file-based-settings-deadlock-known-issue[] - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - -// tag::ingest-processor-log4j-cluster-instability-known-issue[] -* When the {ref}/attachment.html[ingest attachment processor] is used, the -interaction of https://tika.apache.org/[Tika] with log4j 2.18.0 and higher -(introduced in {es} 8.4.0) results in excessive logging. This logging is so -excessive that it can lead to cluster instability, to the point where the -cluster is unusable and nodes must be restarted. (issue: {es-issue}91964[#91964]). -This issue is fixed in {es} 8.7.0 ({es-pull}93878[#93878]) -+ -To resolve the issue, upgrade to 8.7.0 or higher. -// end::ingest-processor-log4j-cluster-instability-known-issue[] - -[[bug-8.4.0]] -[float] -=== Bug fixes - -Aggregations:: -* Fix multi-value handling in composite agg {es-pull}88638[#88638] -* Fix: extract matrix stats using `bucket_selector` `buckets_path` {es-pull}88271[#88271] (issue: {es-issue}87454[#87454]) -* Make the metric in the `buckets_path` parameter optional {es-pull}87220[#87220] (issue: {es-issue}72983[#72983]) -* Propagate alias filters to significance aggs filters {es-pull}88221[#88221] (issue: {es-issue}81585[#81585]) - -Allocation:: -* Clamp auto-expand replicas to the closest value {es-pull}87505[#87505] (issue: {es-issue}84788[#84788]) -* Prevent re-balancing using outdated node weights in some cases {es-pull}88385[#88385] (issue: {es-issue}88384[#88384]) -* Remove any existing `read_only_allow_delete` index blocks when `cluster.routing.allocation.disk.threshold_enabled` is set to `false` {es-pull}87841[#87841] (issue: {es-issue}86383[#86383]) -* Replace health request with a state observer {es-pull}88641[#88641] - -Authentication:: -* Fix unique realm name check to cover default realms {es-pull}87999[#87999] - -Authorization:: -* Add rollover permissions for `remote_monitoring_agent` {es-pull}87717[#87717] (issue: {es-issue}84161[#84161]) - -Autoscaling:: -* Autoscaling during shrink {es-pull}88292[#88292] (issue: {es-issue}85480[#85480]) -* Do not include desired nodes in snapshots {es-pull}87695[#87695] - -Cluster Coordination:: -* Improve rejection of ambiguous voting config name {es-pull}89239[#89239] - -Data streams:: -* Fix renaming data streams with CCR replication {es-pull}88875[#88875] (issue: {es-issue}81751[#81751]) - -Distributed:: -* Fixed NullPointerException on bulk request {es-pull}88385[#88385] - -EQL:: -* Avoid attempting PIT close on PIT open failure {es-pull}87498[#87498] -* Improve EQL Sequence circuit breaker precision {es-pull}88538[#88538] (issue: {es-issue}88300[#88300]) - -Geo:: -* Geo_line aggregation returns a geojson point when the resulting line has only one point {es-pull}89199[#89199] (issue: {es-issue}85748[#85748]) -* Sort ranges in `geo_distance` aggregation {es-pull}89154[#89154] (issue: {es-issue}89147[#89147]) - -Health:: -* Fix NPE when checking if the last snapshot was success {es-pull}88811[#88811] -* Fixing a version check for master stability functionality {es-pull}89322[#89322] -* Fixing internal action names {es-pull}89182[#89182] -* Using the correct connection to fetch remote master history {es-pull}87299[#87299] - -Highlighting:: -* Handle ordering in plain highlighter for multiple inputs {es-pull}87414[#87414] (issue: {es-issue}87210[#87210]) - -ILM+SLM:: -* Batch ILM move to retry step task update {es-pull}86759[#86759] - -Infra/CLI:: -* Quote paths with whitespace in Windows service CLIs {es-pull}89072[#89072] (issue: {es-issue}89043[#89043]) - -Infra/Core:: -* Always close directory streams {es-pull}88560[#88560] -* Delete invalid settings for system indices {es-pull}88903[#88903] (issue: {es-issue}88324[#88324]) -* Disallow three-digit minor and revision versions {es-pull}87338[#87338] -* Handle snapshot restore in file settings {es-pull}89321[#89321] (issue: {es-issue}89183[#89183]) -* System indices ignore all user templates {es-pull}87260[#87260] (issues: {es-issue}42508[#42508], {es-issue}74271[#74271]) - -Infra/Node Lifecycle:: -* Fix message for stalled shutdown {es-pull}89254[#89254] - -Infra/Plugins:: -* Disable URL connection caching in SPIClassIterator {es-pull}88586[#88586] (issue: {es-issue}88275[#88275]) - -Infra/Scripting:: -* Script: `UpdateByQuery` can read doc version if requested {es-pull}88740[#88740] - -Machine Learning:: -* Address potential bug where trained models get stuck in starting after being allocated to node {es-pull}88945[#88945] -* Fix BERT and MPNet tokenization bug when handling unicode accents {es-pull}88907[#88907] (issue: {es-issue}88900[#88900]) -* Fix NLP `question_answering` task when best answer is only one token {es-pull}88347[#88347] -* Include start params in `_stats` for non-started model deployments {es-pull}89091[#89091] -* Fix minor tokenization bug when using fill_mask task with roberta tokenizer {es-pull}88825[#88825] -* Fix potential cause of classification and regression job failures {ml-pull}2385[#2385] - -Mapping:: -* Assign the right path to objects merged when parsing mappings {es-pull}89389[#89389] (issue: {es-issue}88573[#88573]) -* Don't modify source map when parsing composite runtime field {es-pull}89114[#89114] - -Network:: -* Ensure that the extended socket options TCP_KEEPXXX are available {es-pull}88935[#88935] (issue: {es-issue}88897[#88897]) - -SQL:: -* Fix `SqlSearchIT` `testAllTypesWithRequestToOldNodes` {es-pull}88883[#88883] (issue: {es-issue}88866[#88866]) -* Fix date range checks {es-pull}87151[#87151] (issue: {es-issue}77179[#77179]) -* fix object equals {es-pull}87887[#87887] - -Search:: -* Fix: use status code 500 for aggregation reduce phase errors if no shard failed {es-pull}88551[#88551] (issue: {es-issue}20004[#20004]) -* Override bulk visit methods of exitable point visitor {es-pull}82120[#82120] - -Security:: -* Ensure `secureString` remain open when reloading secure settings {es-pull}88922[#88922] - -Snapshot/Restore:: -* Fix queued snapshot assignments after partial snapshot fails due to delete {es-pull}88470[#88470] (issue: {es-issue}86724[#86724]) - -Transform:: -* Handle update error correctly {es-pull}88619[#88619] - -[[deprecation-8.4.0]] -[float] -=== Deprecations - -Vector Search:: -* Deprecate the `_knn_search` endpoint {es-pull}88828[#88828] - -[[enhancement-8.4.0]] -[float] -=== Enhancements - -Aggregations:: -* Adding cardinality support for `random_sampler` agg {es-pull}86838[#86838] -* Minor `RangeAgg` optimization {es-pull}86935[#86935] (issue: {es-issue}84262[#84262]) -* Speed counting filters/range/date_histogram aggs {es-pull}81322[#81322] -* Update bucket metric pipeline agg paths to allow intermediate single bucket and bucket qualified multi-bucket aggs {es-pull}85729[#85729] - -Allocation:: -* Add debug information to `ReactiveReason` about assigned and unassigned shards {es-pull}86132[#86132] (issue: {es-issue}85243[#85243]) -* Optimize log cluster health performance. {es-pull}87723[#87723] -* Use desired nodes during data tier allocation decisions {es-pull}87735[#87735] - -Audit:: -* Audit API key ID when create or grant API keys {es-pull}88456[#88456] -* Include API key metadata in audit log when an API key is created, granted, or updated {es-pull}88642[#88642] -* Updatable API keys - logging audit trail event {es-pull}88276[#88276] -* User Profile - audit support for security domain {es-pull}87097[#87097] - -Authentication:: -* If signature validation fails, reload JWKs and retry if new JWKs are found {es-pull}88023[#88023] - -Authorization:: -* App permissions with action patterns do not retrieve privileges {es-pull}85455[#85455] -* Cancellable Profile Has Privilege check {es-pull}87224[#87224] -* Return action denied error when user with insufficient privileges (`manage_own_api_key`) attempts a grant API key request {es-pull}87461[#87461] (issue: {es-issue}87438[#87438]) -* Update indices permissions to Enterprise Search service account {es-pull}88703[#88703] - -Autoscaling:: -* Add processors to autoscaling capacity response {es-pull}87895[#87895] -* Keep track of desired nodes status in cluster state {es-pull}87474[#87474] - -Cluster Coordination:: -* Deduplicate mappings in persisted cluster state {es-pull}88479[#88479] -* Expose segment details in PCSS debug log {es-pull}87412[#87412] -* Periodic warning for 1-node cluster w/ seed hosts {es-pull}88013[#88013] (issue: {es-issue}85222[#85222]) -* Report overall mapping size in cluster stats {es-pull}87556[#87556] - -Data streams:: -* Give doc-value-only mappings to numeric fields on metrics templates {es-pull}87100[#87100] - -Distributed:: -* Adding the ability to register a `PeerFinderListener` to Coordinator {es-pull}88626[#88626] -* Make Desired Nodes API operator-only {es-pull}87778[#87778] (issue: {es-issue}87777[#87777]) -* Support "dry run" mode for updating Desired Nodes {es-pull}88305[#88305] - -FIPS:: -* Log warning when hash function used by cache is not recommended in FIPS mode {es-pull}86740[#86740] -* Log warning when hashers for stored API keys or service tokens are not compliant with FIPS {es-pull}87363[#87363] - -Geo:: -* Optimize geogrid aggregations for singleton points {es-pull}87439[#87439] -* Support cartesian shape with doc values {es-pull}88487[#88487] -* Use a faster but less accurate log algorithm for computing Geotile Y coordinate {es-pull}87515[#87515] -* Use faster maths to project WGS84 to mercator {es-pull}88231[#88231] - -Health:: -* Add health user action for unhealthy SLM policy failure counts {es-pull}88523[#88523] -* Adding a transport action to get cluster formation info {es-pull}87306[#87306] -* Adding additional capability to the `master_is_stable` health indicator service {es-pull}87482[#87482] -* Creating a transport action for the `CoordinationDiagnosticsService` {es-pull}87984[#87984] -* Move the master stability logic into its own service separate from the `HealthIndicatorService` {es-pull}87672[#87672] -* Polling cluster formation state for master-is-stable health indicator {es-pull}88397[#88397] -* Remove cluster block preflight check from health api {es-pull}87520[#87520] (issue: {es-issue}87464[#87464]) - -ILM+SLM:: -* Add min_* conditions to rollover {es-pull}83345[#83345] -* Track the count of failed invocations since last successful policy snapshot {es-pull}88398[#88398] - -Infra/Core:: -* Improve console exception messages {es-pull}87942[#87942] -* Print full exception when console is non-interactive {es-pull}88297[#88297] -* Stop making index read-only when executing force merge index lifecycle management action {es-pull}81162[#81162] (issue: {es-issue}81162[#81162]) -* Stream input and output support for optional collections {es-pull}88127[#88127] -* Update version of internal http client {es-pull}87491[#87491] - -Infra/Logging:: -* Catch an exception when formatting a string fails {es-pull}87132[#87132] - -Infra/Scripting:: -* Script: Add Metadata to ingest context {es-pull}87309[#87309] -* Script: Metadata for update context {es-pull}88333[#88333] - -Infra/Settings:: -* Convert disk watermarks to RelativeByteSizeValues {es-pull}88719[#88719] - -Ingest:: -* Allow pipeline processor to ignore missing pipelines {es-pull}87354[#87354] -* Move the ingest attachment processor to the default distribution {es-pull}87989[#87989] -* Only perform `ensureNoSelfReferences` check during ingest when needed {es-pull}87352[#87352] (issue: {es-issue}87335[#87335]) -* Removing `BouncyCastle` dependencies from ingest-attachment plugin {es-pull}88031[#88031] - -Machine Learning:: -* Add authorization info to ML config listings {es-pull}87884[#87884] -* Add deployed native models to `inference_stats` in trained model stats response {es-pull}88187[#88187] -* Add inference cache hit count to inference node stats {es-pull}88807[#88807] -* Add new `cache_size` parameter to `trained_model` deployments API {es-pull}88450[#88450] -* Expand allowed NER labels to be any I-O-B tagged labels {es-pull}87091[#87091] -* Improve scalability of NLP models {es-pull}87366[#87366] -* Indicate overall deployment failure if all node routes are failed {es-pull}88378[#88378] -* New `frequent_items` aggregation {es-pull}83055[#83055] -* Fairer application of size penalty for model selection for training classification and regression models {ml-pull}2291[#2291] -* Accelerate training for data frame analytics by skipping fine parameter tuning if it is unnecessary {ml-pull}2298[#2298] -* Address some causes of high runtimes training regression and classification models on large data sets with many features {ml-pull}2332[#2332] -* Add caching for PyTorch inference {ml-pull}2305[#2305] -* Improve accuracy of anomaly detection median estimation {ml-pull}2367[#2367] (issue: {ml-issue}2364[#2364]) - -Mapping:: -* Enable synthetic source support on constant keyword fields {es-pull}88603[#88603] -* Speed up `NumberFieldMapper` {es-pull}85688[#85688] - -Monitoring:: -* JvmService use SingleObjectCache {es-pull}87236[#87236] - -Network:: -* Allow start cluster with unreachable remote clusters {es-pull}87298[#87298] -* Increase `http.max_header_size` default to 16kb {es-pull}88725[#88725] (issue: {es-issue}88501[#88501]) - -Query Languages:: -* Add support for VERSION field type in SQL and EQL {es-pull}87590[#87590] (issue: {es-issue}83375[#83375]) - -Rollup:: -* [TSDB] Add Kahan support to downsampling summation {es-pull}87554[#87554] - -SQL:: -* Implement support for partial search results in SQL CLI {es-pull}86982[#86982] (issue: {es-issue}86082[#86082]) -* Update Tableau connector to use connection dialog v2 {es-pull}88462[#88462] - -Search:: -* Add mapping stats for indexed `dense_vectors` {es-pull}86859[#86859] -* Improve error when sorting on incompatible types {es-pull}88399[#88399] (issue: {es-issue}73146[#73146]) -* Support kNN vectors in disk usage action {es-pull}88785[#88785] (issue: {es-issue}84801[#84801]) - -Security:: -* Add setting for `tcp_keepalive` for oidc back-channel {es-pull}87868[#87868] -* Support `run_as` another user when granting API keys {es-pull}88335[#88335] -* Support exists query for API key query {es-pull}87229[#87229] -* Updatable API keys - REST API spec and tests {es-pull}88270[#88270] -* Updatable API keys - noop check {es-pull}88346[#88346] - -Snapshot/Restore:: -* INFO logging of snapshot restore and completion {es-pull}88257[#88257] (issue: {es-issue}86610[#86610]) -* Make snapshot deletes not block the repository during data blob deletes {es-pull}86514[#86514] -* Retry after all S3 get failures that made progress {es-pull}88015[#88015] (issue: {es-issue}87243[#87243]) -* Speed up creating new `IndexMetaDataGenerations` without removed snapshots {es-pull}88344[#88344] -* Update HDFS Repository to HDFS 3.3.3 {es-pull}88039[#88039] - -Stats:: -* Sort ingest pipeline stats by use {es-pull}88035[#88035] - -TLS:: -* Add issuer to GET _ssl/certificates {es-pull}88445[#88445] - -Transform:: -* Add authorization info to transform config listings {es-pull}87570[#87570] -* Implement per-transform num_failure_retries setting {es-pull}87361[#87361] - -[[feature-8.4.0]] -[float] -=== New features - -Authentication:: -* Support updates of API key attributes (single operation route) {es-pull}88186[#88186] - -Health:: -* Master stability health indicator part 1 (when a master has been seen recently) {es-pull}86524[#86524] -* Remove help_url,rename summary to symptom, and `user_actions` to diagnosis {es-pull}88553[#88553] (issue: {es-issue}88474[#88474]) - -Infra/Core:: -* File Settings Service {es-pull}88329[#88329] - -Infra/Logging:: -* Stable logging API - the basic use case {es-pull}86612[#86612] - -Machine Learning:: -* Make composite aggs in datafeeds Generally Available {es-pull}88589[#88589] - -Search:: -* Add 'mode' option to `_source` field mapper {es-pull}88211[#88211] - -TSDB:: -* TSDB: Implement downsampling ILM Action for time-series indices {es-pull}87269[#87269] (issue: {es-issue}68609[#68609]) - -Vector Search:: -* Integrate ANN into `_search` endpoint {es-pull}88694[#88694] (issue: {es-issue}87625[#87625]) - -[[upgrade-8.4.0]] -[float] -=== Upgrades - -Infra/Core:: -* Upgrade to Log4J 2.18.0 {es-pull}88237[#88237] - -Network:: -* Upgrade to Netty 4.1.77 {es-pull}86630[#86630] - - diff --git a/docs/reference/release-notes/8.4.1.asciidoc b/docs/reference/release-notes/8.4.1.asciidoc deleted file mode 100644 index b0339e14a689f..0000000000000 --- a/docs/reference/release-notes/8.4.1.asciidoc +++ /dev/null @@ -1,27 +0,0 @@ -[[release-notes-8.4.1]] -== {es} version 8.4.1 - -Also see <>. - -[[known-issues-8.4.1]] -[float] -=== Known issues - -* When using date range search with format that does not have all date fields (missing month or day) -an incorrectly parsed date could be used. The workaround is to use date pattern with all date fields (year, month, day) -(issue: {es-issue}90187[#90187]) - -include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] - -include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - -include::8.4.0.asciidoc[tag=ingest-processor-log4j-cluster-instability-known-issue] - -[[bug-8.4.1]] -[float] -=== Bug fixes - -Machine Learning:: -* [ML] Validate trained model deployment `queue_capacity` limit {es-pull}89611[#89611] (issue: {es-issue}89555[#89555]) diff --git a/docs/reference/release-notes/8.4.2.asciidoc b/docs/reference/release-notes/8.4.2.asciidoc deleted file mode 100644 index 7fad1e6c467ae..0000000000000 --- a/docs/reference/release-notes/8.4.2.asciidoc +++ /dev/null @@ -1,92 +0,0 @@ -[[release-notes-8.4.2]] -== {es} version 8.4.2 - -Also see <>. - -[[known-issues-8.4.2]] -[float] -=== Known issues - -* **This version contains a regression in `multi_match` queries that use the -`cross_fields` scoring type.** {es} -+ -When running a <> query with the -`cross_fields` type, {es} can sometimes throw an IllegalArgument exception -with the message "totalTermFreq must be at least docFreq". If you use the -`cross_fields` scoring type, it is recommended that you skip version 8.4.2. -This regression was fixed in version 8.4.3. - -* When using date range search with format that does not have all date fields (missing month or day) -an incorrectly parsed date could be used. The workaround is to use date pattern with all date fields (year, month, day) -(issue: {es-issue}90187[#90187]) - -include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] - -include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - -include::8.4.0.asciidoc[tag=ingest-processor-log4j-cluster-instability-known-issue] - -[[bug-8.4.2]] -[float] -=== Bug fixes - -Allocation:: -* Fix debug mode in `MaxRetryAllocationDecider` {es-pull}89973[#89973] - -Authentication:: -* Fix double sending of response in `TransportOpenIdConnectPrepareAuthenticationAction` {es-pull}89930[#89930] - -Autoscaling:: -* Fix issue with autoscaling after a clone or split {es-pull}89768[#89768] (issue: {es-issue}89758[#89758]) - -Health:: -* Fix the conditions for fetching remote master history {es-pull}89472[#89472] (issue: {es-issue}89431[#89431]) - -ILM+SLM:: -* Copy `isHidden` during ILM alias swap {es-pull}89650[#89650] (issue: {es-issue}89604[#89604]) - -Infra/Core:: -* Extend the date rounding logic to be conditional {es-pull}89693[#89693] (issues: {es-issue}89096[#89096], {es-issue}58986[#58986]) -* Fix `FileSettingsService` hang on error update {es-pull}89630[#89630] -* Implement fix to terminate file Watcher thread to avoid deadlock {es-pull}89934[#89934] - -Ingest Node:: -* Fix pipeline `id` not present in ingest metadata inside `on_failure` block {es-pull}89632[#89632] - -Machine Learning:: -* Fix memory leak in `TransportDeleteExpiredDataAction` {es-pull}89935[#89935] -* Do not retain categorization tokens when existing category matches {ml-pull}2398[#2398] - -Network:: -* Fix memory leak when double invoking `RestChannel.sendResponse` {es-pull}89873[#89873] - -Ranking:: -* Avoid negative scores with `cross_fields` type {es-pull}89016[#89016] (issue: {es-issue}44700[#44700]) - -Rollup:: -* Fork `TransportRollupCapsAction` to MANAGEMENT POOL {es-pull}89803[#89803] - -Search:: -* Empty intervals needs to start in position -1 {es-pull}89962[#89962] (issue: {es-issue}89789[#89789]) - -Transform:: -* Scheduler concurrency fix {es-pull}89716[#89716] (issue: {es-issue}88991[#88991]) - -[[enhancement-8.4.2]] -[float] -=== Enhancements - -Allocation:: -* Log unsuccessful attempts to get credentials from web identity tokens {es-pull}88241[#88241] - -Health:: -* Add delayed allocation diagnosis case to shards availability indicator {es-pull}89056[#89056] - -[[upgrade-8.4.2]] -[float] -=== Upgrades - -Packaging:: -* Update OpenJDK to 18.0.2.1 {es-pull}89535[#89535] (issue: {es-issue}89531[#89531]) diff --git a/docs/reference/release-notes/8.4.3.asciidoc b/docs/reference/release-notes/8.4.3.asciidoc deleted file mode 100644 index 2efa07f51f44b..0000000000000 --- a/docs/reference/release-notes/8.4.3.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -[[release-notes-8.4.3]] -== {es} version 8.4.3 - -Also see <>. - -[[known-issues-8.4.3]] -[float] -=== Known issues - -* When using date range search with format that does not have all date fields (missing month or day) -an incorrectly parsed date could be used. The workaround is to use date pattern with all date fields (year, month, day) -(issue: {es-issue}90187[#90187]) - -include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] - -include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - -include::8.4.0.asciidoc[tag=ingest-processor-log4j-cluster-instability-known-issue] - -[[bug-8.4.3]] -[float] -=== Bug fixes - -Infra/Core:: -* Fix file permission errors to avoid repeated error save loops on Windows {es-pull}90271[#90271] (issue: {es-issue}90222[#90222]) - -Ingest Node:: -* Prevent serialization errors in the nodes stats API {es-pull}90319[#90319] (issue: {es-issue}77973[#77973]) - -[[regression-8.4.3]] -[float] -=== Regressions - -Ranking:: -* Ensure `cross_fields` always uses valid term statistics {es-pull}90314[#90314] diff --git a/docs/reference/release-notes/8.5.0.asciidoc b/docs/reference/release-notes/8.5.0.asciidoc deleted file mode 100644 index 682353f930189..0000000000000 --- a/docs/reference/release-notes/8.5.0.asciidoc +++ /dev/null @@ -1,340 +0,0 @@ -[[release-notes-8.5.0]] -== {es} version 8.5.0 - -Also see <>. - -[[known-issues-8.5.0]] -[float] -=== Known issues - -* It is possible to inadvertently create an alias with the same name as an -index in version 8.5.0. This action leaves the cluster in an invalid state in -which several features will not work correctly, and it may not even be possible -to restart nodes while in this state. Upgrade to 8.5.1 as soon as possible to -avoid the risk of this occurring ({es-pull}91456[#91456]). If your cluster is -affected by this issue, upgrade to 8.5.3 to repair it ({es-pull}91887[#91887]). - -include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] - -include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - -include::8.4.0.asciidoc[tag=ingest-processor-log4j-cluster-instability-known-issue] - -[[breaking-8.5.0]] -[float] -=== Breaking changes - -CRUD:: -* Reject unknown bulk actions {es-pull}89450[#89450] - -[[bug-8.5.0]] -[float] -=== Bug fixes - -Aggregations:: -* Fix `auto_date_histogram` > `ip_range` {es-pull}90317[#90317] (issue: {es-issue}90121[#90121]) -* Fail when rebuilding scorer in `breadth_first` mode and query context has changed {es-pull}89993[#89993] (issue: {es-issue}37650[#37650]) -* Fix merging with empty results {es-pull}86939[#86939] (issue: {es-issue}84622[#84622]) -* Fix partial reduce bug in `ip_prefix` {es-pull}89734[#89734] (issue: {es-issue}89686[#89686]) -* Remove unexpected meta parameter in aggregation response {es-pull}89467[#89467] (issue: {es-issue}89455[#89455]) -* Consistently allow sorting `top_metrics` aggregations whose size is greater than 1 {es-pull}89974[#89974] (issue: {es-issue}86663[#86663]) -* Check parent circuit breaker when allocating an empty bucket {es-pull}89568[#89568] (issue: {es-issue}80789[#80789]) - -Authorization:: -* Remove magic string for the `"__empty"` role {es-pull}89766[#89766] - -CCR:: -* Order follower backing indices for data streams by original name {es-pull}90850[#90850] (issue: {es-issue}90820[#90820]) - -Cluster Coordination:: -* Capture deprecation warnings in batched master tasks {es-pull}85525[#85525] (issue: {es-issue}85506[#85506]) -* Check circuit breaker before sending join request {es-pull}89318[#89318] (issue: {es-issue}85003[#85003]) - -Distributed:: -* Fork `TransportClusterStateAction` to `MANAGEMENT` thread pool {es-pull}90996[#90996] - -Engine:: -* Fix `"path.conf'"` typo in `Security.java` {es-pull}89248[#89248] (issue: {es-issue}89327[#89327]) - -Geo:: -* Buffer H3Polygon2D bounding boxes to avoid edge precision issues {es-pull}89196[#89196] (issues: {es-issue}89868[#89868], {es-issue}87391[#87391]) -* Fix date histogram range edge case {es-pull}88957[#88957] -* Format runtime `geo_points` {es-pull}85449[#85449] (issue: {es-issue}85245[#85245]) -* Generate error if scripts load `_source` with synthetic `_source` enabled {es-pull}88334[#88334] - -Graph:: -* Fix race condition in timeout {es-pull}88946[#88946] (issue: {es-issue}55396[#55396]) - -Health:: -* Modify disk indicator details to provide an overview of the disk space health {es-pull}90189[#90189] -* Fix disk indicator impacts and diagnosis {es-pull}90262[#90262] -* Fix the details calculation of the disk indicator {es-pull}90869[#90869] -* Report impact and diagnosis data nodes without disk space and no blocked indices {es-pull}90772[#90772] (issue: {es-issue}90442[#90442]) -* Update minimum version for health node reporting to 8.5 {es-pull}90365[#90365] (issue: {es-issue}90359[#90359]) - -Indices APIs:: -* Avoid capturing per-task `RolloverResult` {es-pull}90626[#90626] (issue: {es-issue}90620[#90620]) -* Fix shard splitting for `nested` {es-pull}89351[#89351] (issue: {es-issue}88109[#88109]) - -Infra/Core:: -* Fix `allow_no_indices` request option in special cases {es-pull}89622[#89622] -* Fix repeated error save loops in File Settings Service {es-pull}90271[#90271] (issue: {es-issue}90222[#90222]) -* Fix date rounding for date math parsing {es-pull}90458[#90458] (issue: {es-issue}90187[#90187]) -* Fix disabling APM tracing for `CancellableTask` in `TrainedModelAssignmentNodeService` {es-pull}90972[#90972] (issue: {es-issue}89850[#89850]) -* Support camel case dates on 7.x indices {es-pull}88914[#88914] (issue: {es-issue}84199[#84199]) - -Infra/Scripting:: -* Fix true/false accumulation bug in boolean `source` fallback {es-pull}90895[#90895] - -Ingest Node:: -* Set the enrich maintenance cluster lifecycle listener only once {es-pull}90486[#90486] - -Machine Learning:: -* Require correct tier processors when multiple AZs are present {es-pull}90903[#90903] -* Return 408 instead of 500 when open/start APIs time out {es-pull}89775[#89775] (issue: {es-issue}89585[#89585]) - -Mapping:: -* Fix duplication bug for `source` fallback in numeric types {es-pull}89352[#89352] -* Include runtime fields in total fields count {es-pull}89251[#89251] (issue: {es-issue}88265[#88265]) -* Fix `aggregate_metric_double` multi-values exception {es-pull}90290[#90290] -* Validate field names when subobjects are disabled {es-pull}90950[#90950] - -Monitoring:: -* Add fields to fix {ls} cgroup graphs {es-pull}90493[#90493] - -Network:: -* Fix `RecyclerBytesStreamOutput` allocating unlimited heap for some capacities {es-pull}90632[#90632] - -Recovery:: -* Fix overcounting recovered bytes after network disconnect {es-pull}90477[#90477] (issue: {es-issue}90441[#90441]) - -Search:: -* Add support for predefined char class regexp on wildcard fields {es-pull}90064[#90064] -* Deduplicate fetching doc-values fields {es-pull}89094[#89094] -* Don't shortcut the total hit count for text fields {es-pull}90341[#90341] (issue: {es-issue}89760[#89760]) -* Safeguard `RegExp` use against `StackOverflowError` {es-pull}84624[#84624] (issue: {es-issue}82923[#82923]) -* Use MB rather than GB to calculate max boolean clauses {es-pull}90309[#90309] (issue: {es-issue}86136[#86136]) - -Snapshot/Restore:: -* Fix incorrect failed shards count in APIs for current snapshots {es-pull}89534[#89534] -* Fix over-allocation of mounted indices on a cold/frozen node {es-pull}86331[#86331] -* Fix quadratic complexity in `SnapshotStatus` serialization {es-pull}90795[#90795] -* Fork building snapshot status response off of transport thread {es-pull}90651[#90651] -* Make sure listener is resolved when file queue is cleared {es-pull}89929[#89929] -* Re-register a corrupt repository to unblock it {es-pull}89719[#89719] (issue: {es-issue}89130[#89130]) -* Reject unknown request body fields in mount API {es-pull}88987[#88987] (issue: {es-issue}75982[#75982]) - -TSDS:: -* Fix segment stats in TSDS {es-pull}89754[#89754] (issue: {es-issue}89609[#89609]) -* Fix extra fields in `GET` request for synthetic `_source` {es-pull}89778[#89778] -* Fix `scaled_float` rounding for synthetic `_source` {es-pull}88916[#88916] (issue: {es-issue}88854[#88854]) - -Transform:: -* Don't fail a transform on a ClusterBlockException, this may be due to ILM closing an index {es-pull}90396[#90396] (issue: {es-issue}89802[#89802]) -* Fix NPE in transform scheduling {es-pull}90347[#90347] (issues: {es-issue}90356[#90356], {es-issue}88203[#88203], {es-issue}90301[#90301], {es-issue}90255[#90255]) -* Improve error handling in state persistence {es-pull}88910[#88910] (issue: {es-issue}88905[#88905]) -* Return `408` instead of `500` when the start API times out {es-pull}89774[#89774] - -Vector Search:: -* Fix bug for `kNN` with filtered aliases {es-pull}89621[#89621] - -Watcher:: -* Allow `xpack.notification.email.account.domain_allowlist` to be set dynamically {es-pull}90426[#90426] (issue: {es-issue}89913[#89913]) -* Handling timeout exceptions on watcher startup {es-pull}90421[#90421] (issue: {es-issue}44981[#44981]) - -[[deprecation-8.5.0]] -[float] -=== Deprecations - -Infra/Plugins:: -* Deprecate network plugins {es-pull}88924[#88924] -* Deprecate overriding `DiscoveryPlugin` internals {es-pull}88925[#88925] - -[[enhancement-8.5.0]] -[float] -=== Enhancements - -Authentication:: -* Add more accurate error message for LDAP user modes {es-pull}89492[#89492] - -Authorization:: -* Add indices permissions to {ents} service account {es-pull}89869[#89869] -* Add information of resolved roles in denial messages {es-pull}89680[#89680] - -Autoscaling:: -* Centralize the concept of processors configuration {es-pull}89662[#89662] - -Cluster Coordination:: -* Preemptively compute `RoutingNodes` and the indices lookup during publication {es-pull}89005[#89005] -* Preemptively initialize routing nodes and indices lookup on all node types {es-pull}89032[#89032] - -Distributed:: -* Batch index delete cluster state updates {es-pull}90033[#90033] (issue: {es-issue}90022[#90022]) -* Increase the minimum size of the management pool to `2` {es-pull}90193[#90193] - -Health:: -* Add IDs to health API diagnoses and impacts {es-pull}90072[#90072] -* Add a check to the master stability health API when there is no master and the current node is not master eligible {es-pull}89219[#89219] -* Add logic to `master_is_stable` indicator to check for discovery problems {es-pull}88020[#88020] -* Poll for cluster diagnostics information {es-pull}89014[#89014] -* Update SLM health diagnosis message to include unhealthy policy details {es-pull}89138[#89138] - -Highlighting:: -* Improve efficiency of `BoundedBreakIteratorScanner` fragmentation algorithm {es-pull}89041[#89041] (issues: {es-issue}73569[#73569], {es-issue}73785[#73785]) - -ILM+SLM:: -* Add validations for the downsampling ILM action {es-pull}90295[#90295] -* Ensure that ILM does not roll over empty indices {es-pull}89557[#89557] (issue: {es-issue}86203[#86203]) -* Reuse informational message in lifecycle step {es-pull}89419[#89419] -* Move log-related logic into log block in `IndexLifecycleRunner` {es-pull}89292[#89292] - -Infra/Core:: -* Add reserved `/_snapshot/repo` file based settings {es-pull}89601[#89601] -* Add `upgrade_status` attributes to Fleet Agents {es-pull}89845[#89845] -* Add support for `/_autoscaling/policy` for file based settings {es-pull}89708[#89708] -* Add support for `/_security/role_mapping` for file based settings {es-pull}89667[#89667] -* Add support for support for `/_slm/policy` in file based settings {es-pull}89567[#89567] -* Retry file watch registration {es-pull}90537[#90537] (issue: {es-issue}89500[#89500]) - -Infra/Node Lifecycle:: -* Distinguish no shutdowns case in `NodeShutdownAllocationDecider` {es-pull}89851[#89851] (issue: {es-issue}89823[#89823]) - -Infra/Plugins:: -* Add deprecation message for deprecated plugin APIs {es-pull}88961[#88961] -* Register stable plugins in `ActionModule` {es-pull}90067[#90067] -* Load plugin named components {es-pull}89969[#89969] - -Infra/Scripting:: -* Initial code to support binary expression scripts {es-pull}89895[#89895] -* Protect `_source` inside update scripts {es-pull}88733[#88733] -* Reindex and `UpdateByQuery` metadata {es-pull}88665[#88665] -* Add write Field API `NestedDocument` support {es-pull}90021[#90021] -* Add write Field API path manipulation {es-pull}89889[#89889] -* Add write Field API with basic path resolution {es-pull}89738[#89738] -* Add write Fields API for reindex, update, and update by query {es-pull}90145[#90145] - -Infra/Settings:: -* Introduce max headroom for disk watermark stages {es-pull}88639[#88639] (issue: {es-issue}81406[#81406]) - -License:: -* License check for user profile collaboration feature {es-pull}89990[#89990] - -Machine Learning:: -* Add measure of non-cache hit inference count {es-pull}90464[#90464] -* Add new `text_similarity` nlp task {es-pull}88439[#88439] -* Add new trained model deployment cache clear API {es-pull}89074[#89074] -* Add processor autoscaling decider {es-pull}89645[#89645] -* Distribute trained model allocations across availability zones {es-pull}89822[#89822] -* Use a bitset for deduplication of frequent items {es-pull}88943[#88943] -* Optimize frequent items transaction lookup {es-pull}89062[#89062] -* Release native inference functionality as beta {es-pull}90418[#90418] -* Return `408` when the start deployment API times out {es-pull}89612[#89612] -* Skip renormalization after calling the node shutdown API {es-pull}89347[#89347] -* Compute outlier feature influence via the Gateaux derivative to improve attribution for high dimension vectors {ml-pull}2256[#2256] -* Improve classification and regression model train runtimes for data sets with many numeric features {ml-pull}2380[#2380], {ml-pull}2388[#2388], {ml-pull}2390[#2390], {ml-pull}2401[#2401] -* Increase the limit on the maximum number of classes to `100` for training classification models {ml-pull}2395[#2395] (issue: {ml-issue}2246[#2246]) - -Mapping:: -* Add `synthetic_source` support to `aggregate_metric_double` fields {es-pull}88909[#88909] -* Add `source` fallback for keyword fields using operation {es-pull}88735[#88735] -* Add `source` fallback support for `match_only_text` mapped type {es-pull}89473[#89473] -* Add `source` fallback support for date and `date_nanos` mapped types {es-pull}89440[#89440] -* Add `source` fallback support for unsigned long mapped type {es-pull}89349[#89349] -* Add support for `source` fallback with scaled float field type {es-pull}89053[#89053] -* Add support for `source` fallback with the boolean field type {es-pull}89052[#89052] -* Add text field support in the Painless scripting fields API {es-pull}89396[#89396] -* Clarify that fielddata is not supported for text fields error message {es-pull}89770[#89770] (issue: {es-issue}89485[#89485]) -* Add new mappings for Fleet Agent `last_checkin_message` and components fields {es-pull}89599[#89599] -* Support `source` fallback for `byte`, `short`, and `long` fields {es-pull}88954[#88954] -* Support `source` fallback for `double`, `float`, and `half_float` field types {es-pull}89010[#89010] - -Network:: -* Use chunked REST serialization for large REST responses {es-pull}88311[#88311] - -Recovery:: -* Disable recovering from snapshots in searchable snapshots {es-pull}86388[#86388] - -SQL:: -* Implement `DATE_FORMAT` function {es-pull}88388[#88388] (issue: {es-issue}55065[#55065]) -* Set `track_total_hits` to false when not needed {es-pull}89106[#89106] (issue: {es-issue}88764[#88764]) - -Search:: -* Enable `BloomFilter` for `_id` of non-datastream indices {es-pull}88409[#88409] -* In the field capabilities API, renew support for fields in the request body {es-pull}88972[#88972] (issue: {es-issue}86875[#86875]) - -Security:: -* Add usage stats report for user profiles {es-pull}90123[#90123] -* Implement grace period for user profile activation {es-pull}89566[#89566] -* Return limited-by role descriptors in Get/QueryApiKey response {es-pull}89273[#89273] -* Add option to return profile uid in `GetUser` response {es-pull}89570[#89570] -* Return `400` error for `GetUserPrivileges` call with API keys {es-pull}89333[#89333] -* Show assigned role descriptors in Get/QueryApiKey response {es-pull}89166[#89166] -* Add detailed errors in `hasPrivileges` response {es-pull}89224[#89224] -* Add support for multiple UIDs to the `GetProfile` API {es-pull}89023[#89023] - -Snapshot/Restore:: -* Add support for comparing `SnapshotsInProgress` {es-pull}89619[#89619] (issue: {es-issue}88732[#88732]) -* Prioritize shard snapshot tasks over file snapshot tasks and limit the number of the concurrently running snapshot tasks {es-pull}88209[#88209] (issue: {es-issue}83408[#83408]) - -Stats:: -* Introduce node mappings stats {es-pull}89807[#89807] - -TSDS:: -* Support `match_only_text` for synthetic `_source` {es-pull}89516[#89516] -* Support histogram field for synthetic `_source` {es-pull}89833[#89833] -* Support version field type for synthetic `_source` {es-pull}89706[#89706] -* Build `_id` without reparsing {es-pull}88789[#88789] -* Return metric fields in the field caps API {es-pull}88695[#88695] - -Transform:: -* Add an unattended mode setting to transform {es-pull}89212[#89212] - -[[feature-8.5.0]] -[float] -=== New features - -Authorization:: -* Introduce the new `read_security` cluster privilege {es-pull}89790[#89790] (issue: {es-issue}89245[#89245]) - -Health:: -* Enable the health node and the disk health indicator {es-pull}90085[#90085] (issue: {es-issue}84811[#84811]) - -Infra/Core:: -* Provide tracing implementation using OpenTelemetry and APM Java agent {es-pull}88443[#88443] (issue: {es-issue}84369[#84369]) - -Infra/Plugins:: -* Add the stable Plugin API module and analysis interfaces {es-pull}88775[#88775] - -Machine Learning:: -* Make `bucket_correlation` aggregation generally available {es-pull}88655[#88655] -* Make `bucket_count_ks_test` aggregation generally available {es-pull}88657[#88657] - -Security:: -* Support bulk updates of API keys {es-pull}88856[#88856] - -TSDS:: -* Add a TSID global ordinal to `TimeSeriesIndexSearcher` {es-pull}90035[#90035] -* Release time series data stream functionality {es-pull}90116[#90116] (issue: {es-issue}74660[#74660]) -* Add synthetic `_source` support for the `ignore_above` parameter on `keyword` fields {es-pull}89466[#89466] - -Vector Search:: -* Add synthetic `_source` support for `dense_vector` {es-pull}89840[#89840] - -[[regression-8.5.0]] -[float] -=== Regressions - -Infra/Scripting:: -* Fix fields API caching regression {es-pull}90017[#90017] - -[[upgrade-8.5.0]] -[float] -=== Upgrades - -Client:: -* Upgrade Apache Commons Logging to 1.2 {es-pull}85745[#85745] (issue: {es-issue}40305[#40305]) - -Packaging:: -* Upgrade bundled JDK to Java 19 {es-pull}90571[#90571] diff --git a/docs/reference/release-notes/8.5.1.asciidoc b/docs/reference/release-notes/8.5.1.asciidoc deleted file mode 100644 index 89ccaec8af21e..0000000000000 --- a/docs/reference/release-notes/8.5.1.asciidoc +++ /dev/null @@ -1,76 +0,0 @@ -[[release-notes-8.5.1]] -== {es} version 8.5.1 - - -Also see <>. - -[[known-issues-8.5.1]] -[float] -=== Known issues - -include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] - -include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - -include::8.4.0.asciidoc[tag=ingest-processor-log4j-cluster-instability-known-issue] - -[[bug-8.5.1]] -[float] -=== Bug fixes - -Audit:: -* Fix NPE in auditing `authenticationSuccess` for non-existing run-as user {es-pull}91171[#91171] - -Authentication:: -* Ensure PKI's `delegated_by_realm` metadata respect run-as {es-pull}91173[#91173] - -Authorization:: -* Ensure `TermsEnum` action works correctly with API keys {es-pull}91170[#91170] - -Cluster Coordination:: -* Fix corrupted Metadata from index and alias having the same name {es-pull}91456[#91456] - -EQL:: -* Fix EQLSearchRequest serialization (bwc) {es-pull}91402[#91402] - -Geo:: -* Fix handling indexed envelopes crossing the dateline in mvt API {es-pull}91105[#91105] (issue: {es-issue}91060[#91060]) - -Infra/CLI:: -* Fix carriage return removal when reading a long line from terminal {es-pull}91131[#91131] (issue: {es-issue}89227[#89227]) - -Infra/Core:: -* Fix APM configuration file delete {es-pull}91058[#91058] (issue: {es-issue}89439[#89439]) - -Machine Learning:: -* Allow NLP truncate option to be updated when span is set {es-pull}91224[#91224] -* Interim buckets should not count towards the total bucket count {es-pull}91288[#91288] - -Network:: -* Fix `TransportActionProxy` for local execution {es-pull}91289[#91289] - -Transform:: -* Make transform `_preview` request cancellable {es-pull}91313[#91313] (issue: {es-issue}91286[#91286]) - -[[enhancement-8.5.1]] -[float] -=== Enhancements - -Authorization:: -* Add privileges for connectors index creation {es-pull}91026[#91026] -* Add privileges for crawler logs indices in Enterprise Search service account {es-pull}91094[#91094] - -Infra/Core:: -* Allow legacy index settings on legacy indices {es-pull}90264[#90264] (issue: {es-issue}84992[#84992]) -* Check for unassigned shards on node shutdown {es-pull}91297[#91297] (issue: {es-issue}88635[#88635]) - -[[upgrade-8.5.1]] -[float] -=== Upgrades - -Packaging:: -* Update bundled JDK to Java 19.0.1 {es-pull}91025[#91025] (issue: {es-issue}91010[#91010]) - - diff --git a/docs/reference/release-notes/8.5.2.asciidoc b/docs/reference/release-notes/8.5.2.asciidoc deleted file mode 100644 index b6f8ec1646496..0000000000000 --- a/docs/reference/release-notes/8.5.2.asciidoc +++ /dev/null @@ -1,51 +0,0 @@ -[[release-notes-8.5.2]] -== {es} version 8.5.2 - - -Also see <>. - -[[known-issues-8.5.2]] -[float] -=== Known issues - -include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] - -include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - -include::8.4.0.asciidoc[tag=ingest-processor-log4j-cluster-instability-known-issue] - -[[bug-8.5.2]] -[float] -=== Bug fixes - -Authorization:: -* Avoid potential unsupported operation exception in doc bitset cache {es-pull}91490[#91490] - -EQL:: -* Refine bwc version checks on `EqlSearchRequest` {es-pull}91510[#91510] - -Health:: -* SLM uneahlthy policies diagnosis recommends correct URL in action {es-pull}91506[#91506] - -Ingest Node:: -* Refactor `DatabaseNodeService` as a cluster state listener {es-pull}91567[#91567] (issue: {es-issue}86999[#86999]) - -Stats:: -* Fix NPE in IndexService getNodeMappingStats {es-pull}91334[#91334] (issue: {es-issue}91259[#91259]) - -Transform:: -* Fix failure when resolving indices from CCS {es-pull}91622[#91622] (issue: {es-issue}91550[#91550]) - -[[enhancement-8.5.2]] -[float] -=== Enhancements - -EQL:: -* Remove version limitations for CCS {es-pull}91409[#91409] - -Ingest Node:: -* Refactor enrich maintenance coordination logic {es-pull}90931[#90931] - - diff --git a/docs/reference/release-notes/8.5.3.asciidoc b/docs/reference/release-notes/8.5.3.asciidoc deleted file mode 100644 index 70d92d42a038c..0000000000000 --- a/docs/reference/release-notes/8.5.3.asciidoc +++ /dev/null @@ -1,55 +0,0 @@ -[[release-notes-8.5.3]] -== {es} version 8.5.3 - -Also see <>. - -[[known-issues-8.5.3]] -[float] -=== Known issues - -include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] - -include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - -include::8.4.0.asciidoc[tag=ingest-processor-log4j-cluster-instability-known-issue] - -[[bug-8.5.3]] -[float] -=== Bug fixes - -Infra/Core:: -* Add `trace.id` to request trace logs {es-pull}91772[#91772] (issue: {es-issue}88174[#88174]) -* `DoPrivileged` in `ElasticsearchEncaughtExceptionHandler` and check modify thread {es-pull}91704[#91704] (issue: {es-issue}91650[#91650]) - -Ingest Node:: -* Handle any exception thrown while generating source for an `IngestDocument` {es-pull}91981[#91981] - -Machine Learning:: -* ML stats failures should not stop the usage API working {es-pull}91917[#91917] (issue: {es-issue}91893[#91893]) - -Stats:: -* Fix NPE in IndexService getNodeMappingStats {es-pull}91334[#91334] (issue: {es-issue}91259[#91259]) - -Transform:: -* Fix failure when resolving indices from CCS {es-pull}91622[#91622] (issue: {es-issue}91550[#91550]) - -[[enhancement-8.5.3]] -[float] -=== Enhancements - -Ingest Node:: -* Refactor enrich maintenance coordination logic {es-pull}90931[#90931] - -TLS:: -* Support SAN/dnsName for restricted trust {es-pull}91946[#91946] - -[[upgrade-8.5.3]] -[float] -=== Upgrades - -Engine:: -* Upgrade Lucene to version 9.4.2 {es-pull}91823[#91823] - - diff --git a/docs/reference/release-notes/8.6.0.asciidoc b/docs/reference/release-notes/8.6.0.asciidoc deleted file mode 100644 index 1e30c4a4a9c49..0000000000000 --- a/docs/reference/release-notes/8.6.0.asciidoc +++ /dev/null @@ -1,295 +0,0 @@ -[[release-notes-8.6.0]] -== {es} version 8.6.0 - -Also see <>. - -[[known-issues-8.6.0]] -[float] -=== Known issues - -include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] - -include::8.0.0.asciidoc[tag=jackson-filtering-bug] - -include::8.4.0.asciidoc[tag=ingest-processor-log4j-cluster-instability-known-issue] - -// tag::reconciliation-imbalance-known-issue[] -* Shard rebalancing may temporarily unbalance cluster -+ -From 8.6.0 onwards the default shard rebalancing algorithm will compute the -final desired balance and then make shard movements to reconcile the current -state of the cluster with the desired state. However the order in which the -shard movements take place may be skewed towards certain nodes, causing the -cluster to become temporarily unbalanced while the reconciliation is ongoing. -As always, once a node reaches a disk watermark it will not accept any -additional shards, but this skew may result in nodes reaching their disk -watermarks more often than expected in normal operation. Once the -reconciliation process completes, the cluster will be balanced again. -+ -To avoid this problem, upgrade to 8.8.0 or later. -// end::reconciliation-imbalance-known-issue[] - -[[bug-8.6.0]] -[float] -=== Bug fixes - -Aggregations:: -* GeoBoundsAggregations reject sub aggregations {es-pull}91073[#91073] (issue: {es-issue}91072[#91072]) - -Allocation:: -* Avoid NPE when disassociateDeadNodes is executed for a node present in the desired balance {es-pull}91659[#91659] -* Check `NodesShutdownMetadata` type before assuming restart {es-pull}90807[#90807] - -Authentication:: -* Fix time unit for connection request timeout of JWKs reload {es-pull}92080[#92080] -* Improve performance for role mapping with DNs {es-pull}92074[#92074] -* Improve robustness of `JwkSet` reloading {es-pull}92081[#92081] -* Support stored authentication headers prior to version 6.7 {es-pull}92221[#92221] - -Authorization:: -* Make adding auth info to REST responses more robust {es-pull}92168[#92168] -* Security remove datemath special handling {es-pull}91047[#91047] - -Cluster Coordination:: -* Fix `TransportMasterNodeAction` holding a CS reference needlessly {es-pull}90724[#90724] (issue: {es-issue}89220[#89220]) -* Include last-committed data in publication {es-pull}92259[#92259] (issue: {es-issue}90158[#90158]) -* Unsafe bootstrap memory optimization {es-pull}92493[#92493] - -EQL:: -* EQL sequences: support join on multi-values {es-pull}89965[#89965] - -Graph:: -* Fix potential issue with graph api's timed out field in response {es-pull}91006[#91006] - -Health:: -* Don't account for the unassigned reason when diagnosing NO_VALID_SHARD_COPY {es-pull}92416[#92416] -* Fix NPE when evaluating the disk health for non-data nodes {es-pull}92643[#92643] -* Use https in the short URLs for the `shards_availability` indicator {es-pull}92310[#92310] - -Indices APIs:: -* Trigger index settings providers when updating component templates {es-pull}91615[#91615] (issue: {es-issue}91592[#91592]) - -Infra/Core:: -* Check reserved state in Metadata.isGlobalStateEquals {es-pull}92124[#92124] -* Datastream unavailable exception metadata {es-pull}91461[#91461] -* Fix `BytesRefArray` on append empty `BytesRef` {es-pull}91364[#91364] -* Fix index expression options for requests with a single name or pattern {es-pull}91231[#91231] -* Force init of Unbox in log4j {es-pull}92377[#92377] (issue: {es-issue}91964[#91964]) -* In file based settings, wait until security index is ready for role mappings {es-pull}92173[#92173] (issue: {es-issue}91939[#91939]) -* Index expression exclusions never trigger "not found" {es-pull}90902[#90902] -* Update error states from inside the main state executor {es-pull}90346[#90346] (issue: {es-issue}90337[#90337]) - -Infra/Scripting:: -* Fix compile with hex literals ending with d/f {es-pull}91501[#91501] (issue: {es-issue}88614[#88614]) - -Ingest Node:: -* Fixing a race condition in `EnrichCoordinatorProxyAction` that can leave an item stuck in its queue {es-pull}90688[#90688] (issue: {es-issue}90598[#90598]) - -Machine Learning:: -* Copy more settings when creating DF analytics destination index {es-pull}91546[#91546] (issue: {es-issue}89795[#89795]) -* Fix for 'No statistics' error message {ml-pull}2410[#2410] -* Fix for 'No counts available' error message {ml-pull}2414[#2414] -* Guard against input sequences that are too long for Question Answering models {es-pull}91924[#91924] -* Improve performance of closing files before spawning {ml-pull}2424[#2424] -* Skip remote clusters when performing up front privileges validation for datafeeds {es-pull}91895[#91895] (issue: {es-issue}87832[#87832]) -* Support fields with commas in data frame analytics `analyzed_fields` {es-pull}91710[#91710] (issue: {es-issue}72541[#72541]) -* Validate rule filters are present on open anomaly detection api {es-pull}92207[#92207] - - -Mapping:: -* Consolidate field name validation when parsing mappings and documents {es-pull}91328[#91328] -* Fix handling empty key case in the terms aggregation {es-pull}90822[#90822] - -Monitoring:: -* Fix logstash loadavg (xpack cases) {es-pull}90494[#90494] -* [Stack Monitoring] Update ES module mappings {es-pull}90649[#90649] - -Network:: -* Clean up on exception while chunking XContent {es-pull}92024[#92024] -* Fix Chunked APIs sending incorrect responses to HEAD requests {es-pull}92042[#92042] (issue: {es-issue}92032[#92032]) -* Reject connection attempts while closing {es-pull}92465[#92465] - -SQL:: -* Fix NPE on logging when not tracking total hits {es-pull}92425[#92425] - -Search:: -* Allow different decay values depending on the score function {es-pull}91195[#91195] (issue: {es-issue}78887[#78887]) -* Fix timing bug with DFS profiling {es-pull}92421[#92421] - -Snapshot/Restore:: -* Simplify and optimize deduplication of `RepositoryData` for a non-caching repository instance {es-pull}91851[#91851] (issue: {es-issue}89952[#89952]) - -Store:: -* Fix numOpenOutputs and modCount in ByteSizeCachingDirectory {es-pull}92440[#92440] (issue: {es-issue}92434[#92434]) - -Transform:: -* Skip remote clusters when performing up front privileges validation {es-pull}91788[#91788] - -Vector Search:: -* Make `knn` search requests fully cancellable {es-pull}90612[#90612] - -[[deprecation-8.6.0]] -[float] -=== Deprecations - -Allocation:: -* Deprecate state field in /_cluster/reroute response {es-pull}90399[#90399] -* Ensure balance threshold is at least 1 {es-pull}92100[#92100] - -Ingest Node:: -* Deprecate 'remove_binary' default of false for ingest attachment processor {es-pull}90460[#90460] - -Mapping:: -* Deprecate silently ignoring type, fields, copy_to and boost in metadata field definition {es-pull}90989[#90989] (issue: {es-issue}35389[#35389]) - -[[enhancement-8.6.0]] -[float] -=== Enhancements - -Allocation:: -* Clear up forecasted write load and shard size from previous write index during rollovers {es-pull}91590[#91590] -* Forecast average shard size during rollovers {es-pull}91561[#91561] -* Forecast write load during rollovers {es-pull}91425[#91425] -* Improve shard balancing {es-pull}91603[#91603] -* Introduce desired-balance allocator {es-pull}91343[#91343] -* Limit shard realocation retries {es-pull}90296[#90296] -* Prevalidate node removal API {es-pull}88952[#88952] -* Set default `cluster.routing.allocation.balance.disk_usage` {es-pull}91951[#91951] -* Store write load in the `IndexMetadata` during data streams rollovers {es-pull}91019[#91019] -* Update the default `cluster.routing.allocation.balance.disk_usage` {es-pull}92065[#92065] -* `DesiredBalance:` expose it via _internal/desired_balance {es-pull}91038[#91038] (issue: {es-issue}90583[#90583]) - -Authorization:: -* [Fleet] Added logs-elastic_agent* read privileges to `kibana_system` {es-pull}91701[#91701] - -CRUD:: -* Keep track of average shard write load {es-pull}90768[#90768] (issue: {es-issue}90102[#90102]) - -Geo:: -* Centroid aggregation for cartesian points and shapes {es-pull}89216[#89216] (issue: {es-issue}90156[#90156]) -* Improve H3#hexRing logic and add H3#areNeighborCells method {es-pull}91140[#91140] -* Move SpatialUtils to geo library {es-pull}88088[#88088] (issue: {es-issue}86607[#86607]) -* Reduce number of object allocations in H3#geoToH3 and speed up computations {es-pull}91492[#91492] -* Support `cartesian_bounds` aggregation on point and shape {es-pull}91298[#91298] (issue: {es-issue}90157[#90157]) - -ILM+SLM:: -* ILM: Get policy support wildcard name {es-pull}89238[#89238] - -Infra/Core:: -* Handle APM global labels as affix setting {es-pull}91438[#91438] (issue: {es-issue}91278[#91278]) -* Improve date math exclusions in expressions {es-pull}90298[#90298] -* Introduce a phase to use String.equals on constant strings, rather than def equality {es-pull}91362[#91362] (issue: {es-issue}91235[#91235]) -* More actionable error for ancient indices {es-pull}91243[#91243] -* Operator/index templates {es-pull}90143[#90143] -* Operator/ingest {es-pull}89735[#89735] -* Transport threads and `_hot_threads` {es-pull}90482[#90482] (issue: {es-issue}90334[#90334]) -* Upgrade XContent to Jackson 2.14.0 and enable Fast Double Parser {es-pull}90553[#90553] - -Infra/Plugins:: -* Create placeholder plugin when loading stable plugins {es-pull}90870[#90870] -* Example stable plugin {es-pull}90805[#90805] -* Make `extendedPlugins,` `HasNativeController` and `moduleName` optional in plugin descriptor {es-pull}90835[#90835] -* Rename `NamedComponent` name parameter to value {es-pull}91306[#91306] - -Infra/Scripting:: -* Use an explicit null check for null receivers in painless, rather than an NPE {es-pull}91347[#91347] (issue: {es-issue}91236[#91236]) - -Machine Learning:: -* Add a filter parameter to frequent items {es-pull}91137[#91137] -* Add a regex to the output of the `categorize_text` aggregation {es-pull}90723[#90723] -* Add ability to filter and sort buckets by `change_point` numeric values {es-pull}91299[#91299] -* Add api to update trained model deployment `number_of_allocations` {es-pull}90728[#90728] -* Alias timestamp to @timestamp in anomaly detection results index {es-pull}90812[#90812] -* Allow `model_aliases` to be used with Pytorch trained models {es-pull}91296[#91296] -* Allow overriding timestamp field to null in file structure finder {es-pull}90764[#90764] -* Audit a message every day the datafeed has seen no data {es-pull}91774[#91774] -* Low priority trained model deployments {es-pull}91234[#91234] (issue: {es-issue}91024[#91024]) -* Provide additional information about anomaly score factors {es-pull}90675[#90675] - -Mapping:: -* Don't create IndexCaps objects when recording unmapped fields {es-pull}90806[#90806] (issue: {es-issue}90796[#90796]) -* aggregate metric double add a max min validation {es-pull}90381[#90381] - -Recovery:: -* Remove resize index settings once shards are started {es-pull}90391[#90391] (issue: {es-issue}90127[#90127]) - -Rollup:: -* Test downsample runtime fields and security {es-pull}90593[#90593] - -Search:: -* Add LimitedOffsetsEnum to Limited offset token {es-pull}86110[#86110] (issue: {es-issue}86109[#86109]) -* Add profiling and documentation for dfs phase {es-pull}90536[#90536] (issue: {es-issue}89713[#89713]) -* Bulk merge field-caps responses using mapping hash {es-pull}86323[#86323] -* Enhance nested depth tracking when parsing queries {es-pull}90425[#90425] -* Expose telemetry about search usage {es-pull}91528[#91528] -* Return docs when using nested mappings in archive indices {es-pull}90585[#90585] (issue: {es-issue}90523[#90523]) -* Use `IndexOrDocValues` query for IP range queries {es-pull}90303[#90303] (issue: {es-issue}83658[#83658]) - -Snapshot/Restore:: -* Increase snaphot pool max size to 10 {es-pull}90282[#90282] (issue: {es-issue}89608[#89608]) -* Tie snapshot speed to node bandwidth settings {es-pull}91021[#91021] (issue: {es-issue}57023[#57023]) - -Store:: -* Allow plugins to wrap Lucene directories created by the `IndexModule` {es-pull}91556[#91556] - -TLS:: -* Add certificate start/expiry dates to SSL Diagnostic message {es-pull}89461[#89461] - -TSDB:: -* Generate 'index.routing_path' from dynamic mapping templates {es-pull}90552[#90552] (issue: {es-issue}90528[#90528]) -* Support malformed numbers in synthetic `_source` {es-pull}90428[#90428] -* Support synthetic `_source` for `_doc_count` field {es-pull}91465[#91465] -* Synthetic _source: support `field` in many cases {es-pull}89950[#89950] -* Synthetic `_source`: `ignore_malformed` for `ip` {es-pull}90038[#90038] -* Synthetic `_source`: support `wildcard` field {es-pull}90196[#90196] - -Transform:: -* Add a health section to transform stats {es-pull}90760[#90760] -* Support `aggregate_metric_double` field type in transform aggregations {es-pull}91045[#91045] - -Vector Search:: -* Add profiling information for knn vector queries {es-pull}90200[#90200] - -[[feature-8.6.0]] -[float] -=== New features - -Distributed:: -* Add "index" and "search" node roles with feature flag and setting {es-pull}90993[#90993] - -EQL:: -* EQL samples {es-pull}91312[#91312] - -Health:: -* Use chunked encoding for `RestGetHealthAction` {es-pull}91515[#91515] (issue: {es-issue}90223[#90223]) -* [HealthAPI] Use the `RestCancellableNodeClient` infrastructure {es-pull}91587[#91587] - -Machine Learning:: -* Make `categorize_text` aggregation GA {es-pull}88600[#88600] - -Vector Search:: -* Add fielddata and scripting support for byte-sized vectors {es-pull}91184[#91184] -* Add support for indexing byte-sized knn vectors {es-pull}90774[#90774] - -[[regression-8.6.0]] -[float] -=== Regressions - -Infra/Core:: -* Revert "Remove `ImmutableOpenMap` from snapshot services" {es-pull}90287[#90287] - -[[upgrade-8.6.0]] -[float] -=== Upgrades - -Infra/Logging:: -* Upgrade to log4j 2.19.0 {es-pull}90589[#90589] (issue: {es-issue}90584[#90584]) - -Network:: -* Upgrade to Netty 4.1.82.Final {es-pull}90604[#90604] -* Upgrade to Netty 4.1.84 {es-pull}91271[#91271] - -Snapshot/Restore:: -* Upgrade GCS SDK to 2.13.1 {es-pull}92327[#92327] - - diff --git a/docs/reference/release-notes/8.6.1.asciidoc b/docs/reference/release-notes/8.6.1.asciidoc deleted file mode 100644 index cad0a466606be..0000000000000 --- a/docs/reference/release-notes/8.6.1.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -[[release-notes-8.6.1]] -== {es} version 8.6.1 - -Also see <>. - -[[known-issues-8.6.1]] -[float] -=== Known issues - -include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue] - -include::8.4.0.asciidoc[tag=ingest-processor-log4j-cluster-instability-known-issue] - -[[bug-8.6.1]] -[float] -=== Bug fixes - -Data streams:: -* Fix wildcard expansion for delete-by-query on data streams {es-pull}92891[#92891] -* Fix wildcard expansion for update-by-query on data streams {es-pull}92717[#92717] (issue: {es-issue}90272[#90272]) -* Patch jackson-core with locally modified class #92984 -This fixes an issue in jackson parsing (issue: {es-issue}92480[#92480]) - -Distributed:: -* Fix `ByteArrayIndexInput` with nonzero offset {es-pull}93205[#93205] - -ILM+SLM:: -* Get repository metadata from the cluster state doesn't throw an exception if a repo is missing {es-pull}92914[#92914] - -Infra/Core:: -* Don't announce ready until file settings are applied {es-pull}92856[#92856] (issue: {es-issue}92812[#92812]) - -Machine Learning:: -* Utilise parallel allocations where the inference request contains multiple documents {es-pull}92359[#92359] - -Mapping:: -* Fix `_bulk` api `dynamic_templates` and explicit `op_type` {es-pull}92687[#92687] - -Search:: -* Avoid doing I/O when fetching min and max for keyword fields {es-pull}92026[#92026] -* Reduce memory required for search responses when many shards are unavailable {es-pull}91365[#91365] (issue: {es-issue}90622[#90622]) - - diff --git a/docs/reference/release-notes/8.6.2.asciidoc b/docs/reference/release-notes/8.6.2.asciidoc deleted file mode 100644 index 8da2bd2c5e0e5..0000000000000 --- a/docs/reference/release-notes/8.6.2.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -[[release-notes-8.6.2]] -== {es} version 8.6.2 - -Also see <>. - -[[known-issues-8.6.2]] -[float] -=== Known issues - -include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue] - -include::8.4.0.asciidoc[tag=ingest-processor-log4j-cluster-instability-known-issue] - -[[bug-8.6.2]] -[float] -=== Bug fixes - -Allocation:: -* Only simulate legal desired moves {es-pull}93635[#93635] (issue: {es-issue}93271[#93271]) - -Health:: -* Fix the reporting of initializing shards in the Health API {es-pull}93502[#93502] (issue: {es-issue}90327[#90327]) - -Infra/Core:: -* Don't report MIGRATION_NEEDED for 7.x indices {es-pull}93666[#93666] - -Ingest Node:: -* Fix geo ip database file leak when processing IP arrays {es-pull}93177[#93177] - -Machine Learning:: -* Use long inference timeout at ingest {es-pull}93731[#93731] - - diff --git a/docs/reference/release-notes/8.7.0.asciidoc b/docs/reference/release-notes/8.7.0.asciidoc deleted file mode 100644 index 75cc1d80987aa..0000000000000 --- a/docs/reference/release-notes/8.7.0.asciidoc +++ /dev/null @@ -1,402 +0,0 @@ -[[release-notes-8.7.0]] -== {es} version 8.7.0 - -Also see <>. - -[[known-issues-8.7.0]] -[float] -=== Known issues - -include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue] - -[[breaking-8.7.0]] -[float] -=== Breaking changes - -Ingest Node:: -* Making `JsonProcessor` stricter so that it does not silently drop data {es-pull}93179[#93179] (issue: {es-issue}92898[#92898]) - -Indices APIs:: -* The <> API implementation was adjusted to use the -same index resolution mechanism as other similar APIs, adding support for the -`ignore_unavailable` and `allow_no_indices` flags and the `_all` meta-index. If -there are no matching indices then earlier versions of this API would return an -empty result with the `200 OK` HTTP response code, but from 8.7.0 onwards by -default it returns an `IndexNotFoundException` with the `404 Not Found` HTTP -response code. To recover the old behaviour, add the query parameter -`?ignore_unavailable=true` ({es-pull}92820[#92820]). - -[[bug-8.7.0]] -[float] -=== Bug fixes - -Aggregations:: -* Don't create a new `DoubleHistogram` instance for empty buckets {es-pull}92547[#92547] -* Fix: do not allow map key types other than String {es-pull}88686[#88686] (issue: {es-issue}66057[#66057]) - -Allocation:: -* Fallback to the actual shard size when forecast is not available {es-pull}93461[#93461] -* Skip `DiskThresholdMonitor` when cluster state is not recovered {es-pull}93699[#93699] -* Suppress response headers in `AllocationActionMultiListener` {es-pull}93777[#93777] (issue: {es-issue}93773[#93773]) - -Authentication:: -* Correctly remove domain from realm when rewriting `Authentication` for compatibility with node versions that don't -support domains {es-pull}93276[#93276] - -Authorization:: -* Fix Security's expression resolver to not remove unavailable but authorized names {es-pull}92625[#92625] - -CCR:: -* Deduplicate Heavy CCR Repository CS Requests {es-pull}91398[#91398] - -CRUD:: -* Avoid NPE in Stateless Get/mGet {es-pull}94164[#94164] -* Do not refresh all indices in `TransportBulkAction` {es-pull}93417[#93417] - -Cluster Coordination:: -* Delay master task failure notifications until commit {es-pull}92693[#92693] (issue: {es-issue}92677[#92677]) - -Data streams:: -* Allow different filters per `DataStream` in a `DataStreamAlias` {es-pull}92692[#92692] (issue: {es-issue}92050[#92050]) - -Geo:: -* Build index qualified name in cross cluster vector tile search {es-pull}94574[#94574] (issue: {es-issue}94557[#94557]) -* Check `GeohexGrid` bounds on geopoint using spherical coordinates {es-pull}92460[#92460] -* Fix bug when clipping Geometry collections in vector tiles {es-pull}93562[#93562] - -Health:: -* Take into account `max_headroom` in disk watermark calculations {es-pull}93157[#93157] (issue: {es-issue}93155[#93155]) - -ILM+SLM:: -* Allow ILM step transition to the phase terminal step {es-pull}91754[#91754] -* Avoiding `BulkProcessor` deadlock in ILMHistoryStore {es-pull}91238[#91238] (issues: {es-issue}68468[#68468], {es-issue}50440[#50440]) -* Fixed changing only the `forceMerge` flag in `SearchableSnapshotAction` wouldn't update the policy {es-pull}93847[#93847] -* Preventing ILM and SLM runtime state from being stored in a snapshot {es-pull}92252[#92252] - -Infra/CLI:: -* Restore printing bootstrap checks as errors {es-pull}93178[#93178] (issue: {es-issue}93074[#93074]) - -Infra/Core:: -* Add `jdk.internal.reflect` permission to es codebase {es-pull}92387[#92387] (issue: {es-issue}92356[#92356]) -* Add checks for exception loops through suppressed exceptions only {es-pull}93944[#93944] (issue: {es-issue}93943[#93943]) -* Ensure one-shot wrappers release their delegates {es-pull}92928[#92928] -* Fix `InputStream#readAllBytes` on `InputStreamIndexInput` {es-pull}92680[#92680] -* Fix indices resolver for datemath with colon {es-pull}92973[#92973] -* Make `FilterStreamInput` less trappy {es-pull}92422[#92422] - -Infra/Plugins:: -* Ensure ordering of plugin initialization {es-pull}93882[#93882] (issue: {es-issue}93851[#93851]) -* Fix unclosed directory stream in `ClassReaders` {es-pull}92890[#92890] (issue: {es-issue}92866[#92866]) -* Update the version of asm used by plugin scanner {es-pull}92784[#92784] (issue: {es-issue}92782[#92782]) - -Infra/REST API:: -* [Rest Api Compatibility] Format response media type with parameters {es-pull}92695[#92695] - -Infra/Scripting:: -* Fix NPE when method was called on an array type {es-pull}91713[#91713] (issue: {es-issue}87562[#87562]) - -Infra/Settings:: -* Fix parse failures for ILM operator settings {es-pull}94477[#94477] (issue: {es-issue}94465[#94465]) - -Ingest Node:: -* Better names and types for ingest stats {es-pull}93533[#93533] (issue: {es-issue}80763[#80763]) -* Correctly handle an exception case for ingest failure {es-pull}92455[#92455] -* Disable ingest-attachment logging {es-pull}93878[#93878] -* Download the geoip databases only when needed {es-pull}92335[#92335] (issue: {es-issue}90673[#90673]) -* Forwarding simulate calls to ingest nodes {es-pull}92171[#92171] -* Grok returns a list of matches for repeated pattern names #92092 {es-pull}92586[#92586] (issue: {es-issue}92092[#92092]) -* Handle a default/request pipeline and a final pipeline with minimal additional overhead {es-pull}93329[#93329] (issues: {es-issue}92843[#92843], {es-issue}81244[#81244], {es-issue}93118[#93118]) -* Ingest-attachment module tika dependency versions {es-pull}93755[#93755] -* More accurate total ingest stats {es-pull}91730[#91730] (issue: {es-issue}91358[#91358]) -* Speed up ingest geoip processors {es-pull}92372[#92372] -* Speed up ingest set and append processors {es-pull}92395[#92395] - -Machine Learning:: -* Allocate trained models if zone awareness attributes not set {es-pull}94128[#94128] (issue: {es-issue}94123[#94123]) -* Fix data counts race condition when starting a datafeed {es-pull}93324[#93324] (issue: {es-issue}93298[#93298]) -* Fix tokenization bug when handling normalization in BERT and MPNet {es-pull}92329[#92329] -* Free resources correctly when model loading is cancelled {es-pull}92204[#92204] -* Stop the `frequent_items` aggregation reporting a subset when a superset exists {es-pull}92239[#92239] -* Use long inference timeout at ingest {es-pull}93731[#93731] - -Mapping:: -* Fix dynamic mapping detection for invalid dates {es-pull}94115[#94115] (issue: {es-issue}93888[#93888]) -* No length check for source-only keyword fields {es-pull}93299[#93299] (issue: {es-issue}9304[#9304]) - -Network:: -* Delay Connection#onRemoved while pending {es-pull}92546[#92546] -* Fix fransport handshake starting before tls handshake completes {es-pull}90534[#90534] (issue: {es-issue}77999[#77999]) -* Protect `NodeConnectionsService` from stale conns {es-pull}92558[#92558] (issue: {es-issue}92029[#92029]) - -Recovery:: -* Disable recovery monitor before recovery start {es-pull}93551[#93551] (issue: {es-issue}93542[#93542]) -* Fix potential leak in `RemoteRecoveryHandler` {es-pull}91802[#91802] -* Report recovered files as recovered from snapshot for fully mounted searchable snapshots {es-pull}92976[#92976] - -Rollup:: -* Downsampling unmapped text fields {es-pull}94387[#94387] (issue: {es-issue}94346[#94346]) -* Propagate timestamp format and convert nanoseconds to milliseconds {es-pull}94141[#94141] (issue: {es-issue}94085[#94085]) -* Stop processing `TransportDownsampleAction` on failure {es-pull}94624[#94624] -* Support downsampling of histogram as labels {es-pull}93445[#93445] (issue: {es-issue}93263[#93263]) - -Search:: -* Add null check for sort fields over collapse fields {es-pull}94546[#94546] (issue: {es-issue}94407[#94407]) -* Annotated highlighter does not match when search contains both annotation and annotated term {es-pull}92920[#92920] (issue: {es-issue}91944[#91944]) -* Clear field caps index responses on cancelled {es-pull}93716[#93716] (issue: {es-issue}93029[#93029]) -* Do not include frozen indices in PIT by default {es-pull}94377[#94377] -* Fix NPE thrown by prefix query in strange scenarios {es-pull}94369[#94369] -* Fix _id field fetch issue. {es-pull}94528[#94528] (issue: {es-issue}94515[#94515]) -* Fix metadata `_size` when it comes to stored fields extraction {es-pull}94483[#94483] (issue: {es-issue}94468[#94468]) -* Fix missing override for matches in `ProfileWeight` {es-pull}92360[#92360] -* Nested path info shouldn't be added during `copy_to` {es-pull}93340[#93340] (issue: {es-issue}93117[#93117]) -* Use all profiling events on startup {es-pull}92087[#92087] -* Use keyword analyzer for untokenized fields in `TermVectorsService` {es-pull}94518[#94518] -* [Profiling] Adjust handling of last data slice {es-pull}94283[#94283] -* [Profiling] Ensure responses are only sent once {es-pull}93692[#93692] (issue: {es-issue}93691[#93691]) -* [Profiling] Handle response processing errors {es-pull}93860[#93860] - -Snapshot/Restore:: -* Fix unhandled exception when blobstore repository contains unexpected file {es-pull}93914[#93914] -* Support for GCS proxies everywhere in the GCS API {es-pull}92192[#92192] (issue: {es-issue}91952[#91952]) - -Stats:: -* Avoid capturing cluster state in TBbNA {es-pull}92255[#92255] - -TSDB:: -* Fix synthetic `_source` for sparse `_doc_count` field {es-pull}91769[#91769] (issue: {es-issue}91731[#91731]) - -Task Management:: -* Fix context leak in list tasks API {es-pull}93431[#93431] (issue: {es-issue}93428[#93428]) - -Transform:: -* Integrate "sourceHasChanged" call into failure handling and retry logic {es-pull}92762[#92762] (issue: {es-issue}92133[#92133]) - -Vector Search:: -* Fix `maxScore` calculation for kNN search {es-pull}93875[#93875] -* Fix explain for kNN search matches {es-pull}93876[#93876] - -[[enhancement-8.7.0]] -[float] -=== Enhancements - -Aggregations:: -* Optimize composite agg with leading global ordinal value source {es-pull}92197[#92197] - -Allocation:: -* Add `forecasted_write_load` and `forecasted_shard_size_in_bytes` to the endpoint {es-pull}92303[#92303] -* Expose tier balancing stats via internal endpoint {es-pull}92199[#92199] -* Introduce ShardRouting.Role {es-pull}92668[#92668] -* Prevalidate node removal API (pt. 2) {es-pull}91256[#91256] (issue: {es-issue}87776[#87776]) -* Simulate moves using cluster_concurrent_rebalance=2 {es-pull}93977[#93977] -* Unpromotables skip replication and peer recovery {es-pull}93210[#93210] - -Authentication:: -* Add new `token_type` setting to JWT realm {es-pull}91536[#91536] -* JWT realm - Initial support for access tokens {es-pull}91781[#91781] -* JWT realm - Simplify token principal calculation {es-pull}92315[#92315] -* JWT realm - add support for required claims {es-pull}92314[#92314] -* Support custom PBKDF2 password hashes {es-pull}92871[#92871] - -Authorization:: -* Allowed indices matcher supports nested limited roles {es-pull}93306[#93306] -* Extra `kibana_system` privileges for Fleet transform upgrades {es-pull}91499[#91499] -* Pre-authorize child search transport actions {es-pull}91886[#91886] - -Cluster Coordination:: -* Add links to troubleshooting docs {es-pull}92755[#92755] (issue: {es-issue}92741[#92741]) -* Improve node-{join,left} logging for troubleshooting {es-pull}92742[#92742] -* Repeat `cluster.initial_master_nodes` log warning {es-pull}92744[#92744] - -EQL:: -* EQL Samples: add support for multiple samples per key {es-pull}91783[#91783] - -Engine:: -* Add commits listener for `InternalEngine` and `CombinedDeletionPolicy` {es-pull}92017[#92017] -* Add primary term supplier to Engine.IndexCommitListener {es-pull}92101[#92101] -* Adjust range of allowed percentages of deletes in an index {es-pull}93188[#93188] -* Diff the list of filenames that are added by each new commit {es-pull}92238[#92238] -* Set a fixed compound file threshold of 1GB {es-pull}92659[#92659] - -Geo:: -* Add methods to H3#hexRing to prevent allocating long arrays {es-pull}92711[#92711] -* Add methods to prevent allocating long arrays during child navigation on H3 api {es-pull}92099[#92099] -* Add new H3 api method #h3ToNoChildrenIntersecting {es-pull}91673[#91673] -* In H3, compute destination point from distance and azimuth using planar 3d math" {es-pull}93084[#93084] -* Protect H3 library against integer overflow {es-pull}92829[#92829] -* Reduce number of object allocations in H3#h3ToGeoBoundary {es-pull}91586[#91586] -* Speed H3 library by using `FastMath` implementation for trigonometric functions {es-pull}91839[#91839] - -Health:: -* Expose Health Api telemetry via xpack {es-pull}91708[#91708] (issue: {es-issue}90877[#90877]) -* Health api stats {es-pull}91559[#91559] - -Indices APIs:: -* Add `ignore_missing_component_templates` config option {es-pull}92436[#92436] (issue: {es-issue}92426[#92426]) - -Infra/CLI:: -* Scan stable plugins for named components upon install {es-pull}92528[#92528] - -Infra/Core:: -* Add log level for JVM logs {es-pull}92382[#92382] -* Added new field `rollout_duration_seconds` to fleet-actions {es-pull}92640[#92640] -* Bind the readiness service to the wildcard address {es-pull}91329[#91329] (issue: {es-issue}90997[#90997]) -* Provide locally mounted secure settings implementation {es-pull}93392[#93392] - -Infra/Plugins:: -* Check stable plugin version at install and load time {es-pull}91780[#91780] -* Example stable plugins with settings {es-pull}92334[#92334] -* Load stable plugins as synthetic modules {es-pull}91869[#91869] -* Settings api for stable plugins {es-pull}91467[#91467] - -Infra/Scripting:: -* Script: Metadata `validateMetadata` optimization {es-pull}93333[#93333] -* Short-circuit painless def equality {es-pull}92102[#92102] -* Use primitive types rather than boxing/unboxing for iterating over primitive arrays from defs {es-pull}92025[#92025] - -Ingest Node:: -* Cache the creation of parsers within DateProcessor {es-pull}92880[#92880] -* Make `GeoIpProcessor` backing database instance pluggable {es-pull}93285[#93285] - -Machine Learning:: -* Add identification of multimodal distribution to anomaly explanations {ml-pull}2440[#2440] -* Add the ability to include and exclude values in Frequent items {es-pull}92414[#92414] -* Better error when `aggregate_metric_double` used in scrolling datafeeds {es-pull}92232[#92232] (issue: {es-issue}90592[#90592]) -* Implement extension pruning in frequent items to improve runtime {es-pull}92322[#92322] -* Improve `frequent_items` performance using global ordinals {es-pull}93304[#93304] -* Improve anomaly detection results indexing speed {es-pull}92417[#92417] -* Improve frequent items runtime {es-pull}93255[#93255] -* Increase the default timeout for the start trained model deployment API {es-pull}92328[#92328] -* Option to delete user-added annotations for the reset/delete job APIs {es-pull}91698[#91698] (issue: {es-issue}74310[#74310]) -* Persist data counts and datafeed timing stats asynchronously {es-pull}93000[#93000] -* Remove the PyTorch inference work queue as now handled in Elasticsearch {ml-pull}2456[#2456] -* Text Embedding search {es-pull}93531[#93531] -* Upgrade PyTorch to version 1.13.1 {ml-pull}2430[#2430] - - -Mapping:: -* Switch to Lucene's new `IntField/LongField/FloatField/DoubleField` {es-pull}93165[#93165] - -Monitoring:: -* Add kibana.stats.elasticsearch_client stats to the monitoring index templates. {es-pull}91508[#91508] -* Add monitoring mappings for es ingest metricset {es-pull}92950[#92950] - -Network:: -* Deserialize responses on the handling thread-pool {es-pull}91367[#91367] - -Performance:: -* Add vector distance scoring to micro benchmarks {es-pull}92340[#92340] - -Query Languages:: -* Introduce parameterized rule and executor {es-pull}92428[#92428] - -Recovery:: -* Make clean up files step configurable for peer-recovery of replicas {es-pull}92490[#92490] - -Search:: -* Access term dictionary more efficiently {es-pull}92269[#92269] -* Add `term` query support to `rank_features` mapped field {es-pull}93247[#93247] -* Add new `query_vector_builder` option to knn search clause {es-pull}93331[#93331] -* Add profiling plugin {es-pull}91640[#91640] -* Enable profiling plugin by default {es-pull}92787[#92787] -* Get stackframes and executables more concurrently {es-pull}93559[#93559] -* Improve the false positive rate of the bloom filter by setting 7 hash functions {es-pull}93283[#93283] -* Increase the number of threads of GET threadpool {es-pull}92309[#92309] -* Instrument Weight#count in ProfileWeight {es-pull}85656[#85656] (issue: {es-issue}85203[#85203]) -* Reduce memory usage of match all bitset {es-pull}92777[#92777] -* Runtime fields to optionally ignore script errors {es-pull}92380[#92380] -* Speed up retrieval of data for flamegraphs {es-pull}93448[#93448] -* Support retrieving inlined stack frames {es-pull}92863[#92863] -* [Profiling] Reduce GC pressure {es-pull}93590[#93590] - -Security:: -* Configurable retention period for invalidated or expired API keys {es-pull}92219[#92219] -* Record timestamp on API key invalidation {es-pull}91873[#91873] - -Snapshot/Restore:: -* Make `RecoveryPlannerService` optional {es-pull}92489[#92489] - -TSDB:: -* Enable bloom filter for `_id` field in tsdb indices {es-pull}92115[#92115] -* Improve downsampling performance by removing map lookups {es-pull}92494[#92494] (issue: {es-issue}90226[#90226]) -* Minor TSDB parsing speedup {es-pull}92276[#92276] -* Skip duplicate checks on segments that don't contain the document's timestamp {es-pull}92456[#92456] -* Support `fields` in synthetic source in last cases {es-pull}91595[#91595] - -Task Management:: -* `TransportGetTaskAction:` Wait for the task asynchronously {es-pull}93375[#93375] -* `TransportListTaskAction:` wait for tasks to finish asynchronously {es-pull}90977[#90977] (issue: {es-issue}89564[#89564]) - -Transform:: -* Add from parameter to Transform Start API {es-pull}91116[#91116] (issue: {es-issue}88646[#88646]) -* Support "offset" parameter in `DateHistogramGroupSource` {es-pull}93203[#93203] -* Trigger state persistence based on time {es-pull}93221[#93221] - -Vector Search:: -* Allow `null` to be provided for `dense_vector` field values {es-pull}93388[#93388] -* Allow more than one KNN search clause {es-pull}92118[#92118] (issue: {es-issue}91187[#91187]) - -Watcher:: -* Add ability for Watcher's webhook actions to send additional header {es-pull}93426[#93426] - -[[feature-8.7.0]] -[float] -=== New features - -Distributed:: -* Secure settings that can fall back to yml in Stateless {es-pull}91925[#91925] - -Geo:: -* Geohex aggregation on `geo_shape` field {es-pull}91956[#91956] (issue: {es-issue}90163[#90163]) -* Support geo_grid ingest processor {es-pull}93370[#93370] (issue: {es-issue}92473[#92473]) - -Health:: -* The Health API is now generally available {es-pull}92879[#92879] -* [HealthAPI] Add size parameter that controls the number of affected resources returned {es-pull}92399[#92399] (issue: {es-issue}91930[#91930]) -* [HealthAPI] Add support for the FEATURE_STATE affected resource {es-pull}92296[#92296] (issue: {es-issue}91353[#91353]) - -Infra/Plugins:: -* [Fleet] Add files and files data index templates and ILM policies {es-pull}91413[#91413] - -Ingest Node:: -* Redact Ingest Processor {es-pull}92951[#92951] - -Machine Learning:: -* Make `frequent_item_sets` aggregation GA {es-pull}93421[#93421] -* Make native inference generally available {es-pull}92213[#92213] - -TSDB:: -* Add a TSDB rate aggregation {es-pull}90447[#90447] -* Downsampling GA {es-pull}92913[#92913] -* Release time_series and rate (on counter fields) aggegations as tech preview {es-pull}93546[#93546] -* Time series (TSDS) GA {es-pull}91519[#91519] - -Transform:: -* Transform _schedule_now API {es-pull}92948[#92948] (issue: {es-issue}44722[#44722]) - -[[upgrade-8.7.0]] -[float] -=== Upgrades - -Infra/Core:: -* Align all usages of Jackson to be 2.14.2 {es-pull}93438[#93438] - -Ingest Node:: -* Upgrading tika to 2.6.0 {es-pull}92104[#92104] - -Network:: -* Upgrade to Netty 4.1.85 {es-pull}91846[#91846] -* Upgrade to Netty 4.1.86 {es-pull}92587[#92587] - -Query Languages:: -* Upgrade antlr to 4.11.1 for ql, eql and sql {es-pull}93238[#93238] - -Search:: -* Upgrade to Lucene 9.5.0 {es-pull}93385[#93385] -* Upgrade to lucene-9.5.0-snapshot-d19c3e2e0ed {es-pull}92957[#92957] - -Snapshot/Restore:: -* Align all usages of protobuf to be 3.21.9 {es-pull}92123[#92123] -* Bump reactor netty version {es-pull}92457[#92457] -* Consolidate google-oauth-client to latest version {es-pull}91722[#91722] - - diff --git a/docs/reference/release-notes/8.7.1.asciidoc b/docs/reference/release-notes/8.7.1.asciidoc deleted file mode 100644 index 70f5e4add88ca..0000000000000 --- a/docs/reference/release-notes/8.7.1.asciidoc +++ /dev/null @@ -1,80 +0,0 @@ -[[release-notes-8.7.1]] -== {es} version 8.7.1 - -Also see <>. - -[[known-issues-8.7.1]] -[float] -=== Known issues - -* `ArrayIndexOutOfBoundsException` may be thrown while creating a transport message -+ -Certain sequences of writes and seeks to the buffer used to create a transport -message may encounter an alignment bug which results in an -`ArrayIndexOutOfBoundsException`, preventing the transport message from being -sent. -+ -This issue is fixed in 8.8.0. - -include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue] - -// tag::no-preventive-gc-issue[] -* High Memory Pressure due to a GC JVM setting change -+ -This version of Elasticsearch is bundled with JDK 20. In JDK 20 -https://bugs.openjdk.org/browse/JDK-8293861[Preventive GC is disabled by default]. -This may lead to increased memory pressure and an increased number of CircuitBreakerExceptions when retrieving large -documents under some load patterns. (issue: {es-issue}99592[#99592]) -+ -If this change affects your use of Elasticsearch, consider re-enabling the previous behaviour -by adding the JVM arguments `-XX:+UnlockDiagnosticVMOptions -XX:+G1UsePreventiveGC` (reference: -https://www.oracle.com/java/technologies/javase/20-relnote-issues.html#JDK-8293861[JDK 20 release notes]). It is -important to note that this workaround is temporary and works only with JDK 20, which is bundled with Elasticsearch up -to version 8.10.2 inclusive. Successive versions are bundling JDK 21+, where this setting -https://bugs.openjdk.org/browse/JDK-8297639[has been removed]. Specifying those JVM arguments will prevent the -JVM (and therefore Elasticsearch Nodes) from starting. -// end::no-preventive-gc-issue[] - -[[bug-8.7.1]] -[float] -=== Bug fixes - -Allocation:: -* Compute balancer threshold based on max shard size {es-pull}95090[#95090] -* Use applied state after `DiskThresholdMonitor` reroute {es-pull}94916[#94916] -* Weaken node-replacement decider during reconciliation {es-pull}95070[#95070] - -ILM+SLM:: -* Downsample ILM action should skip non-time-series indices {es-pull}94835[#94835] (issue: {es-issue}93123[#93123]) - -Ingest Node:: -* Fix async enrich execution prematurely releases enrich policy lock {es-pull}94702[#94702] (issue: {es-issue}94690[#94690]) - -Network:: -* Fix off-by-one bug in `RecyclerBytesStreamOutput` {es-pull}95036[#95036] - -Recovery:: -* Async creation of `IndexShard` instances {es-pull}94545[#94545] - -Search:: -* Return 200 when closing empty PIT or scroll {es-pull}94708[#94708] - -Stats:: -* Fix _cluster/stats `.nodes.fs` deduplication {es-pull}94798[#94798] (issue: {es-issue}24472[#24472]) -* Fix `FsInfo` device deduplication {es-pull}94744[#94744] - -[[enhancement-8.7.1]] -[float] -=== Enhancements - -Authorization:: -* Reuse `FieldPermissionsCache` in Role parsing {es-pull}94931[#94931] - -[[upgrade-8.7.1]] -[float] -=== Upgrades - -Packaging:: -* Upgrade bundled JDK to Java 20 {es-pull}94600[#94600] - - diff --git a/docs/reference/release-notes/8.8.0.asciidoc b/docs/reference/release-notes/8.8.0.asciidoc deleted file mode 100644 index da47bd5e386bb..0000000000000 --- a/docs/reference/release-notes/8.8.0.asciidoc +++ /dev/null @@ -1,310 +0,0 @@ -[[release-notes-8.8.0]] -== {es} version 8.8.0 - -Also see <>. - -[[bug-8.8.0]] -[float] -=== Bug fixes - -Aggregations:: -* Merge two histograms using the higher number of digits among all histograms {es-pull}93704[#93704] (issue: {es-issue}92822[#92822]) - -Allocation:: -* Avoid copying during iteration of all shards in routing table {es-pull}94417[#94417] -* Avoid duplicate application of `RoutingTable` diff {es-pull}94379[#94379] -* Balance priorities during reconciliation {es-pull}95454[#95454] -* Fix `RebalanceOnlyWhenActiveAllocationDecider` {es-pull}96025[#96025] -* Streamline `AsyncShardFetch#getNumberOfInFlightFetches` {es-pull}93632[#93632] (issue: {es-issue}93631[#93631]) - -Application:: -* Check if an analytics event data stream exists before installing pipeline {es-pull}95621[#95621] -* [Behavioral Analytics] Use a client with ent-search origin in the `BulkProcessorFactory` {es-pull}95614[#95614] - -Authorization:: -* Fix role transformation to include missing properties {es-pull}94714[#94714] -* [Fleet] Add read privileges to `profiling-*` for symbolization support {es-pull}95596[#95596] - -CRUD:: -* Avoid null `Location` in post write refresh {es-pull}95229[#95229] - -Cluster Coordination:: -* Read register current term asynchronously in `StoreHeartbeatService` {es-pull}95351[#95351] - -DLM:: -* Remove rollover cluster setting validator {es-pull}94447[#94447] -* [DLM] Fix the new endpoint rest-api specification {es-pull}95665[#95665] - -Data streams:: -* Allow deletion of component templates that are specified in the `ignore_missing_component_templates` array {es-pull}95527[#95527] -* Fix searching a filtered and unfiltered data stream alias {es-pull}95865[#95865] (issue: {es-issue}95786[#95786]) - -Distributed:: -* Check shard availability before including in stats {es-pull}96015[#96015] (issues: {es-issue}96000[#96000], {es-issue}87001[#87001]) -* Fix `GetPipelineResponse` equality {es-pull}93695[#93695] - -Engine:: -* Ensure refresh to return the latest commit generation {es-pull}94249[#94249] - -Geo:: -* Adjust `BoundedGeoHexGridTiler#FACTOR` to prevent missing hits {es-pull}96088[#96088] (issue: {es-issue}96057[#96057]) -* Fix bug where `geo_line` does not respect `sort_order` {es-pull}94734[#94734] (issue: {es-issue}94733[#94733]) - -ILM+SLM:: -* Retry downsample ILM action using a new target index {es-pull}94965[#94965] (issue: {es-issue}93580[#93580]) -* Strip disallowed chars from generated snapshot name {es-pull}95767[#95767] (issue: {es-issue}95593[#95593]) -* [ILM] Fix the migrate to tiers service and migrate action tiers configuration {es-pull}95934[#95934] - -Infra/Core:: -* Fix race condition in `NodeEnvironment.close()` {es-pull}94677[#94677] (issue: {es-issue}94672[#94672]) -* Use double wildcards for filtered excludes properly {es-pull}94195[#94195] (issue: {es-issue}92632[#92632]) - -Infra/REST API:: -* Add level parameter validation in REST layer {es-pull}94136[#94136] (issue: {es-issue}93981[#93981]) - -Infra/Scripting:: -* Allow low level paging in `LeafDocLookup` {es-pull}93711[#93711] -* Revert usage of `SafeMustacheFactory` in `CustomMustacheFactory` {es-pull}95557[#95557] - -Ingest Node:: -* Fix `Grok.match()` with offset and suffix pattern {es-pull}95003[#95003] (issue: {es-issue}95002[#95002]) -* Fix bug in verbose simulations of the ingest pipeline API {es-pull}95232[#95232] - -Machine Learning:: -* Avoid expensive source parsing by using doc values when querying model definition meta fields {es-pull}95590[#95590] - -Mapping:: -* Longer timeout for mapping update during resize {es-pull}95221[#95221] - -Network:: -* Fix `RecyclerBytesStreamOutput` corrupting when ending write on page boundary {es-pull}95114[#95114] -* Fix maximum seek limit `RecyclerBytesStreamOutput` {es-pull}95133[#95133] - -Ranking:: -* Fix versioning for tests cases using a randomly generated rank builder {es-pull}95514[#95514] - -Search:: -* Fix `_terms_enum` display values {es-pull}94080[#94080] (issue: {es-issue}94041[#94041]) -* Support ignore malformed in boolean fields {es-pull}93239[#93239] (issue: {es-issue}89542[#89542]) -* Support search template api explain query string argument {es-pull}94832[#94832] (issue: {es-issue}83363[#83363]) - -Snapshot/Restore:: -* Cancel cold cache prewarming tasks if store is closing {es-pull}95891[#95891] (issue: {es-issue}95504[#95504]) -* Fix 0 default value for repo snapshot speed {es-pull}95854[#95854] (issue: {es-issue}95561[#95561]) -* Fix Azure `InputStream#read` method {es-pull}96034[#96034] -* Stop sorting indices in get-snapshots API {es-pull}94890[#94890] - -Transform:: -* Call listener in order to prevent the request from hanging {es-pull}96221[#96221] -* Do not fail upon `ResourceAlreadyExistsException` during destination index creation {es-pull}96274[#96274] (issue: {es-issue}95310[#95310]) -* Fix privileges check failures by adding `allow_restricted_indices` flag {es-pull}95187[#95187] -* Secondary credentials used with transforms should only require source and destination index privileges, not transform privileges {es-pull}94420[#94420] -* Use monotonic time in `TransformScheduler` {es-pull}95456[#95456] (issue: {es-issue}95445[#95445]) - -[[deprecation-8.8.0]] -[float] -=== Deprecations - -Allocation:: -* Deprecate `cluster.routing.allocation.type` {es-pull}94066[#94066] - -[[enhancement-8.8.0]] -[float] -=== Enhancements - -Aggregations:: -* Add `keyed` parameter to filters agg, allowing the user to get non-keyed buckets of named filters agg {es-pull}89256[#89256] (issue: {es-issue}83957[#83957]) -* Add global ordinal info to stats APIs {es-pull}94500[#94500] -* Don't create many `Rounding.Prepared` instances when checking for empty buckets in date_histogram aggregator. {es-pull}94649[#94649] - -Analysis:: -* Add origin of synonym rules to exception message {es-pull}93702[#93702] - -Application:: -* Behavioral Analytics event ingest tuning {es-pull}95405[#95405] -* [Behavioral Analytics] Add geo ip and user agent to events {es-pull}95433[#95433] -* [Behavioral analytics] Implement search filters into events {es-pull}95212[#95212] - -Authentication:: -* Do not fail node if SAML HTTP metadata is unavailable {es-pull}92810[#92810] (issue: {es-issue}37608[#37608]) -* Finer control over authentication metadata serialization {es-pull}93726[#93726] - -Authorization:: -* Add permissions to `kibana_system` for TI package transforms to support IOC expiration {es-pull}94506[#94506] (issue: {es-issue}94505[#94505]) -* Ensure checking indices privileges works with `nested-limited-role` {es-pull}95170[#95170] - -Cluster Coordination:: -* Improve master service batching queues {es-pull}92021[#92021] (issue: {es-issue}81626[#81626]) - -DLM:: -* Adding origination date to DLM {es-pull}95113[#95113] - -Engine:: -* Increase the merge factor to 32 for time-based data {es-pull}94134[#94134] -* Reduce the likelihood of writing small segments due to an oversize translog {es-pull}93524[#93524] (issue: {es-issue}75611[#75611]) -* Sort segments on timestamp in read only engine {es-pull}93576[#93576] -* Use `LogByteSizeMergePolicy` instead of `TieredMergePolicy` for time-based data {es-pull}92684[#92684] -* Use mmap for temporary files {es-pull}93595[#93595] - -Geo:: -* Allow docvalues-only search on `geo_shape` {es-pull}94396[#94396] -* Support for store parameter in `geo_shape` field {es-pull}94418[#94418] (issue: {es-issue}83655[#83655]) - -Highlighting:: -* Use `storedFieldsSpec` to load stored fields for highlighting {es-pull}91841[#91841] - -ILM+SLM:: -* Implicitly rollover data streams / aliases based on `max_primary_shard_docs` {es-pull}94065[#94065] (issue: {es-issue}87246[#87246]) -* Sort ILM explain output by natural index name {es-pull}94879[#94879] (issue: {es-issue}94768[#94768]) - -Indices APIs:: -* Adding initial public and internal serverless scopes to data management rest handlers {es-pull}93990[#93990] -* Servlerless API protection with annotations {es-pull}93607[#93607] - -Infra/Core:: -* Allow preserving specific headers on thread context stash {es-pull}94680[#94680] - -Infra/Plugins:: -* Improve module/plugin loading logging message. {es-pull}93952[#93952] (issue: {es-issue}93881[#93881]) - -Infra/Transport API:: -* Add `transport_version` to node info JSON {es-pull}94669[#94669] - -Ingest Node:: -* Add `reroute` processor {es-pull}76511[#76511] -* Introduce redirect method on `IngestDocument` {es-pull}94000[#94000] (issue: {es-issue}83653[#83653]) -* [Ingest Processor] Add `ignore_missing` param to the `uri_parts` ingest processor {es-pull}95068[#95068] - -Machine Learning:: -* Add `_meta` field to data frame analytics config {es-pull}94529[#94529] -* Add `embedding_size` to text embedding config {es-pull}95176[#95176] -* Include model definition install status for Pytorch models {es-pull}95271[#95271] -* Integrate ELSER model download into put trained model API {es-pull}95281[#95281] -* Start, stop and infer of a trained model can now optionally use a deployment ID that is different to the model ID {es-pull}95168[#95168] -* [ML] Get trained model stats by deployment id or model id {es-pull}95440[#95440] - -Mapping:: -* Cut over from Field to `StringField` when applicable {es-pull}94540[#94540] -* Enable `_terms_enum` on `ip` fields {es-pull}94322[#94322] (issue: {es-issue}89933[#89933]) -* Enable synthetic source for malformed booleans {es-pull}94121[#94121] -* Index sequence numbers via a single Lucene field {es-pull}94504[#94504] -* Use a combined field to index terms and doc values on keyword fields {es-pull}93579[#93579] - -Monitoring:: -* Add `event_loop_utilization` Kibana stats to the monitoring index templates {es-pull}95388[#95388] - -Network:: -* Add request/response body logging to HTTP tracer {es-pull}93133[#93133] -* Avoid deserializing responses in proxy node {es-pull}93799[#93799] -* Report transport message size per action {es-pull}94543[#94543] (issue: {es-issue}88151[#88151]) -* Retain underlying error on proxy mode connection failure {es-pull}94998[#94998] - -SQL:: -* Add `WildcardLike/Pattern` to QL {es-pull}95357[#95357] - -Search:: -* Adding initial public and internal serverless scopes to Search team REST handlers {es-pull}94035[#94035] -* Enable `_terms_enum` on version fields {es-pull}93839[#93839] (issue: {es-issue}83403[#83403]) -* Introduce `DocumentParsingException` {es-pull}92646[#92646] (issue: {es-issue}85083[#85083]) -* Leverage `Weight#count` when size is set to 0 {es-pull}94858[#94858] -* Make `SourceProvider` using stored fields segment-thread-safe {es-pull}95082[#95082] -* Shortcut total hit count when `terminate_after` is used {es-pull}94889[#94889] -* [Profiling] Map stack frames more efficiently {es-pull}94327[#94327] -* [Profiling] Parallelize response handling {es-pull}93960[#93960] - -Security:: -* Fleet: Add new mappings for `.fleet-actions` signing {es-pull}93802[#93802] - -Snapshot/Restore:: -* Add register analysis to repo analysis API {es-pull}93955[#93955] -* Add snapshot activity in cluster stats {es-pull}93680[#93680] -* Add support for custom endpoints in the Azure repository {es-pull}94576[#94576] (issue: {es-issue}94537[#94537]) -* Failed tasks proactively cancel children tasks {es-pull}92588[#92588] (issue: {es-issue}90353[#90353]) - -TSDB:: -* Support position `time_series_metric` on `geo_point` fields {es-pull}93946[#93946] - -Transform:: -* Add `delete_destination_index` parameter to the `Delete Transform API` {es-pull}94162[#94162] -* Allow specifying destination index aliases in the Transform's `dest` config {es-pull}94943[#94943] -* Expose authorization failure as transform health issue {es-pull}94724[#94724] - -Vector Search:: -* Increase max number of vector dims to 2048 {es-pull}95257[#95257] - -Watcher:: -* Add Watcher APIs for updating/retrieving settings {es-pull}95342[#95342] (issue: {es-issue}92991[#92991]) -* Porting watcher over to `BulkProcessor2` {es-pull}94133[#94133] - -[[feature-8.8.0]] -[float] -=== New features - -Application:: -* Initial Search Application Search API with templates {es-pull}95026[#95026] -* [Behavioral Analytics] Add a `final_pipeline` to event data streams {es-pull}95198[#95198] - -Authentication:: -* GA release of the JWT realm {es-pull}95398[#95398] - -CRUD:: -* New `TransportBroadcastUnpromotableAction` action {es-pull}93600[#93600] - -DLM:: -* Add new endpoints to configure data lifecycle on a data stream level {es-pull}94590[#94590] -* Dlm add auto rollover condition max age {es-pull}94950[#94950] -* Initial implementation for `DataLifecycleService` {es-pull}94012[#94012] -* Introduce a _lifecycle/explain API for data stream backing indices {es-pull}94621[#94621] -* Introduce the `index.lifecycle.prefer_ilm` setting {es-pull}95423[#95423] -* [DLM] Extend the template to simulate api to support include defaults {es-pull}94861[#94861] -* [DLM] Introduce default rollover cluster setting & expose it via APIs {es-pull}94240[#94240] - -Health:: -* Add new `ShardsCapacity` Health Indicator Service {es-pull}94552[#94552] -* Add to `HealthMetadata` information about `ShardLimits` {es-pull}94116[#94116] - -Ingest Node:: -* Add license checking to the redact processor {es-pull}95477[#95477] - -Machine Learning:: -* Text Expansion Query {es-pull}93694[#93694] - -Ranking:: -* Add support for Reciprocal Rank Fusion to the search API {es-pull}93396[#93396] - -Search:: -* Add Enterprise Search Module {es-pull}94381[#94381] -* Add new `similarity` field to `knn` clause in `_search` {es-pull}94828[#94828] -* Add the ability to return the score of the named queries {es-pull}94564[#94564] (issue: {es-issue}29606[#29606]) -* Implements behavioral analytics events ingest API {es-pull}95027[#95027] - -TSDB:: -* Encode using 40, 48 and 56 bits per value {es-pull}93371[#93371] -* Flattened field synthetic support {es-pull}94842[#94842] -* Support flattened fields as time series dimension fields {es-pull}95273[#95273] - -[[upgrade-8.8.0]] -[float] -=== Upgrades - -Engine:: -* Upgrade to `lucene-9.6-snapshot-dcc2154a1d3` {es-pull}94955[#94955] - -Infra/Core:: -* Upgrade Jackson xml to 2.15.0 {es-pull}95641[#95641] - -Ingest Node:: -* Upgrading tika to `2.7.0` {es-pull}93759[#93759] - -Network:: -* Upgrade to Netty `4.1.89` {es-pull}94179[#94179] - -Packaging:: -* Bump bundled JDK to Java `20.0.1` {es-pull}95359[#95359] - -Search:: -* Upgrade Lucene to the final 9.6.0 release {es-pull}95967[#95967] -* Upgrade to `lucene-9.6.0-snapshot-8a815153fbe` {es-pull}94635[#94635] -* Upgrade to `lucene-9.6.0-snapshot-f5d1e1c787c` {es-pull}94494[#94494] - - diff --git a/docs/reference/release-notes/8.8.1.asciidoc b/docs/reference/release-notes/8.8.1.asciidoc deleted file mode 100644 index 249c351241bdd..0000000000000 --- a/docs/reference/release-notes/8.8.1.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[[release-notes-8.8.1]] -== {es} version 8.8.1 - -Also see <>. - -[[bug-8.8.1]] -[float] -=== Bug fixes - -Data streams:: -* Allow the removal of an in-use template if there are other ones matching the dependent data streams {es-pull}96286[#96286] - -Geo:: -* API rest compatibility for type parameter in `geo_bounding_box` query {es-pull}96317[#96317] - -Rollup:: -* Do not copy `index.default_pipeline` and `index.final_pipeline` {es-pull}96494[#96494] (issue: {es-issue}96478[#96478]) - -TSDB:: -* Expand start and end time to nanoseconds during coordinator rewrite when needed {es-pull}96035[#96035] (issue: {es-issue}96030[#96030]) -* Fix NPE when indexing a document that just has been deleted in a tsdb index {es-pull}96461[#96461] - -Transform:: -* Improve error message on transform `_update` conflict {es-pull}96432[#96432] -* Report version conflict on concurrent updates {es-pull}96293[#96293] (issue: {es-issue}96311[#96311]) - -[[enhancement-8.8.1]] -[float] -=== Enhancements - -Query Languages:: -* Reduce nesting of same bool queries {es-pull}96265[#96265] (issue: {es-issue}96236[#96236]) - - diff --git a/docs/reference/release-notes/8.8.2.asciidoc b/docs/reference/release-notes/8.8.2.asciidoc deleted file mode 100644 index 8a24ae2e8d4ef..0000000000000 --- a/docs/reference/release-notes/8.8.2.asciidoc +++ /dev/null @@ -1,46 +0,0 @@ -[[release-notes-8.8.2]] -== {es} version 8.8.2 - -Also see <>. - -[[known-issues-8.8.2]] -[float] -=== Known issues -include::8.7.1.asciidoc[tag=no-preventive-gc-issue] - -[[bug-8.8.2]] -[float] -=== Bug fixes - -Aggregations:: -* Fix iteration of empty percentiles throwing Null Pointer Exception {es-pull}96668[#96668] (issue: {es-issue}96626[#96626]) - -Health:: -* Uses `ClusterSettings` instead of Node `Settings` in `HealthMetadataService` {es-pull}96843[#96843] (issue: {es-issue}96219[#96219]) - -Ingest Node:: -* Support dotted field notations in the reroute processor {es-pull}96243[#96243] - -Machine Learning:: -* Ensure NLP model inference queue is always cleared after shutdown or failure {es-pull}96738[#96738] - -SQL:: -* Fix translation of queries involving Version vals {es-pull}96540[#96540] (issue: {es-issue}96509[#96509]) - -Search:: -* Increase concurrent request of opening point-in-time {es-pull}96782[#96782] - -TSDB:: -* The get data stream api incorrectly prints warning log for upgraded tsdb data streams {es-pull}96606[#96606] - -[[enhancement-8.8.2]] -[float] -=== Enhancements - -TSDB:: -* Change rollup thread pool settings {es-pull}96821[#96821] (issue: {es-issue}96758[#96758]) - -Transform:: -* Adding null check to fix potential NPE {es-pull}96785[#96785] (issue: {es-issue}96781[#96781]) - - diff --git a/docs/reference/release-notes/8.9.0.asciidoc b/docs/reference/release-notes/8.9.0.asciidoc deleted file mode 100644 index c49eac9f0327c..0000000000000 --- a/docs/reference/release-notes/8.9.0.asciidoc +++ /dev/null @@ -1,286 +0,0 @@ -[[release-notes-8.9.0]] -== {es} version 8.9.0 - -Also see <>. - -[[known-issues-8.9.0]] -[float] -=== Known issues - -* Question Answering fails on long input text. If the context supplied to the -task is longer than the model's max_sequence_length and truncate is set to none -then inference fails with the message `question answering result has -invalid dimension`. (issue: {es-issue}97917[#97917]) - -include::8.7.1.asciidoc[tag=no-preventive-gc-issue] - -[[breaking-8.9.0]] -[float] -=== Breaking changes - -Aggregations:: -* Switch TDigestState to use `HybridDigest` by default {es-pull}96904[#96904] - -[[bug-8.9.0]] -[float] -=== Bug fixes - -Allocation:: -* Attempt to fix delay allocation {es-pull}95921[#95921] -* Fix NPE in Desired Balance API {es-pull}97775[#97775] -* Fix autoexpand during node replace {es-pull}96281[#96281] - -Authorization:: -* Resolving wildcard application names without prefix query {es-pull}96479[#96479] (issue: {es-issue}96465[#96465]) - -CRUD:: -* Fix `retry_on_conflict` parameter in update API to not retry indefinitely {es-pull}96262[#96262] -* Handle failure in `TransportUpdateAction#handleUpdateFailureWithRetry` {es-pull}97290[#97290] (issue: {es-issue}97286[#97286]) - -Cluster Coordination:: -* Avoid `getStateForMasterService` where possible {es-pull}97304[#97304] -* Become candidate on publication failure {es-pull}96490[#96490] (issue: {es-issue}96273[#96273]) -* Fix cluster settings update task acknowledgment {es-pull}97111[#97111] - -Data streams:: -* Accept timestamp as object at root level {es-pull}97401[#97401] - -Geo:: -* Fix bug when creating empty `geo_lines` {es-pull}97509[#97509] (issue: {es-issue}97311[#97311]) -* Fix time-series geo_line to include reduce phase in MergedGeoLines {es-pull}96953[#96953] (issue: {es-issue}96983[#96983]) -* Support for Byte and Short as vector tiles features {es-pull}97619[#97619] (issue: {es-issue}97612[#97612]) - -ILM+SLM:: -* Limit the details field length we store for each SLM invocation {es-pull}97038[#97038] (issue: {es-issue}96918[#96918]) - -Infra/CLI:: -* Initialise ES logging in CLI {es-pull}97353[#97353] (issue: {es-issue}97350[#97350]) - -Infra/Core:: -* Capture max processors in static init {es-pull}97119[#97119] (issue: {es-issue}97088[#97088]) -* Interpret microseconds cpu stats from cgroups2 properly as nanos {es-pull}96924[#96924] (issue: {es-issue}96089[#96089]) - -Infra/Logging:: -* Add slf4j-nop in order to prevent startup warnings {es-pull}95459[#95459] - -Infra/REST API:: -* Fix tchar pattern in `RestRequest` {es-pull}96406[#96406] - -Infra/Scripting:: -* Fix Painless method lookup over unknown super interfaces {es-pull}97062[#97062] (issue: {es-issue}97022[#97022]) - -Infra/Settings:: -* Enable validation for `versionSettings` {es-pull}95874[#95874] (issue: {es-issue}95873[#95873]) - -Ingest Node:: -* Fixing `DateProcessor` when the format is `epoch_millis` {es-pull}95996[#95996] -* Fixing `GeoIpDownloaderStatsAction$NodeResponse` serialization by defensively copying inputs {es-pull}96777[#96777] (issue: {es-issue}96438[#96438]) -* Trim field references in reroute processor {es-pull}96941[#96941] (issue: {es-issue}96939[#96939]) - -Machine Learning:: -* Catch exceptions thrown during inference and report as errors {ml-pull}2542[#2542] -* Fix `WordPiece` tokenization where stripping accents results in an empty string {es-pull}97354[#97354] -* Improve model downloader robustness {es-pull}97274[#97274] -* Prevent high memory usage by evaluating batch inference singularly {ml-pull}2538[#2538] - -Mapping:: -* Avoid stack overflow while parsing mapping {es-pull}95705[#95705] (issue: {es-issue}52098[#52098]) -* Fix mapping parsing logic to determine synthetic source is active {es-pull}97355[#97355] (issue: {es-issue}97320[#97320]) - -Ranking:: -* Fix `sub_searches` serialization bug {es-pull}97587[#97587] - -Recovery:: -* Promptly fail recovery from snapshot {es-pull}96421[#96421] (issue: {es-issue}95525[#95525]) - -Search:: -* Prevent instantiation of `top_metrics` when sub-aggregations are present {es-pull}96180[#96180] (issue: {es-issue}95663[#95663]) -* Set new providers before building `FetchSubPhaseProcessors` {es-pull}97460[#97460] (issue: {es-issue}96284[#96284]) - -Snapshot/Restore:: -* Fix blob cache races/assertion errors {es-pull}96458[#96458] -* Fix reused/recovered bytes for files that are only partially recovered from cache {es-pull}95987[#95987] (issues: {es-issue}95970[#95970], {es-issue}95994[#95994]) -* Fix reused/recovered bytes for files that are recovered from cache {es-pull}97278[#97278] (issue: {es-issue}95994[#95994]) -* Refactor `RestoreClusterStateListener` to use `ClusterStateObserver` {es-pull}96662[#96662] (issue: {es-issue}96425[#96425]) - -TSDB:: -* Error message for misconfigured TSDB index {es-pull}96956[#96956] (issue: {es-issue}96445[#96445]) -* Min score for time series {es-pull}96878[#96878] - -Task Management:: -* Improve cancellability in `TransportTasksAction` {es-pull}96279[#96279] - -Transform:: -* Improve reporting status of the transform that is about to finish {es-pull}95672[#95672] - -[[enhancement-8.9.0]] -[float] -=== Enhancements - -Aggregations:: -* Add cluster setting to `SearchExecutionContext` to configure `TDigestExecutionHint` {es-pull}96943[#96943] -* Add support for dynamic pruning to cardinality aggregations on low-cardinality keyword fields {es-pull}92060[#92060] -* Make TDigestState configurable {es-pull}96794[#96794] -* Skip `SortingDigest` when merging a large digest in `HybridDigest` {es-pull}97099[#97099] -* Support value retrieval in `top_hits` {es-pull}95828[#95828] - -Allocation:: -* Take into account `expectedShardSize` when initializing shard in simulation {es-pull}95734[#95734] - -Analysis:: -* Create `.synonyms` system index {es-pull}95548[#95548] - -Application:: -* Add template parameters to Search Applications {es-pull}95674[#95674] -* Chunk profiling stacktrace response {es-pull}96340[#96340] -* [Profiling] Add status API {es-pull}96272[#96272] -* [Profiling] Allow to upgrade managed ILM policy {es-pull}96550[#96550] -* [Profiling] Introduce ILM for K/V indices {es-pull}96268[#96268] -* [Profiling] Require POST to retrieve stacktraces {es-pull}96790[#96790] -* [Profiling] Tweak default ILM policy {es-pull}96516[#96516] -* [Search Applications] Support arrays in stored mustache templates {es-pull}96197[#96197] - -Authentication:: -* Header validator with Security {es-pull}95112[#95112] - -Authorization:: -* Add Search ALC filter index prefix to the enterprise search user {es-pull}96885[#96885] -* Ensure checking application privileges work with nested-limited roles {es-pull}96970[#96970] - -Autoscaling:: -* Add shard explain info to `ReactiveReason` about unassigned shards {es-pull}88590[#88590] (issue: {es-issue}85243[#85243]) - -DLM:: -* Add auto force merge functionality to DLM {es-pull}95204[#95204] -* Adding `data_lifecycle` to the _xpack/usage API {es-pull}96177[#96177] -* Adding `manage_data_stream_lifecycle` index privilege and expanding `view_index_metadata` for access to data stream lifecycle APIs {es-pull}95512[#95512] -* Allow for the data lifecycle and the retention to be explicitly nullified {es-pull}95979[#95979] - -Data streams:: -* Add support for `logs@custom` component template for `logs-*-* data streams {es-pull}95481[#95481] (issue: {es-issue}95469[#95469]) -* Adding ECS dynamic mappings component and applying it to logs data streams by default {es-pull}96171[#96171] (issue: {es-issue}95538[#95538]) -* Adjust ECS dynamic templates to support `subobjects: false` {es-pull}96712[#96712] -* Automatically parse log events in logs data streams, if their `message` field contains JSON content {es-pull}96083[#96083] (issue: {es-issue}95522[#95522]) -* Change default of `ignore_malformed` to `true` in `logs-*-*` data streams {es-pull}95329[#95329] (issue: {es-issue}95224[#95224]) -* Set `@timestamp` for documents in logs data streams if missing and add support for custom pipeline {es-pull}95971[#95971] (issues: {es-issue}95537[#95537], {es-issue}95551[#95551]) -* Update data streams implicit timestamp `ignore_malformed` settings {es-pull}96051[#96051] - -Engine:: -* Cache modification time of translog writer file {es-pull}95107[#95107] -* Trigger refresh when shard becomes search active {es-pull}96321[#96321] (issue: {es-issue}95544[#95544]) - -Geo:: -* Add brute force approach to `GeoHashGridTiler` {es-pull}96863[#96863] -* Asset tracking - geo_line in time-series aggregations {es-pull}94954[#94954] - -ILM+SLM:: -* Chunk the GET _ilm/policy response {es-pull}97251[#97251] (issue: {es-issue}96569[#96569]) -* Move get lifecycle API to Management thread pool and make cancellable {es-pull}97248[#97248] (issue: {es-issue}96568[#96568]) -* Reduce WaitForNoFollowersStep requests indices shard stats {es-pull}94510[#94510] - -Indices APIs:: -* Bootstrap profiling indices at startup {es-pull}95666[#95666] - -Infra/Node Lifecycle:: -* SIGTERM node shutdown type {es-pull}95430[#95430] - -Ingest Node:: -* Add mappings for enrich fields {es-pull}96056[#96056] -* Ingest: expose reroute inquiry/reset via Elastic-internal API bridge {es-pull}96958[#96958] - -Machine Learning:: -* Improved compliance with memory limitations {ml-pull}2469[#2469] -* Improve detection of calendar cyclic components with long bucket lengths {ml-pull}2493[#2493] -* Improve detection of time shifts, for example for daylight saving {ml-pull}2479[#2479] - -Mapping:: -* Allow unsigned long field to use decay functions {es-pull}96394[#96394] (issue: {es-issue}89603[#89603]) - -Ranking:: -* Add multiple queries for ranking to the search endpoint {es-pull}96224[#96224] - -Recovery:: -* Implement `StartRecoveryRequest#getDescription` {es-pull}95731[#95731] - -Search:: -* Add search shards endpoint {es-pull}94534[#94534] -* Don't generate stacktrace in `EarlyTerminationException` and `TimeExceededException` {es-pull}95910[#95910] -* Feature/speed up binary vector decoding {es-pull}96716[#96716] -* Improve brute force vector search speed by using Lucene functions {es-pull}96617[#96617] -* Include search idle info to shard stats {es-pull}95740[#95740] (issue: {es-issue}95727[#95727]) -* Integrate CCS with new `search_shards` API {es-pull}95894[#95894] (issue: {es-issue}93730[#93730]) -* Introduce a filtered collector manager {es-pull}96824[#96824] -* Introduce minimum score collector manager {es-pull}96834[#96834] -* Skip shards when querying constant keyword fields {es-pull}96161[#96161] (issue: {es-issue}95541[#95541]) -* Support CCS minimize round trips in async search {es-pull}96012[#96012] -* Support for patter_replace filter in keyword normalizer {es-pull}96588[#96588] -* Support null_value for rank_feature field type {es-pull}95811[#95811] - -Security:: -* Add "_storage" internal user {es-pull}95694[#95694] - -Snapshot/Restore:: -* Reduce overhead in blob cache service get {es-pull}96399[#96399] - -Stats:: -* Add `ingest` information to the cluster info endpoint {es-pull}96328[#96328] (issue: {es-issue}95392[#95392]) -* Add `script` information to the cluster info endpoint {es-pull}96613[#96613] (issue: {es-issue}95394[#95394]) -* Add `thread_pool` information to the cluster info endpoint {es-pull}96407[#96407] (issue: {es-issue}95393[#95393]) - -TSDB:: -* Feature: include unit support for time series rate aggregation {es-pull}96605[#96605] (issue: {es-issue}94630[#94630]) - -Vector Search:: -* Leverage SIMD hardware instructions in Vector Search {es-pull}96453[#96453] (issue: {es-issue}96370[#96370]) - -[[feature-8.9.0]] -[float] -=== New features - -Application:: -* Enable analytics geoip in behavioral analytics {es-pull}96624[#96624] - -Authorization:: -* Support restricting access of API keys to only certain workflows {es-pull}96744[#96744] - -Data streams:: -* Adding ability to auto-install ingest pipelines and refer to them from index templates {es-pull}95782[#95782] - -Geo:: -* Geometry simplifier {es-pull}94859[#94859] - -ILM+SLM:: -* Enhance ILM Health Indicator {es-pull}96092[#96092] - -Infra/Node Lifecycle:: -* Gracefully shutdown elasticsearch {es-pull}96363[#96363] - -Infra/Plugins:: -* [Fleet] Add `.fleet-secrets` system index {es-pull}95625[#95625] (issue: {es-issue}95143[#95143]) - -Machine Learning:: -* Add support for `xlm_roberta` tokenized models {es-pull}94089[#94089] -* Removes the technical preview admonition from query_vector_builder docs {es-pull}96735[#96735] - -Snapshot/Restore:: -* Add repo throttle metrics to node stats api response {es-pull}96678[#96678] (issue: {es-issue}89385[#89385]) - -Stats:: -* New HTTP info endpoint {es-pull}96198[#96198] (issue: {es-issue}95391[#95391]) - -[[upgrade-8.9.0]] -[float] -=== Upgrades - -Infra/Transport API:: -* Bump `TransportVersion` to the first non-release version number. Transport protocol is now versioned independently of release version. {es-pull}95286[#95286] - -Network:: -* Upgrade Netty to 4.1.92 {es-pull}95575[#95575] -* Upgrade Netty to 4.1.94.Final {es-pull}97112[#97112] - -Search:: -* Upgrade Lucene to a 9.7.0 snapshot {es-pull}96433[#96433] -* Upgrade to new lucene snapshot 9.7.0-snapshot-a8602d6ef88 {es-pull}96741[#96741] - - diff --git a/docs/reference/release-notes/8.9.1.asciidoc b/docs/reference/release-notes/8.9.1.asciidoc deleted file mode 100644 index 680860622c1bb..0000000000000 --- a/docs/reference/release-notes/8.9.1.asciidoc +++ /dev/null @@ -1,56 +0,0 @@ -[[release-notes-8.9.1]] -== {es} version 8.9.1 - -Also see <>. - -[[known-issues-8.9.1]] -[float] -=== Known issues -include::8.7.1.asciidoc[tag=no-preventive-gc-issue] - -[[bug-8.9.1]] -[float] -=== Bug fixes - -Aggregations:: -* `GlobalAggregator` should call rewrite() before `createWeight()` {es-pull}98091[#98091] (issue: {es-issue}98076[#98076]) - -Cluster Coordination:: -* Improve exception handling in Coordinator#publish {es-pull}97840[#97840] (issue: {es-issue}97798[#97798]) - -EQL:: -* Backport fix for async missing events and re-enable the feature {es-pull}98130[#98130] - -ILM+SLM:: -* Ignore the `total_shards_per_node` setting on searchable snapshots in frozen {es-pull}97979[#97979] -* Migrate to data tiers routing configures correct default for mounted indices {es-pull}97936[#97936] (issue: {es-issue}97898[#97898]) - -Infra/Core:: -* Fix APM trace start time {es-pull}98113[#98113] - -Infra/Logging:: -* Add Configuration to `PatternLayout` {es-pull}97679[#97679] - -Machine Learning:: -* Fix failure processing Question Answering model output where the input has been spanned over multiple sequences {es-pull}98167[#98167] (issue: {es-issue}97917[#97917]) - -Search:: -* `UnmappedFieldFetcher` should ignore nested fields {es-pull}97987[#97987] (issue: {es-issue}97684[#97684]) - -[[enhancement-8.9.1]] -[float] -=== Enhancements - -Authentication:: -* Upgrade xmlsec to 2.1.8 {es-pull}97741[#97741] - -Infra/Core:: -* Enhance regex performance with duplicate wildcards {es-pull}98176[#98176] - -Machine Learning:: -* Add setting to scale the processor count used in the model assignment planner {es-pull}98296[#98296] - -Search:: -* Refactor nested field handling in `FieldFetcher` {es-pull}97683[#97683] - - diff --git a/docs/reference/release-notes/8.9.2.asciidoc b/docs/reference/release-notes/8.9.2.asciidoc deleted file mode 100644 index 8464d21e1ccc4..0000000000000 --- a/docs/reference/release-notes/8.9.2.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -[[release-notes-8.9.2]] -== {es} version 8.9.2 - -Also see <>. - -[[known-issues-8.9.2]] -[float] -=== Known issues -include::8.7.1.asciidoc[tag=no-preventive-gc-issue] - -[float] -[[security-updates-8.9.2]] -=== Security updates - -* {es} generally filters out sensitive information and credentials before -logging to the audit log. It was found that this filtering was not applied when -requests to {es} use certain deprecated `_xpack/security` URIs for APIs. The -impact of this flaw is that sensitive information, such as passwords and tokens, -might be printed in cleartext in {es} audit logs. Note that audit logging is -disabled by default and needs to be explicitly enabled. Even when audit logging -is enabled, request bodies that could contain sensitive information are not -printed to the audit log unless explicitly configured. -+ -The issue is resolved in {es} 8.9.2. -+ -For more information, see our related -https://discuss.elastic.co/t/elasticsearch-8-9-2-and-7-17-13-security-update/342479[security -announcement]. - -[[bug-8.9.2]] -[float] -=== Bug fixes - -Data streams:: -* Avoid lifecycle NPE in the data stream lifecycle usage API {es-pull}98260[#98260] - -Geo:: -* Fix mvt error when returning partial results {es-pull}98765[#98765] (issue: {es-issue}98730[#98730]) - -Ingest Node:: -* Revert "Add mappings for enrich fields" {es-pull}98683[#98683] - - diff --git a/docs/reference/release-notes/9.0.0.asciidoc b/docs/reference/release-notes/9.0.0.asciidoc new file mode 100644 index 0000000000000..af26fd57385e3 --- /dev/null +++ b/docs/reference/release-notes/9.0.0.asciidoc @@ -0,0 +1,557 @@ +// THIS IS A GENERATED FILE. DO NOT EDIT DIRECTLY. +// The content generated here are is not correct and most has been manually commented out until it can be fixed. +// See ES-9931 for more details. +[[release-notes-9.0.0]] +== {es} version 9.0.0 + +coming[9.0.0] + +Also see <>. + +[[breaking-9.0.0]] +[float] +=== Breaking changes + +// Allocation:: +// * Remove cluster state from `/_cluster/reroute` response {es-pull}114231[#114231] (issue: {es-issue}88978[#88978]) +// +// Analysis:: +// * Set lenient to true by default when using updateable synonyms {es-pull}110901[#110901] +// * Snowball stemmers have been upgraded {es-pull}114146[#114146] +// * The 'german2' stemmer is now an alias for the 'german' snowball stemmer {es-pull}113614[#113614] +// * The 'persian' analyzer has stemmer by default {es-pull}113482[#113482] (issue: {es-issue}113050[#113050]) +// * The Korean dictionary for Nori has been updated {es-pull}114124[#114124] +// +// Cluster Coordination:: +// * Remove unsupported legacy value for `discovery.type` {es-pull}112903[#112903] +// +// Data streams:: +// * Update data stream lifecycle telemetry to track global retention {es-pull}112451[#112451] +// +// ES|QL:: +// * ESQL: Entirely remove META FUNCTIONS {es-pull}113967[#113967] +// +// Indices APIs:: +// * Remove deprecated local attribute from alias APIs {es-pull}115393[#115393] +// +// Mapping:: +// * JDK locale database change {es-pull}113975[#113975] +// +// Search:: +// * Adding breaking change entry for retrievers {es-pull}115399[#115399] + +[[bug-9.0.0]] +[float] +=== Bug fixes +// +// Aggregations:: +// * Always check the parent breaker with zero bytes in `PreallocatedCircuitBreakerService` {es-pull}115181[#115181] +// * Force using the last centroid during merging {es-pull}111644[#111644] (issue: {es-issue}111065[#111065]) +// +// Authentication:: +// * Check for disabling own user in Put User API {es-pull}112262[#112262] (issue: {es-issue}90205[#90205]) +// * Expose cluster-state role mappings in APIs {es-pull}114951[#114951] +// +// Authorization:: +// * Fix DLS & FLS sometimes being enforced when it is disabled {es-pull}111915[#111915] (issue: {es-issue}94709[#94709]) +// * Fix DLS using runtime fields and synthetic source {es-pull}112341[#112341] +// +// CRUD:: +// * Don't fail retention lease sync actions due to capacity constraints {es-pull}109414[#109414] (issue: {es-issue}105926[#105926]) +// * Preserve thread context when waiting for segment generation in RTG {es-pull}114623[#114623] +// * Standardize error code when bulk body is invalid {es-pull}114869[#114869] +// +// Cluster Coordination:: +// * Ensure clean thread context in `MasterService` {es-pull}114512[#114512] +// +// Data streams:: +// * Adding support for data streams with a match-all template {es-pull}111311[#111311] (issue: {es-issue}111204[#111204]) +// * Exclude internal data streams from global retention {es-pull}112100[#112100] +// * Fix verbose get data stream API not requiring extra privileges {es-pull}112973[#112973] +// * OTel mappings: avoid metrics to be rejected when attributes are malformed {es-pull}114856[#114856] +// * [otel-data] Add more kubernetes aliases {es-pull}115429[#115429] +// * logs-apm.error-*: define log.level field as keyword {es-pull}112440[#112440] +// +// Distributed:: +// * Handle `InternalSendException` inline for non-forking handlers {es-pull}114375[#114375] +// +// EQL:: +// * Don't use a `BytesStreamOutput` to copy keys in `BytesRefBlockHash` {es-pull}114819[#114819] (issue: {es-issue}114599[#114599]) +// * Fix validation of TEXT fields with case insensitive comparison {es-pull}111238[#111238] (issue: {es-issue}111235[#111235]) +// +// ES|QL:: +// * ESQL: Add Values aggregation tests, fix `ConstantBytesRefBlock` memory handling {es-pull}111367[#111367] +// * ESQL: Align year diffing to the rest of the units in DATE_DIFF: chronological {es-pull}113103[#113103] (issue: {es-issue}112482[#112482]) +// * ESQL: Disable pushdown of WHERE past STATS {es-pull}115308[#115308] (issue: {es-issue}115281[#115281]) +// * ESQL: Fix CASE when conditions are multivalued {es-pull}112401[#112401] (issue: {es-issue}112359[#112359]) +// * ESQL: Fix Double operations returning infinite {es-pull}111064[#111064] (issue: {es-issue}111026[#111026]) +// * ESQL: Fix `REVERSE` with backspace character {es-pull}115245[#115245] (issues: {es-issue}114372[#114372], {es-issue}115227[#115227], {es-issue}115228[#115228]) +// * ESQL: Fix a bug in `MV_PERCENTILE` {es-pull}112218[#112218] (issues: {es-issue}112193[#112193], {es-issue}112180[#112180], {es-issue}112187[#112187], {es-issue}112188[#112188]) +// * ESQL: Fix filtered grouping on ords {es-pull}115312[#115312] (issue: {es-issue}114897[#114897]) +// * ESQL: Fix grammar changes around per agg filtering {es-pull}114848[#114848] +// * ESQL: Fix serialization during `can_match` {es-pull}111779[#111779] (issues: {es-issue}111701[#111701], {es-issue}111726[#111726]) +// * ESQL: Fix synthetic attribute pruning {es-pull}111413[#111413] (issue: {es-issue}105821[#105821]) +// * ESQL: don't lose the original casting error message {es-pull}111968[#111968] (issue: {es-issue}111967[#111967]) +// * ESQL: fix for missing indices error message {es-pull}111797[#111797] (issue: {es-issue}111712[#111712]) +// * ES|QL: Fix stats by constant expression {es-pull}114899[#114899] +// * ES|QL: Restrict sorting for `_source` and counter field types {es-pull}114638[#114638] (issues: {es-issue}114423[#114423], {es-issue}111976[#111976]) +// * ES|QL: better validation for GROK patterns {es-pull}110574[#110574] (issue: {es-issue}110533[#110533]) +// * ES|QL: better validation for RLIKE patterns {es-pull}112489[#112489] (issue: {es-issue}112485[#112485]) +// * ES|QL: better validation of GROK patterns {es-pull}112200[#112200] (issue: {es-issue}112111[#112111]) +// * Fix ST_CENTROID_AGG when no records are aggregated {es-pull}114888[#114888] (issue: {es-issue}106025[#106025]) +// * Fix TDigestState.read CB leaks {es-pull}114303[#114303] (issue: {es-issue}114194[#114194]) +// * Spatial search functions support multi-valued fields in compute engine {es-pull}112063[#112063] (issues: {es-issue}112102[#112102], {es-issue}112505[#112505], {es-issue}110830[#110830]) +// * [ES|QL] Check expression resolved before checking its data type in `ImplicitCasting` {es-pull}113314[#113314] (issue: {es-issue}113242[#113242]) +// * [ES|QL] Simplify patterns for subfields {es-pull}111118[#111118] +// * [ES|QL] Simplify syntax of named parameter for identifier and pattern {es-pull}115061[#115061] +// * [ES|QL] Skip validating remote cluster index names in parser {es-pull}114271[#114271] +// * [ES|QL] Use `RangeQuery` and String in `BinaryComparison` on datetime fields {es-pull}110669[#110669] (issue: {es-issue}107900[#107900]) +// * [ES|QL] add tests for stats by constant {es-pull}110593[#110593] (issue: {es-issue}105383[#105383]) +// * [ES|QL] make named parameter for identifier and pattern snapshot {es-pull}114784[#114784] +// * [ES|QL] validate `mv_sort` order {es-pull}110021[#110021] (issue: {es-issue}109910[#109910]) +// +// Geo:: +// * Fix cases of collections with one point {es-pull}111193[#111193] (issue: {es-issue}110982[#110982]) +// +// Health:: +// * Set `replica_unassigned_buffer_time` in constructor {es-pull}112612[#112612] +// +// ILM+SLM:: +// * Make `SnapshotLifecycleStats` immutable so `SnapshotLifecycleMetadata.EMPTY` isn't changed as side-effect {es-pull}111215[#111215] +// +// Indices APIs:: +// * Revert "Add `ResolvedExpression` wrapper" {es-pull}115317[#115317] +// +// Infra/Core:: +// * Fix max file size check to use `getMaxFileSize` {es-pull}113723[#113723] (issue: {es-issue}113705[#113705]) +// * Guard blob store local directory creation with `doPrivileged` {es-pull}115459[#115459] +// * Handle `BigInteger` in xcontent copy {es-pull}111937[#111937] (issue: {es-issue}111812[#111812]) +// * Report JVM stats for all memory pools (97046) {es-pull}115117[#115117] (issue: {es-issue}97046[#97046]) +// * `ByteArrayStreamInput:` Return -1 when there are no more bytes to read {es-pull}112214[#112214] +// +// Infra/Logging:: +// * Only emit product origin in deprecation log if present {es-pull}111683[#111683] (issue: {es-issue}81757[#81757]) +// +// Infra/Metrics:: +// * Make `randomInstantBetween` always return value in range [minInstant, `maxInstant]` {es-pull}114177[#114177] +// +// Infra/REST API:: +// * Fixed a `NullPointerException` in `_capabilities` API when the `path` parameter is null. {es-pull}113413[#113413] (issue: {es-issue}113413[#113413]) +// +// Infra/Settings:: +// * GET _cluster/settings with include_defaults returns the expected fallback value if defined in elasticsearch.yml {es-pull}110816[#110816] (issue: {es-issue}110815[#110815]) +// +// Ingest Node:: +// * Add warning headers for ingest pipelines containing special characters {es-pull}114837[#114837] (issue: {es-issue}104411[#104411]) +// * Fix IPinfo geolocation schema {es-pull}115147[#115147] +// * Fix `getDatabaseType` for unusual MMDBs {es-pull}112888[#112888] +// * Reducing error-level stack trace logging for normal events in `GeoIpDownloader` {es-pull}114924[#114924] +// +// License:: +// * Fix Start Trial API output acknowledgement header for features {es-pull}111740[#111740] (issue: {es-issue}111739[#111739]) +// * Fix `TokenService` always appearing used in Feature Usage {es-pull}112263[#112263] (issue: {es-issue}61956[#61956]) +// +// Logs:: +// * Do not expand dots when storing objects in ignored source {es-pull}113910[#113910] +// * Fix `ignore_above` handling in synthetic source when index level setting is used {es-pull}113570[#113570] (issue: {es-issue}113538[#113538]) +// * Fix synthetic source for flattened field when used with `ignore_above` {es-pull}113499[#113499] (issue: {es-issue}112044[#112044]) +// +// Machine Learning:: +// * Avoid `ModelAssignment` deadlock {es-pull}109684[#109684] +// * Fix NPE in Get Deployment Stats {es-pull}115404[#115404] +// * Fix bug in ML serverless autoscaling which prevented trained model updates from triggering a scale up {es-pull}110734[#110734] +// * Ignore unrecognized openai sse fields {es-pull}114715[#114715] +// * Mitigate IOSession timeouts {es-pull}115414[#115414] (issues: {es-issue}114385[#114385], {es-issue}114327[#114327], {es-issue}114105[#114105], {es-issue}114232[#114232]) +// * Prevent NPE if model assignment is removed while waiting to start {es-pull}115430[#115430] +// * Send mid-stream errors to users {es-pull}114549[#114549] +// * Temporarily return both `modelId` and `inferenceId` for GET /_inference until we migrate clients to only `inferenceId` {es-pull}111490[#111490] +// * Warn for model load failures if they have a status code <500 {es-pull}113280[#113280] +// * [Inference API] Remove unused Cohere rerank service settings fields in a BWC way {es-pull}110427[#110427] +// * [ML] Create Inference API will no longer return model_id and now only return inference_id {es-pull}112508[#112508] +// +// Mapping:: +// * Fix `MapperBuilderContext#isDataStream` when used in dynamic mappers {es-pull}110554[#110554] +// * Fix synthetic source field names for multi-fields {es-pull}112850[#112850] +// * Retrieve the source for objects and arrays in a separate parsing phase {es-pull}113027[#113027] (issue: {es-issue}112374[#112374]) +// * Two empty mappings now are created equally {es-pull}107936[#107936] (issue: {es-issue}107031[#107031]) +// +// Ranking:: +// * Fix MLTQuery handling of custom term frequencies {es-pull}110846[#110846] +// * Fix RRF validation for `rank_constant` < 1 {es-pull}112058[#112058] +// * Fix score count validation in reranker response {es-pull}111212[#111212] (issue: {es-issue}111202[#111202]) +// +// Search:: +// * Allow for querries on `_tier` to skip shards in the `can_match` phase {es-pull}114990[#114990] (issue: {es-issue}114910[#114910]) +// * Allow out of range term queries for numeric types {es-pull}112916[#112916] +// * Do not exclude empty arrays or empty objects in source filtering {es-pull}112250[#112250] (issue: {es-issue}109668[#109668]) +// * Fix synthetic source handling for `bit` type in `dense_vector` field {es-pull}114407[#114407] (issue: {es-issue}114402[#114402]) +// * Improve DateTime error handling and add some bad date tests {es-pull}112723[#112723] (issue: {es-issue}112190[#112190]) +// * Improve date expression/remote handling in index names {es-pull}112405[#112405] (issue: {es-issue}112243[#112243]) +// * Make "too many clauses" throw IllegalArgumentException to avoid 500s {es-pull}112678[#112678] (issue: {es-issue}112177[#112177]) +// * Make empty string searches be consistent with case (in)sensitivity {es-pull}110833[#110833] +// * Prevent flattening of ordered and unordered interval sources {es-pull}114234[#114234] +// * Remove needless forking to GENERIC in `TransportMultiSearchAction` {es-pull}110796[#110796] +// * Search/Mapping: KnnVectorQueryBuilder support for allowUnmappedFields {es-pull}107047[#107047] (issue: {es-issue}106846[#106846]) +// * Span term query to convert to match no docs when unmapped field is targeted {es-pull}113251[#113251] +// * Speedup `CanMatchPreFilterSearchPhase` constructor {es-pull}110860[#110860] +// * Updated Date Range to Follow Documentation When Assuming Missing Values {es-pull}112258[#112258] (issue: {es-issue}111484[#111484]) +// +// Security:: +// * Updated the transport CA name in Security Auto-Configuration. {es-pull}106520[#106520] (issue: {es-issue}106455[#106455]) +// +// Snapshot/Restore:: +// * Retry throttled snapshot deletions {es-pull}113237[#113237] +// +// TSDB:: +// * Implement `parseBytesRef` for `TimeSeriesRoutingHashFieldType` {es-pull}113373[#113373] (issue: {es-issue}112399[#112399]) +// +// Task Management:: +// * Improve handling of failure to create persistent task {es-pull}114386[#114386] +// +// Transform:: +// * Allow task canceling of validate API calls {es-pull}110951[#110951] +// * Include reason when no nodes are found {es-pull}112409[#112409] (issue: {es-issue}112404[#112404]) +// +// Vector Search:: +// * Fix dim validation for bit `element_type` {es-pull}114533[#114533] +// * Support semantic_text in object fields {es-pull}114601[#114601] (issue: {es-issue}114401[#114401]) +// +// Watcher:: +// * Truncating watcher history if it is too large {es-pull}111245[#111245] (issue: {es-issue}94745[#94745]) +// * Watch Next Run Interval Resets On Shard Move or Node Restart {es-pull}115102[#115102] (issue: {es-issue}111433[#111433]) +// +// [[deprecation-9.0.0]] +// [float] +// === Deprecations +// +// Analysis:: +// * Deprecate dutch_kp and lovins stemmer as they are removed in Lucene 10 {es-pull}113143[#113143] +// * deprecate `edge_ngram` side parameter {es-pull}110829[#110829] +// +// CRUD:: +// * Deprecate dot-prefixed indices and composable template index patterns {es-pull}112571[#112571] +// +// Machine Learning:: +// * [Inference API] Deprecate elser service {es-pull}113216[#113216] +// +// Search:: +// * Adding deprecation warnings for rrf using rank and `sub_searches` {es-pull}114854[#114854] +// * Deprecate legacy params from range query {es-pull}113286[#113286] +// +// [[enhancement-9.0.0]] +// [float] +// === Enhancements +// +// Aggregations:: +// * Account for `DelayedBucket` before reduction {es-pull}113013[#113013] +// * Add protection for OOM during aggregations partial reduction {es-pull}110520[#110520] +// * Deduplicate `BucketOrder` when deserializing {es-pull}112707[#112707] +// * Lower the memory footprint when creating `DelayedBucket` {es-pull}112519[#112519] +// * Reduce heap usage for `AggregatorsReducer` {es-pull}112874[#112874] +// * Remove reduce and `reduceContext` from `DelayedBucket` {es-pull}112547[#112547] +// +// Allocation:: +// * Add link to flood-stage watermark exception message {es-pull}111315[#111315] +// * Always allow rebalancing by default {es-pull}111015[#111015] +// * Only publish desired balance gauges on master {es-pull}115383[#115383] +// +// Application:: +// * [Profiling] add `container.id` field to event index template {es-pull}111969[#111969] +// +// Authorization:: +// * Add manage roles privilege {es-pull}110633[#110633] +// * Add privileges required for CDR misconfiguration features to work on AWS SecurityHub integration {es-pull}112574[#112574] +// * [Security Solution] Add `create_index` to `kibana_system` role for index/DS `.logs-endpoint.action.responses-*` {es-pull}115241[#115241] +// +// CRUD:: +// * Suppress merge-on-recovery for older indices {es-pull}113462[#113462] +// +// Codec:: +// * Remove zstd feature flag for index codec best compression {es-pull}112665[#112665] +// +// Data streams:: +// * Add 'verbose' flag retrieving `maximum_timestamp` for get data stream API {es-pull}112303[#112303] +// * Display effective retention in the relevant data stream APIs {es-pull}112019[#112019] +// * Expose global retention settings via data stream lifecycle API {es-pull}112210[#112210] +// * Make ecs@mappings work with OTel attributes {es-pull}111600[#111600] +// +// Distributed:: +// * Add link to Max Shards Per Node exception message {es-pull}110993[#110993] +// * Use Azure blob batch API to delete blobs in batches {es-pull}114566[#114566] +// +// EQL:: +// * ESQL: Delay construction of warnings {es-pull}114368[#114368] +// +// ES|QL:: +// * Add EXP ES|QL function {es-pull}110879[#110879] +// * Add `CircuitBreaker` to TDigest, Step 3: Connect with ESQL CB {es-pull}113387[#113387] +// * Add `CircuitBreaker` to TDigest, Step 4: Take into account shallow classes size {es-pull}113613[#113613] (issue: {es-issue}113916[#113916]) +// * Collect and display execution metadata for ES|QL cross cluster searches {es-pull}112595[#112595] (issue: {es-issue}112402[#112402]) +// * ESQL: Add support for multivalue fields in Arrow output {es-pull}114774[#114774] +// * ESQL: BUCKET: allow numerical spans as whole numbers {es-pull}111874[#111874] (issues: {es-issue}104646[#104646], {es-issue}109340[#109340], {es-issue}105375[#105375]) +// * ESQL: Have BUCKET generate friendlier intervals {es-pull}111879[#111879] (issue: {es-issue}110916[#110916]) +// * ESQL: Profile more timing information {es-pull}111855[#111855] +// * ESQL: Push down filters even in case of renames in Evals {es-pull}114411[#114411] +// * ESQL: Remove parent from `FieldAttribute` {es-pull}112881[#112881] +// * ESQL: Speed up CASE for some parameters {es-pull}112295[#112295] +// * ESQL: Speed up grouping by bytes {es-pull}114021[#114021] +// * ESQL: Support INLINESTATS grouped on expressions {es-pull}111690[#111690] +// * ESQL: Use less memory in listener {es-pull}114358[#114358] +// * ES|QL: Add support for cached strings in plan serialization {es-pull}112929[#112929] +// * ES|QL: add Telemetry API and track top functions {es-pull}111226[#111226] +// * ES|QL: add metrics for functions {es-pull}114620[#114620] +// * Enhance SORT push-down to Lucene to cover references to fields and ST_DISTANCE function {es-pull}112938[#112938] (issue: {es-issue}109973[#109973]) +// * Siem ea 9521 improve test {es-pull}111552[#111552] +// * Support multi-valued fields in compute engine for ST_DISTANCE {es-pull}114836[#114836] (issue: {es-issue}112910[#112910]) +// * [ESQL] Add `SPACE` function {es-pull}112350[#112350] +// * [ESQL] Add finish() elapsed time to aggregation profiling times {es-pull}113172[#113172] (issue: {es-issue}112950[#112950]) +// * [ESQL] Make query wrapped by `SingleValueQuery` cacheable {es-pull}110116[#110116] +// * [ES|QL] Add hypot function {es-pull}114382[#114382] +// * [ES|QL] Cast mixed numeric types to a common numeric type for Coalesce and In at Analyzer {es-pull}111917[#111917] (issue: {es-issue}111486[#111486]) +// * [ES|QL] Combine Disjunctive CIDRMatch {es-pull}111501[#111501] (issue: {es-issue}105143[#105143]) +// * [ES|QL] Create `Range` in `PushFiltersToSource` for qualified pushable filters on the same field {es-pull}111437[#111437] +// * [ES|QL] Name parameter with leading underscore {es-pull}111950[#111950] (issue: {es-issue}111821[#111821]) +// * [ES|QL] Named parameter for field names and field name patterns {es-pull}112905[#112905] +// * [ES|QL] Validate index name in parser {es-pull}112081[#112081] +// * [ES|QL] add reverse function {es-pull}113297[#113297] +// * [ES|QL] explicit cast a string literal to `date_period` and `time_duration` in arithmetic operations {es-pull}109193[#109193] +// +// Experiences:: +// * Integrate IBM watsonx to Inference API for text embeddings {es-pull}111770[#111770] +// +// Geo:: +// * Add support for spatial relationships in point field mapper {es-pull}112126[#112126] +// * Small performance improvement in h3 library {es-pull}113385[#113385] +// * Support docvalues only query in shape field {es-pull}112199[#112199] +// +// Health:: +// * (API) Cluster Health report `unassigned_primary_shards` {es-pull}112024[#112024] +// * Do not treat replica as unassigned if primary recently created and unassigned time is below a threshold {es-pull}112066[#112066] +// * Increase `replica_unassigned_buffer_time` default from 3s to 5s {es-pull}112834[#112834] +// +// ILM+SLM:: +// * ILM: Add `total_shards_per_node` setting to searchable snapshot {es-pull}112972[#112972] (issue: {es-issue}112261[#112261]) +// * PUT slm policy should only increase version if actually changed {es-pull}111079[#111079] +// * Preserve Step Info Across ILM Auto Retries {es-pull}113187[#113187] +// * Register SLM run before snapshotting to save stats {es-pull}110216[#110216] +// * SLM interval schedule followup - add back `getFieldName` style getters {es-pull}112123[#112123] +// +// Infra/Circuit Breakers:: +// * Add link to Circuit Breaker "Data too large" exception message {es-pull}113561[#113561] +// +// Infra/Core:: +// * Add nanos support to `ZonedDateTime` serialization {es-pull}111689[#111689] (issue: {es-issue}68292[#68292]) +// * Extend logging for dropped warning headers {es-pull}111624[#111624] (issue: {es-issue}90527[#90527]) +// * Give the kibana system user permission to read security entities {es-pull}114363[#114363] +// +// Infra/Metrics:: +// * Add `TaskManager` to `pluginServices` {es-pull}112687[#112687] +// * Add `ensureGreen` test method for use with `adminClient` {es-pull}113425[#113425] +// +// Infra/REST API:: +// * Optimize the loop processing of URL decoding {es-pull}110237[#110237] (issue: {es-issue}110235[#110235]) +// +// Infra/Scripting:: +// * Add a `mustache.max_output_size_bytes` setting to limit the length of results from mustache scripts {es-pull}114002[#114002] +// * Expose `HexFormat` in Painless {es-pull}112412[#112412] +// +// Infra/Settings:: +// * Improve exception message for bad environment variable placeholders in settings {es-pull}114552[#114552] (issue: {es-issue}110858[#110858]) +// * Reprocess operator file settings when settings service starts, due to node restart or master node change {es-pull}114295[#114295] +// +// Ingest Node:: +// * Add `size_in_bytes` to enrich cache stats {es-pull}110578[#110578] +// * Add support for templates when validating mappings in the simulate ingest API {es-pull}111161[#111161] +// * Adding `index_template_substitutions` to the simulate ingest API {es-pull}114128[#114128] +// * Adding component template substitutions to the simulate ingest API {es-pull}113276[#113276] +// * Adding mapping validation to the simulate ingest API {es-pull}110606[#110606] +// * Adding support for additional mapping to simulate ingest API {es-pull}114742[#114742] +// * Adding support for simulate ingest mapping adddition for indices with mappings that do not come from templates {es-pull}115359[#115359] +// * Adds example plugin for custom ingest processor {es-pull}112282[#112282] (issue: {es-issue}111539[#111539]) +// * Fix unnecessary mustache template evaluation {es-pull}110986[#110986] (issue: {es-issue}110191[#110191]) +// * Listing all available databases in the _ingest/geoip/database API {es-pull}113498[#113498] +// * Make enrich cache based on memory usage {es-pull}111412[#111412] (issue: {es-issue}106081[#106081]) +// * Tag redacted document in ingest metadata {es-pull}113552[#113552] +// * Verify Maxmind database types in the geoip processor {es-pull}114527[#114527] +// +// Logs:: +// * Add validation for synthetic source mode in logs mode indices {es-pull}110677[#110677] +// * Store original source for keywords using a normalizer {es-pull}112151[#112151] +// +// Machine Learning:: +// * Add Completion Inference API for Alibaba Cloud AI Search Model {es-pull}112512[#112512] +// * Add DeBERTa-V2/V3 tokenizer {es-pull}111852[#111852] +// * Add Streaming Inference spec {es-pull}113812[#113812] +// * Add chunking settings configuration to `CohereService,` `AmazonBedrockService,` and `AzureOpenAiService` {es-pull}113897[#113897] +// * Add chunking settings configuration to `ElasticsearchService/ELSER` {es-pull}114429[#114429] +// * Add custom rule parameters to force time shift {es-pull}110974[#110974] +// * Adding chunking settings to `GoogleVertexAiService,` `AzureAiStudioService,` and `AlibabaCloudSearchService` {es-pull}113981[#113981] +// * Adding chunking settings to `MistralService,` `GoogleAiStudioService,` and `HuggingFaceService` {es-pull}113623[#113623] +// * Adds a new Inference API for streaming responses back to the user. {es-pull}113158[#113158] +// * Create `StreamingHttpResultPublisher` {es-pull}112026[#112026] +// * Create an ml node inference endpoint referencing an existing model {es-pull}114750[#114750] +// * Default inference endpoint for ELSER {es-pull}113873[#113873] +// * Default inference endpoint for the multilingual-e5-small model {es-pull}114683[#114683] +// * Enable OpenAI Streaming {es-pull}113911[#113911] +// * Filter empty task settings objects from the API response {es-pull}114389[#114389] +// * Increase default `queue_capacity` to 10_000 and decrease max `queue_capacity` to 100_000 {es-pull}115041[#115041] +// * Migrate Inference to `ChunkedToXContent` {es-pull}111655[#111655] +// * Register Task while Streaming {es-pull}112369[#112369] +// * Server-Sent Events for Inference response {es-pull}112565[#112565] +// * Stream Anthropic Completion {es-pull}114321[#114321] +// * Stream Azure Completion {es-pull}114464[#114464] +// * Stream Bedrock Completion {es-pull}114732[#114732] +// * Stream Cohere Completion {es-pull}114080[#114080] +// * Stream Google Completion {es-pull}114596[#114596] +// * Stream OpenAI Completion {es-pull}112677[#112677] +// * Support sparse embedding models in the elasticsearch inference service {es-pull}112270[#112270] +// * Switch default chunking strategy to sentence {es-pull}114453[#114453] +// * Upgrade to AWS SDK v2 {es-pull}114309[#114309] (issue: {es-issue}110590[#110590]) +// * Use the same chunking configurations for models in the Elasticsearch service {es-pull}111336[#111336] +// * Validate streaming HTTP Response {es-pull}112481[#112481] +// * Wait for allocation on scale up {es-pull}114719[#114719] +// * [Inference API] Add Alibaba Cloud AI Search Model support to Inference API {es-pull}111181[#111181] +// * [Inference API] Add Docs for AlibabaCloud AI Search Support for the Inference API {es-pull}111181[#111181] +// * [Inference API] Introduce Update API to change some aspects of existing inference endpoints {es-pull}114457[#114457] +// * [Inference API] Prevent inference endpoints from being deleted if they are referenced by semantic text {es-pull}110399[#110399] +// * [Inference API] alibabacloud ai search service support chunk infer to support semantic_text field {es-pull}110399[#110399] +// +// Mapping:: +// * Add Field caps support for Semantic Text {es-pull}111809[#111809] +// * Add Lucene segment-level fields stats {es-pull}111123[#111123] +// * Add Search Inference ID To Semantic Text Mapping {es-pull}113051[#113051] +// * Add object param for keeping synthetic source {es-pull}113690[#113690] +// * Add support for multi-value dimensions {es-pull}112645[#112645] (issue: {es-issue}110387[#110387]) +// * Allow dimension fields to have multiple values in standard and logsdb index mode {es-pull}112345[#112345] (issues: {es-issue}112232[#112232], {es-issue}112239[#112239]) +// * Allow fields with dots in sparse vector field mapper {es-pull}111981[#111981] (issue: {es-issue}109118[#109118]) +// * Allow querying `index_mode` {es-pull}110676[#110676] +// * Configure keeping source in `FieldMapper` {es-pull}112706[#112706] +// * Control storing array source with index setting {es-pull}112397[#112397] +// * Introduce mode `subobjects=auto` for objects {es-pull}110524[#110524] +// * Update `semantic_text` field to support indexing numeric and boolean data types {es-pull}111284[#111284] +// * Use ELSER By Default For Semantic Text {es-pull}113563[#113563] +// * Use fallback synthetic source for `copy_to` and doc_values: false cases {es-pull}112294[#112294] (issues: {es-issue}110753[#110753], {es-issue}110038[#110038], {es-issue}109546[#109546]) +// +// Network:: +// * Add links to network disconnect troubleshooting {es-pull}112330[#112330] +// +// Ranking:: +// * Add timeout and cancellation check to rescore phase {es-pull}115048[#115048] +// +// Recovery:: +// * Trigger merges after recovery {es-pull}113102[#113102] +// +// Relevance:: +// * Add a query rules tester API call {es-pull}114168[#114168] +// +// Search:: +// * Add initial support for `semantic_text` field type {es-pull}113920[#113920] +// * Add more `dense_vector` details for cluster stats field stats {es-pull}113607[#113607] +// * Add range and regexp Intervals {es-pull}111465[#111465] +// * Adding support for `allow_partial_search_results` in PIT {es-pull}111516[#111516] +// * Allow incubating Panama Vector in simdvec, and add vectorized `ipByteBin` {es-pull}112933[#112933] +// * Avoid using concurrent collector manager in `LuceneChangesSnapshot` {es-pull}113816[#113816] +// * Bool query early termination should also consider `must_not` clauses {es-pull}115031[#115031] +// * Deduplicate Kuromoji User Dictionary {es-pull}112768[#112768] +// * Multi term intervals: increase max_expansions {es-pull}112826[#112826] (issue: {es-issue}110491[#110491]) +// * Search coordinator uses `event.ingested` in cluster state to do rewrites {es-pull}111523[#111523] +// * Update cluster stats for retrievers {es-pull}114109[#114109] +// +// Security:: +// * (logger) change from error to warn for short circuiting user {es-pull}112895[#112895] +// * Add asset criticality indices for `kibana_system_user` {es-pull}113588[#113588] +// * Add tier preference to security index settings allowlist {es-pull}111818[#111818] +// * [Service Account] Add `AutoOps` account {es-pull}111316[#111316] +// +// Snapshot/Restore:: +// * Add `max_multipart_parts` setting to S3 repository {es-pull}113989[#113989] +// * Add support for Azure Managed Identity {es-pull}111344[#111344] +// * Add telemetry for repository usage {es-pull}112133[#112133] +// * Add workaround for missing shard gen blob {es-pull}112337[#112337] +// * Clean up dangling S3 multipart uploads {es-pull}111955[#111955] (issues: {es-issue}101169[#101169], {es-issue}44971[#44971]) +// * Execute shard snapshot tasks in shard-id order {es-pull}111576[#111576] (issue: {es-issue}108739[#108739]) +// * Include account name in Azure settings exceptions {es-pull}111274[#111274] +// * Introduce repository integrity verification API {es-pull}112348[#112348] (issue: {es-issue}52622[#52622]) +// * Retry `S3BlobContainer#getRegister` on all exceptions {es-pull}114813[#114813] +// * Track shard snapshot progress during node shutdown {es-pull}112567[#112567] +// +// Stats:: +// * Track search and fetch failure stats {es-pull}113988[#113988] +// +// TSDB:: +// * Add support for boolean dimensions {es-pull}111457[#111457] (issue: {es-issue}111338[#111338]) +// * Stop iterating over all fields to extract @timestamp value {es-pull}110603[#110603] (issue: {es-issue}92297[#92297]) +// * Support booleans in routing path {es-pull}111445[#111445] +// +// Vector Search:: +// * Dense vector field types updatable for int4 {es-pull}110928[#110928] +// * Use native scalar scorer for int8_flat index {es-pull}111071[#111071] +// +// [[feature-9.0.0]] +// [float] +// === New features +// +// Data streams:: +// * Introduce global retention in data stream lifecycle. {es-pull}111972[#111972] +// * X-pack/plugin/otel: introduce x-pack-otel plugin {es-pull}111091[#111091] +// +// ES|QL:: +// * Add ESQL match function {es-pull}113374[#113374] +// * ESQL: Add `MV_PSERIES_WEIGHTED_SUM` for score calculations used by security solution {es-pull}109017[#109017] +// * ESQL: Add async ID and `is_running` headers to ESQL async query {es-pull}111840[#111840] +// * ESQL: Add boolean support to Max and Min aggs {es-pull}110527[#110527] +// * ESQL: Add boolean support to TOP aggregation {es-pull}110718[#110718] +// * ESQL: Added `mv_percentile` function {es-pull}111749[#111749] (issue: {es-issue}111591[#111591]) +// * ESQL: INLINESTATS {es-pull}109583[#109583] (issue: {es-issue}107589[#107589]) +// * ESQL: Introduce per agg filter {es-pull}113735[#113735] +// * ESQL: Strings support for MAX and MIN aggregations {es-pull}111544[#111544] +// * ESQL: Support IP fields in MAX and MIN aggregations {es-pull}110921[#110921] +// * ESQL: TOP aggregation IP support {es-pull}111105[#111105] +// * ESQL: TOP support for strings {es-pull}113183[#113183] (issue: {es-issue}109849[#109849]) +// * ESQL: `mv_median_absolute_deviation` function {es-pull}112055[#112055] (issue: {es-issue}111590[#111590]) +// * Remove snapshot build restriction for match and qstr functions {es-pull}114482[#114482] +// * Search in ES|QL: Add MATCH operator {es-pull}110971[#110971] +// +// ILM+SLM:: +// * SLM Interval based scheduling {es-pull}110847[#110847] +// +// Inference:: +// * EIS integration {es-pull}111154[#111154] +// +// Ingest Node:: +// * Add a `terminate` ingest processor {es-pull}114157[#114157] (issue: {es-issue}110218[#110218]) +// +// Machine Learning:: +// * Inference autoscaling {es-pull}109667[#109667] +// * Telemetry for inference adaptive allocations {es-pull}110630[#110630] +// +// Relevance:: +// * [Query rules] Add `exclude` query rule type {es-pull}111420[#111420] +// +// Search:: +// * Async search: Add ID and "is running" http headers {es-pull}112431[#112431] (issue: {es-issue}109576[#109576]) +// * Cross-cluster search telemetry {es-pull}113825[#113825] +// +// Vector Search:: +// * Adding new bbq index types behind a feature flag {es-pull}114439[#114439] + +[[upgrade-9.0.0]] +[float] +=== Upgrades +// +// Infra/Core:: +// * Upgrade xcontent to Jackson 2.17.0 {es-pull}111948[#111948] +// * Upgrade xcontent to Jackson 2.17.2 {es-pull}112320[#112320] +// +// Infra/Metrics:: +// * Update APM Java Agent to support JDK 23 {es-pull}115194[#115194] (issues: {es-issue}115101[#115101], {es-issue}115100[#115100]) +// +// Search:: +// * Upgrade to Lucene 10 {es-pull}114741[#114741] +// * Upgrade to Lucene 9.12 {es-pull}113333[#113333] +// +// Snapshot/Restore:: +// * Upgrade Azure SDK {es-pull}111225[#111225] +// * Upgrade `repository-azure` dependencies {es-pull}112277[#112277] + + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index c3f6fb43f2ffd..edecd4f727583 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -1,6 +1,8 @@ -[chapter] +// THIS IS A GENERATED FILE. DO NOT EDIT DIRECTLY. +// The content generated here are is not correct and most has been manually commented out until it can be fixed. +// See ES-9931 for more details. [[release-highlights]] -= What's new in {minor-version} +== What's new in {minor-version} coming::[{minor-version}] @@ -9,95 +11,164 @@ ifeval::["{release-state}"!="unreleased"] For detailed information about this release, see the <> and <>. -// Add previous release to the list -Other versions: - -{ref-bare}/8.16/release-highlights.html[8.16] -| {ref-bare}/8.15/release-highlights.html[8.15] -| {ref-bare}/8.14/release-highlights.html[8.14] -| {ref-bare}/8.13/release-highlights.html[8.13] -| {ref-bare}/8.12/release-highlights.html[8.12] -| {ref-bare}/8.11/release-highlights.html[8.11] -| {ref-bare}/8.10/release-highlights.html[8.10] -| {ref-bare}/8.9/release-highlights.html[8.9] -| {ref-bare}/8.8/release-highlights.html[8.8] -| {ref-bare}/8.7/release-highlights.html[8.7] -| {ref-bare}/8.6/release-highlights.html[8.6] -| {ref-bare}/8.5/release-highlights.html[8.5] -| {ref-bare}/8.4/release-highlights.html[8.4] -| {ref-bare}/8.3/release-highlights.html[8.3] -| {ref-bare}/8.2/release-highlights.html[8.2] -| {ref-bare}/8.1/release-highlights.html[8.1] -| {ref-bare}/8.0/release-highlights.html[8.0] - endif::[] - -// The notable-highlights tag marks entries that -// should be featured in the Stack Installation and Upgrade Guide: -// tag::notable-highlights[] - -[discrete] -[[esql_inlinestats]] -== ESQL: INLINESTATS -This adds the `INLINESTATS` command to ESQL which performs a STATS and -then enriches the results into the output stream. So, this query: - -[source,esql] ----- -FROM test -| INLINESTATS m=MAX(a * b) BY b -| WHERE m == a * b -| SORT a DESC, b DESC -| LIMIT 3 ----- - -Produces output like: - -| a | b | m | -| --- | --- | ----- | -| 99 | 999 | 98901 | -| 99 | 998 | 98802 | -| 99 | 997 | 98703 | - -{es-pull}109583[#109583] - -[discrete] -[[always_allow_rebalancing_by_default]] -== Always allow rebalancing by default -In earlier versions of {es} the `cluster.routing.allocation.allow_rebalance` setting defaults to -`indices_all_active` which blocks all rebalancing moves while the cluster is in `yellow` or `red` health. This was -appropriate for the legacy allocator which might do too many rebalancing moves otherwise. Today's allocator has -better support for rebalancing a cluster that is not in `green` health, and expects to be able to rebalance some -shards away from over-full nodes to avoid allocating shards to undesirable locations in the first place. From -version 8.16 `allow_rebalance` setting defaults to `always` unless the legacy allocator is explicitly enabled. - -{es-pull}111015[#111015] - -[discrete] -[[add_global_retention_in_data_stream_lifecycle]] -== Add global retention in data stream lifecycle -Data stream lifecycle now supports configuring retention on a cluster level, -namely global retention. Global retention \nallows us to configure two different -retentions: - -- `data_streams.lifecycle.retention.default` is applied to all data streams managed -by the data stream lifecycle that do not have retention defined on the data stream level. -- `data_streams.lifecycle.retention.max` is applied to all data streams managed by the -data stream lifecycle and it allows any data stream \ndata to be deleted after the `max_retention` has passed. - -{es-pull}111972[#111972] - -[discrete] -[[enable_zstandard_compression_for_indices_with_index_codec_set_to_best_compression]] -== Enable ZStandard compression for indices with index.codec set to best_compression -Before DEFLATE compression was used to compress stored fields in indices with index.codec index setting set to -best_compression, with this change ZStandard is used as compression algorithm to stored fields for indices with -index.codec index setting set to best_compression. The usage ZStandard results in less storage usage with a -similar indexing throughput depending on what options are used. Experiments with indexing logs have shown that -ZStandard offers ~12% lower storage usage and a ~14% higher indexing throughput compared to DEFLATE. - -{es-pull}112665[#112665] - -// end::notable-highlights[] - +// +// // tag::notable-highlights[] +// +// [discrete] +// [[esql_inlinestats]] +// === ESQL: INLINESTATS +// This adds the `INLINESTATS` command to ESQL which performs a STATS and +// then enriches the results into the output stream. So, this query: +// +// [source,esql] +// ---- +// FROM test +// | INLINESTATS m=MAX(a * b) BY b +// | WHERE m == a * b +// | SORT a DESC, b DESC +// | LIMIT 3 +// ---- +// +// Produces output like: +// +// | a | b | m | +// | --- | --- | ----- | +// | 99 | 999 | 98901 | +// | 99 | 998 | 98802 | +// | 99 | 997 | 98703 | +// +// {es-pull}109583[#109583] +// +// [discrete] +// [[always_allow_rebalancing_by_default]] +// === Always allow rebalancing by default +// In earlier versions of {es} the `cluster.routing.allocation.allow_rebalance` setting defaults to +// `indices_all_active` which blocks all rebalancing moves while the cluster is in `yellow` or `red` health. This was +// appropriate for the legacy allocator which might do too many rebalancing moves otherwise. Today's allocator has +// better support for rebalancing a cluster that is not in `green` health, and expects to be able to rebalance some +// shards away from over-full nodes to avoid allocating shards to undesirable locations in the first place. From +// version 8.16 `allow_rebalance` setting defaults to `always` unless the legacy allocator is explicitly enabled. +// +// {es-pull}111015[#111015] +// +// [discrete] +// [[add_global_retention_in_data_stream_lifecycle]] +// === Add global retention in data stream lifecycle +// Data stream lifecycle now supports configuring retention on a cluster level, +// namely global retention. Global retention \nallows us to configure two different +// retentions: +// +// - `data_streams.lifecycle.retention.default` is applied to all data streams managed +// by the data stream lifecycle that do not have retention defined on the data stream level. +// - `data_streams.lifecycle.retention.max` is applied to all data streams managed by the +// data stream lifecycle and it allows any data stream \ndata to be deleted after the `max_retention` has passed. +// +// {es-pull}111972[#111972] +// +// [discrete] +// [[enable_zstandard_compression_for_indices_with_index_codec_set_to_best_compression]] +// === Enable ZStandard compression for indices with index.codec set to best_compression +// Before DEFLATE compression was used to compress stored fields in indices with index.codec index setting set to +// best_compression, with this change ZStandard is used as compression algorithm to stored fields for indices with +// index.codec index setting set to best_compression. The usage ZStandard results in less storage usage with a +// similar indexing throughput depending on what options are used. Experiments with indexing logs have shown that +// ZStandard offers ~12% lower storage usage and a ~14% higher indexing throughput compared to DEFLATE. +// +// {es-pull}112665[#112665] +// +// [discrete] +// [[esql_introduce_per_agg_filter]] +// === ESQL: Introduce per agg filter +// Add support for aggregation scoped filters that work dynamically on the +// data in each group. +// +// [source,esql] +// ---- +// | STATS success = COUNT(*) WHERE 200 <= code AND code < 300, +// redirect = COUNT(*) WHERE 300 <= code AND code < 400, +// client_err = COUNT(*) WHERE 400 <= code AND code < 500, +// server_err = COUNT(*) WHERE 500 <= code AND code < 600, +// total_count = COUNT(*) +// ---- +// +// Implementation wise, the base AggregateFunction has been extended to +// allow a filter to be passed on. This is required to incorporate the +// filter as part of the aggregate equality/identity which would fail with +// the filter as an external component. +// As part of the process, the serialization for the existing aggregations +// had to be fixed so AggregateFunction implementations so that it +// delegates to their parent first. +// +// {es-pull}113735[#113735] +// +// // end::notable-highlights[] +// +// +// [discrete] +// [[esql_multi_value_fields_supported_in_geospatial_predicates]] +// === ESQL: Multi-value fields supported in Geospatial predicates +// Supporting multi-value fields in `WHERE` predicates is a challenge due to not knowing whether `ALL` or `ANY` +// of the values in the field should pass the predicate. +// For example, should the field `age:[10,30]` pass the predicate `WHERE age>20` or not? +// This ambiguity does not exist with the spatial predicates +// `ST_INTERSECTS` and `ST_DISJOINT`, because the choice between `ANY` or `ALL` +// is implied by the predicate itself. +// Consider a predicate checking a field named `location` against a test geometry named `shape`: +// +// * `ST_INTERSECTS(field, shape)` - true if `ANY` value can intersect the shape +// * `ST_DISJOINT(field, shape)` - true only if `ALL` values are disjoint from the shape +// +// This works even if the shape argument is itself a complex or compound geometry. +// +// Similar logic exists for `ST_CONTAINS` and `ST_WITHIN` predicates, but these are not as easily solved +// with `ANY` or `ALL`, because a collection of geometries contains another collection if each of the contained +// geometries is within at least one of the containing geometries. Evaluating this requires that the multi-value +// field is first combined into a single geometry before performing the predicate check. +// +// * `ST_CONTAINS(field, shape)` - true if the combined geometry contains the shape +// * `ST_WITHIN(field, shape)` - true if the combined geometry is within the shape +// +// {es-pull}112063[#112063] +// +// [discrete] +// [[enhance_sort_push_down_to_lucene_to_cover_references_to_fields_st_distance_function]] +// === Enhance SORT push-down to Lucene to cover references to fields and ST_DISTANCE function +// The most used and likely most valuable geospatial search query in Elasticsearch is the sorted proximity search, +// finding items within a certain distance of a point of interest and sorting the results by distance. +// This has been possible in ES|QL since 8.15.0, but the sorting was done in-memory, not pushed down to Lucene. +// Now the sorting is pushed down to Lucene, which results in a significant performance improvement. +// +// Queries that perform both filtering and sorting on distance are supported. For example: +// +// [source,esql] +// ---- +// FROM test +// | EVAL distance = ST_DISTANCE(location, TO_GEOPOINT("POINT(37.7749, -122.4194)")) +// | WHERE distance < 1000000 +// | SORT distance ASC, name DESC +// | LIMIT 10 +// ---- +// +// In addition, the support for sorting on EVAL expressions has been extended to cover references to fields: +// +// [source,esql] +// ---- +// FROM test +// | EVAL ref = field +// | SORT ref ASC +// | LIMIT 10 +// ---- +// +// {es-pull}112938[#112938] +// +// [discrete] +// [[cross_cluster_search_telemetry]] +// === Cross-cluster search telemetry +// The cross-cluster search telemetry is collected when cross-cluster searches +// are performed, and is returned as "ccs" field in `_cluster/stats` output. +// It also add a new parameter `include_remotes=true` to the `_cluster/stats` API +// which will collect data from connected remote clusters. +// +// {es-pull}113825[#113825] diff --git a/docs/reference/rest-api/rest-api-compatibility.asciidoc b/docs/reference/rest-api/rest-api-compatibility.asciidoc index 14193fa9371bd..dbc140d1f9236 100644 --- a/docs/reference/rest-api/rest-api-compatibility.asciidoc +++ b/docs/reference/rest-api/rest-api-compatibility.asciidoc @@ -6,11 +6,6 @@ API changes, {es} provides a per-request, opt-in API compatibility mode. {es} REST APIs are generally stable across versions. However, some improvements require changes that are not compatible with previous versions. -For example, {es} 7.x supported custom mapping types in many URL paths, -but {es} 8.0+ does not (see <>). Specifying a custom type -in a request sent to {es} 8.0+ returns an error. However, if you request -REST API compatibility, {es} accepts the request even though mapping types -are no longer supported. When an API is targeted for removal or is going to be changed in a non-compatible way, the original API is deprecated for one or more releases. @@ -32,20 +27,18 @@ IMPORTANT: REST API compatibility does not guarantee the same behavior as the prior version. It instructs {es} to automatically resolve any incompatibilities so the request can be processed instead of returning an error. - REST API compatibility should be a bridge to smooth out the upgrade process, not a long term strategy. REST API compatibility is only honored across one -major version: honor 7.x requests/responses from 8.x. +major version: honor 8.x requests/responses from 9.x. When you submit requests using REST API compatibility and {es} resolves the incompatibility, a message is written to the deprecation log with the category "compatible_api". Review the deprecation log to identify any gaps in usage and fully supported features. - -For information about specific breaking changes and the impact of requesting -compatibility mode, see <> -in the migration guide. +//TODO: add this back once this is fixed: ES-9932 +// For information about specific breaking changes and the impact of requesting +// compatibility mode. See <> in the migration guide. [discrete] [[request-rest-api-compatibility]] @@ -58,19 +51,19 @@ For example: [source, text] ------------------------------------------------------------ -Accept: "application/vnd.elasticsearch+json;compatible-with=7" -Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" +Accept: "application/vnd.elasticsearch+json;compatible-with=8" +Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" ------------------------------------------------------------ The Accept header is always required and the Content-Type header is only required when a body is sent with the request. The following values are -valid when communicating with a 7.x or 8.x {es} server: +valid when communicating with a 8.x or 9.x {es} server: [source, text] ------------------------------------------------------------ -"application/vnd.elasticsearch+json;compatible-with=7" -"application/vnd.elasticsearch+yaml;compatible-with=7" -"application/vnd.elasticsearch+smile;compatible-with=7" -"application/vnd.elasticsearch+cbor;compatible-with=7" +"application/vnd.elasticsearch+json;compatible-with=8" +"application/vnd.elasticsearch+yaml;compatible-with=8" +"application/vnd.elasticsearch+smile;compatible-with=8" +"application/vnd.elasticsearch+cbor;compatible-with=8" ------------------------------------------------------------ The https://www.elastic.co/guide/en/elasticsearch/client/index.html[officially supported {es} clients] can enable REST API compatibility for all requests. @@ -81,10 +74,10 @@ by {es} set the environment variable `ELASTIC_CLIENT_APIVERSIONING` to true. [discrete] === REST API compatibility workflow -To leverage REST API compatibility during an upgrade from 7.17 to {version}: +To leverage REST API compatibility during an upgrade from the last 8.x to {version}: 1. Upgrade your https://www.elastic.co/guide/en/elasticsearch/client/index.html[{es} clients] -to the latest 7.x version and enable REST API compatibility. +to the latest 8.x version and enable REST API compatibility. 2. Use the {kibana-ref-all}/{prev-major-last}/upgrade-assistant.html[Upgrade Assistant] to review all critical issues and explore the deprecation logs. Some critical issues might be mitigated by REST API compatibility. @@ -92,5 +85,6 @@ Some critical issues might be mitigated by REST API compatibility. 4. Upgrade Elasticsearch to {version}. 5. Review the deprecation logs for entries with the category `compatible_api`. Review the workflow associated with the requests that relied on compatibility mode. -6. Upgrade your {es} clients to 8.x and resolve compatibility issues manually where needed. +6. Upgrade your {es} clients to 9.x and resolve compatibility issues manually where needed. + From 3cbbcc57485ddc88145caefa7124164089b1dac0 Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Thu, 31 Oct 2024 15:54:42 +0100 Subject: [PATCH 242/324] Default LogsDB value for `ignore_dynamic_beyond_limit` (#115265) When ingesting logs, it's important to ensure that documents are not dropped due to mapping issues, also when dealing with dynamically mapped fields. Elasticsearch provides two key settings that help manage the total number of field mappings and handle situations where this limit might be exceeded: 1. **`index.mapping.total_fields.limit`**: This setting defines the maximum number of fields allowed in an index. If this limit is reached, any further mapped fields would cause indexing to fail. 2. **`index.mapping.total_fields.ignore_dynamic_beyond_limit`**: This setting determines whether Elasticsearch should ignore any dynamically mapped fields that exceed the limit defined by `index.mapping.total_fields.limit`. If set to `false`, indexing will fail once the limit is surpassed. However, if set to `true`, Elasticsearch will continue indexing the document but will silently ignore any additional dynamically mapped fields beyond the limit. To prevent indexing failures due to dynamic mapping issues, especially in logs where the schema might change frequently, we change the default value of **`index.mapping.total_fields.ignore_dynamic_beyond_limit` from `false` to `true` in LogsDB**. This change ensures that even when the number of dynamically mapped fields exceeds the set limit, documents will still be indexed, and additional fields will simply be ignored rather than causing an indexing failure. This adjustment is important for LogsDB, where dynamically mapped fields may be common, and we want to make sure to avoid documents from being dropped. --- .../indices.create/20_synthetic_source.yml | 1 + .../rest-api-spec/test/logsdb/10_settings.yml | 281 ++++++++++++++++++ .../elasticsearch/index/IndexVersions.java | 3 +- .../index/mapper/MapperFeatures.java | 3 +- .../index/mapper/MapperService.java | 17 +- 5 files changed, 302 insertions(+), 3 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 258dfeb57e00c..cc5fd0e08e695 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -1,3 +1,4 @@ +--- object with unmapped fields: - requires: cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml index 4439441efdd02..20c2ef63fc850 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml @@ -599,3 +599,284 @@ end time not allowed in logs mode: - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.type: "illegal_argument_exception" } - match: { error.reason: "[index.time_series.end_time] requires [index.mode=time_series]" } + +--- +ignore dynamic beyond limit logsdb default value: + - requires: + cluster_features: [ "mapper.logsdb_default_ignore_dynamic_beyond_limit" ] + reason: requires logsdb default value for `index.mapping.total_fields.ignore_dynamic_beyond_limit` + + - do: + indices.create: + index: test-ignore-dynamic-default + body: + settings: + index: + mode: logsdb + + - do: + indices.get_settings: + index: test-ignore-dynamic-default + include_defaults: true + + - match: { test-ignore-dynamic-default.settings.index.mode: "logsdb" } + - match: { test-ignore-dynamic-default.defaults.index.mapping.total_fields.limit: "1000" } + - match: { test-ignore-dynamic-default.defaults.index.mapping.total_fields.ignore_dynamic_beyond_limit: "true" } + +--- +ignore dynamic beyond limit logsdb override value: + - requires: + cluster_features: [ "mapper.logsdb_default_ignore_dynamic_beyond_limit" ] + reason: requires logsdb default value for `index.mapping.total_fields.ignore_dynamic_beyond_limit` + + - do: + indices.create: + index: test-ignore-dynamic-override + body: + settings: + index: + mode: logsdb + mapping: + total_fields: + ignore_dynamic_beyond_limit: false + + - do: + indices.get_settings: + index: test-ignore-dynamic-override + + - match: { test-ignore-dynamic-override.settings.index.mode: "logsdb" } + - match: { test-ignore-dynamic-override.settings.index.mapping.total_fields.ignore_dynamic_beyond_limit: "false" } + +--- +logsdb with default ignore dynamic beyond limit and default sorting: + - requires: + cluster_features: ["mapper.logsdb_default_ignore_dynamic_beyond_limit"] + reason: requires default value for ignore_dynamic_beyond_limit + + - do: + indices.create: + index: test-logsdb-default-sort + body: + settings: + index: + mode: logsdb + mapping: + # NOTE: When the index mode is set to `logsdb`, the `host.name` field is automatically injected if + # sort settings are not overridden. + # With `subobjects` set to `true` (default), this creates a `host` object field and a nested `name` + # keyword field (`host.name`). + # + # As a result, there are always at least 4 statically mapped fields (`@timestamp`, `host`, `host.name` + # and `name`). We cannot use a field limit lower than 4 because these fields are always present. + # + # Indeed, if `index.mapping.total_fields.ignore_dynamic_beyond_limit` is `true`, any dynamically + # mapped fields beyond the limit `index.mapping.total_fields.limit` are ignored, but the statically + # mapped fields are always counted. + total_fields: + limit: 4 + mappings: + properties: + "@timestamp": + type: date + name: + type: keyword + + - do: + indices.get_settings: + index: test-logsdb-default-sort + + - match: { test-logsdb-default-sort.settings.index.mode: "logsdb" } + + - do: + bulk: + index: test-logsdb-default-sort + refresh: true + body: + - '{ "index": { } }' + - '{ "@timestamp": "2024-08-13T12:30:00Z", "name": "foo", "host.name": "92f4a67c", "value": 10, "message": "the quick brown fox", "region": "us-west", "pid": 153462 }' + - '{ "index": { } }' + - '{ "@timestamp": "2024-08-13T12:01:00Z", "name": "bar", "host.name": "24eea278", "value": 20, "message": "jumps over the lazy dog", "region": "us-central", "pid": 674972 }' + - match: { errors: false } + + - do: + search: + index: test-logsdb-default-sort + body: + query: + match_all: {} + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: "bar" } + - match: { hits.hits.0._source.value: 20 } + - match: { hits.hits.0._source.message: "jumps over the lazy dog" } + - match: { hits.hits.0._ignored: [ "message", "pid", "region", "value" ] } + - match: { hits.hits.1._source.name: "foo" } + - match: { hits.hits.1._source.value: 10 } + - match: { hits.hits.1._source.message: "the quick brown fox" } + - match: { hits.hits.1._ignored: [ "message", "pid", "region", "value" ] } + +--- +logsdb with default ignore dynamic beyond limit and non-default sorting: + - requires: + cluster_features: ["mapper.logsdb_default_ignore_dynamic_beyond_limit"] + reason: requires default value for ignore_dynamic_beyond_limit + + - do: + indices.create: + index: test-logsdb-non-default-sort + body: + settings: + index: + sort.field: [ "name" ] + sort.order: [ "desc" ] + mode: logsdb + mapping: + # NOTE: Here sort settings are overridden and we do not have any additional statically mapped field other + # than `name` and `timestamp`. As a result, there are only 2 statically mapped fields. + total_fields: + limit: 2 + mappings: + properties: + "@timestamp": + type: date + name: + type: keyword + + - do: + indices.get_settings: + index: test-logsdb-non-default-sort + + - match: { test-logsdb-non-default-sort.settings.index.mode: "logsdb" } + + - do: + bulk: + index: test-logsdb-non-default-sort + refresh: true + body: + - '{ "index": { } }' + - '{ "@timestamp": "2024-08-13T12:30:00Z", "name": "foo", "host.name": "92f4a67c", "value": 10, "message": "the quick brown fox", "region": "us-west", "pid": 153462 }' + - '{ "index": { } }' + - '{ "@timestamp": "2024-08-13T12:01:00Z", "name": "bar", "host.name": "24eea278", "value": 20, "message": "jumps over the lazy dog", "region": "us-central", "pid": 674972 }' + - match: { errors: false } + + - do: + search: + index: test-logsdb-non-default-sort + body: + query: + match_all: {} + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: "foo" } + - match: { hits.hits.0._source.value: 10 } + - match: { hits.hits.0._source.message: "the quick brown fox" } + - match: { hits.hits.0._ignored: [ "host", "message", "pid", "region", "value" ] } + - match: { hits.hits.1._source.name: "bar" } + - match: { hits.hits.1._source.value: 20 } + - match: { hits.hits.1._source.message: "jumps over the lazy dog" } + - match: { hits.hits.1._ignored: [ "host", "message", "pid", "region", "value" ] } + +--- +logsdb with default ignore dynamic beyond limit and too low limit: + - requires: + cluster_features: ["mapper.logsdb_default_ignore_dynamic_beyond_limit"] + reason: requires default value for ignore_dynamic_beyond_limit + + - do: + catch: bad_request + indices.create: + index: test-logsdb-low-limit + body: + settings: + index: + mode: logsdb + mapping: + # NOTE: When the index mode is set to `logsdb`, the `host.name` field is automatically injected if + # sort settings are not overridden. + # With `subobjects` set to `true` (default), this creates a `host` object field and a nested `name` + # keyword field (`host.name`). + # + # As a result, there are always at least 4 statically mapped fields (`@timestamp`, `host`, `host.name` + # and `name`). We cannot use a field limit lower than 4 because these fields are always present. + # + # Indeed, if `index.mapping.total_fields.ignore_dynamic_beyond_limit` is `true`, any dynamically + # mapped fields beyond the limit `index.mapping.total_fields.limit` are ignored, but the statically + # mapped fields are always counted. + total_fields: + limit: 3 + mappings: + properties: + "@timestamp": + type: date + name: + type: keyword + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "Limit of total fields [3] has been exceeded" } + +--- +logsdb with default ignore dynamic beyond limit and subobjects false: + - requires: + cluster_features: ["mapper.logsdb_default_ignore_dynamic_beyond_limit"] + reason: requires default value for ignore_dynamic_beyond_limit + + - do: + indices.create: + index: test-logsdb-subobjects-false + body: + settings: + index: + mode: logsdb + mapping: + # NOTE: When the index mode is set to `logsdb`, the `host.name` field is automatically injected if + # sort settings are not overridden. + # With `subobjects` set to `false` anyway, a single `host.name` keyword field is automatically mapped. + # + # As a result, there are just 3 statically mapped fields (`@timestamp`, `host.name` and `name`). + # We cannot use a field limit lower than 3 because these fields are always present. + # + # Indeed, if `index.mapping.total_fields.ignore_dynamic_beyond_limit` is `true`, any dynamically + # mapped fields beyond the limit `index.mapping.total_fields.limit` are ignored, but the statically + # mapped fields are always counted. + total_fields: + limit: 3 + mappings: + subobjects: false + properties: + "@timestamp": + type: date + name: + type: keyword + + - do: + indices.get_settings: + index: test-logsdb-subobjects-false + + - match: { test-logsdb-subobjects-false.settings.index.mode: "logsdb" } + + - do: + bulk: + index: test-logsdb-subobjects-false + refresh: true + body: + - '{ "index": { } }' + - '{ "@timestamp": "2024-08-13T12:30:00Z", "name": "foo", "host.name": "92f4a67c", "value": 10, "message": "the quick brown fox", "region": "us-west", "pid": 153462 }' + - '{ "index": { } }' + - '{ "@timestamp": "2024-08-13T12:01:00Z", "name": "bar", "host.name": "24eea278", "value": 20, "message": "jumps over the lazy dog", "region": "us-central", "pid": 674972 }' + - match: { errors: false } + + - do: + search: + index: test-logsdb-subobjects-false + body: + query: + match_all: {} + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: "bar" } + - match: { hits.hits.0._source.value: 20 } + - match: { hits.hits.0._source.message: "jumps over the lazy dog" } + - match: { hits.hits.0._ignored: [ "message", "pid", "region", "value" ] } + - match: { hits.hits.1._source.name: "foo" } + - match: { hits.hits.1._source.value: 10 } + - match: { hits.hits.1._source.message: "the quick brown fox" } + - match: { hits.hits.1._ignored: [ "message", "pid", "region", "value" ] } diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 2919f98ee200e..440613263d441 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -129,8 +129,9 @@ private static Version parseUnchecked(String version) { public static final IndexVersion UPGRADE_TO_LUCENE_9_12 = def(8_516_00_0, Version.LUCENE_9_12_0); public static final IndexVersion ENABLE_IGNORE_ABOVE_LOGSDB = def(8_517_00_0, Version.LUCENE_9_12_0); public static final IndexVersion ADD_ROLE_MAPPING_CLEANUP_MIGRATION = def(8_518_00_0, Version.LUCENE_9_12_0); + public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT_BACKPORT = def(8_519_00_0, Version.LUCENE_9_12_0); public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0); - + public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index a5f173afffba2..4797857fc12f8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -64,7 +64,8 @@ public Set getTestFeatures() { IgnoredSourceFieldMapper.DONT_EXPAND_DOTS_IN_IGNORED_SOURCE, SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION, IgnoredSourceFieldMapper.IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD, - IgnoredSourceFieldMapper.ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS + IgnoredSourceFieldMapper.ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS, + MapperService.LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 08461525526b9..7f952153c6453 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -22,9 +22,12 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -121,9 +124,21 @@ public boolean isAutoUpdate() { Property.IndexScope, Property.ServerlessPublic ); + + public static final NodeFeature LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = new NodeFeature( + "mapper.logsdb_default_ignore_dynamic_beyond_limit" + ); public static final Setting INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING = Setting.boolSetting( "index.mapping.total_fields.ignore_dynamic_beyond_limit", - false, + settings -> { + boolean isLogsDBIndexMode = IndexSettings.MODE.get(settings) == IndexMode.LOGSDB; + final IndexVersion indexVersionCreated = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings); + boolean isNewIndexVersion = indexVersionCreated.between( + IndexVersions.LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT_BACKPORT, + IndexVersions.UPGRADE_TO_LUCENE_10_0_0 + ) || indexVersionCreated.onOrAfter(IndexVersions.LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT); + return String.valueOf(isLogsDBIndexMode && isNewIndexVersion); + }, Property.Dynamic, Property.IndexScope, Property.ServerlessPublic From 50b9e4edcde5da57d36c25ef29c06402912b7906 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 1 Nov 2024 02:52:07 +1100 Subject: [PATCH 243/324] Mute org.elasticsearch.search.basic.SearchWithRandomExceptionsIT testRandomExceptions #116027 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c781a7d30a597..9b4b07b5c63d6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -287,6 +287,9 @@ tests: - class: org.elasticsearch.search.query.SearchQueryIT method: testAllDocsQueryString issue: https://github.com/elastic/elasticsearch/issues/115728 +- class: org.elasticsearch.search.basic.SearchWithRandomExceptionsIT + method: testRandomExceptions + issue: https://github.com/elastic/elasticsearch/issues/116027 # Examples: # From b280e946fbeb6d8c368dc9d174320f6d0bd6b16f Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 31 Oct 2024 10:13:13 -0700 Subject: [PATCH 244/324] Don't run validate changelogs task during 'check' tasks (#116028) --- build.gradle | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/build.gradle b/build.gradle index 746f964cb6158..2ef0511b2be88 100644 --- a/build.gradle +++ b/build.gradle @@ -409,6 +409,10 @@ gradle.projectsEvaluated { } } +tasks.named("validateChangelogs") { + onlyIf { project.gradle.startParameter.taskNames.any { it.startsWith("checkPart") || it == 'functionalTests' } == false } +} + tasks.named("precommit") { dependsOn gradle.includedBuild('build-tools').task(':precommit') dependsOn gradle.includedBuild('build-tools-internal').task(':precommit') From 0f38b2b10e898b2478debd1a25525f1b2ff11bfb Mon Sep 17 00:00:00 2001 From: Ying Mao Date: Thu, 31 Oct 2024 14:08:58 -0400 Subject: [PATCH 245/324] Fixing tests (#116032) --- .../xpack/inference/InferenceCrudIT.java | 38 ++++++++++++++----- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index fed63477701e3..f9a1318cd9740 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -13,8 +13,10 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceFeature; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -132,7 +134,11 @@ public void testApisWithoutTaskType() throws IOException { @SuppressWarnings("unchecked") public void testGetServicesWithoutTaskType() throws IOException { List services = getAllServices(); - assertThat(services.size(), equalTo(19)); + if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + assertThat(services.size(), equalTo(19)); + } else { + assertThat(services.size(), equalTo(18)); + } String[] providers = new String[services.size()]; for (int i = 0; i < services.size(); i++) { @@ -141,16 +147,15 @@ public void testGetServicesWithoutTaskType() throws IOException { } Arrays.sort(providers); - assertArrayEquals( - providers, - List.of( + + var providerList = new ArrayList<>( + Arrays.asList( "alibabacloud-ai-search", "amazonbedrock", "anthropic", "azureaistudio", "azureopenai", "cohere", - "elastic", "elasticsearch", "googleaistudio", "googlevertexai", @@ -163,8 +168,12 @@ public void testGetServicesWithoutTaskType() throws IOException { "test_service", "text_embedding_test_service", "watsonxai" - ).toArray() + ) ); + if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + providerList.add(6, "elastic"); + } + assertArrayEquals(providers, providerList.toArray()); } @SuppressWarnings("unchecked") @@ -248,7 +257,12 @@ public void testGetServicesWithCompletionTaskType() throws IOException { @SuppressWarnings("unchecked") public void testGetServicesWithSparseEmbeddingTaskType() throws IOException { List services = getServices(TaskType.SPARSE_EMBEDDING); - assertThat(services.size(), equalTo(6)); + + if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + assertThat(services.size(), equalTo(6)); + } else { + assertThat(services.size(), equalTo(5)); + } String[] providers = new String[services.size()]; for (int i = 0; i < services.size(); i++) { @@ -257,10 +271,14 @@ public void testGetServicesWithSparseEmbeddingTaskType() throws IOException { } Arrays.sort(providers); - assertArrayEquals( - providers, - List.of("alibabacloud-ai-search", "elastic", "elasticsearch", "hugging_face", "hugging_face_elser", "test_service").toArray() + + var providerList = new ArrayList<>( + Arrays.asList("alibabacloud-ai-search", "elasticsearch", "hugging_face", "hugging_face_elser", "test_service") ); + if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + providerList.add(1, "elastic"); + } + assertArrayEquals(providers, providerList.toArray()); } public void testSkipValidationAndStart() throws IOException { From c9c1765986189905cad635dd768f1e41555524fd Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Thu, 31 Oct 2024 19:14:58 +0100 Subject: [PATCH 246/324] Remove duplicate 'the the' (#116023) There were many places where `the the` was typed, in comments, docs and messages. All were incorrect and replaces with a single `the` --- .../gradle/internal/test/rest/RestResourcesPlugin.java | 4 ++-- .../connector/docs/connectors-hosted-tutorial-mongo.asciidoc | 2 +- docs/reference/esql/esql-query-api.asciidoc | 2 +- docs/reference/ml/anomaly-detection/apis/get-job.asciidoc | 2 +- .../reranking/learning-to-rank-model-training.asciidoc | 2 +- .../search-application/apis/put-search-application.asciidoc | 2 +- .../securing-communications/security-minimal-setup.asciidoc | 4 ++-- .../xcontent/support/AbstractXContentParser.java | 2 +- .../java/org/elasticsearch/repositories/s3/S3Service.java | 2 +- .../test/java/org/elasticsearch/packaging/util/FileUtils.java | 2 +- .../indices/SystemIndexMappingUpdateServiceIT.java | 2 +- .../java/org/elasticsearch/cluster/metadata/Metadata.java | 2 +- .../main/java/org/elasticsearch/common/settings/Settings.java | 2 +- .../elasticsearch/indices/recovery/RecoverySourceHandler.java | 2 +- .../repositories/blobstore/BlobStoreRepository.java | 2 +- .../repositories/blobstore/ChunkedBlobOutputStream.java | 2 +- .../aggregations/bucket/filter/FilterByFilterAggregator.java | 2 +- .../bucket/terms/SignificantTermsAggregatorFactory.java | 2 +- .../search/aggregations/support/AggregationContext.java | 2 +- .../main/java/org/elasticsearch/snapshots/package-info.java | 2 +- .../action/admin/cluster/node/info/NodeInfoTests.java | 2 +- .../cluster/coordination/JoinReasonServiceTests.java | 2 +- .../java/org/elasticsearch/common/LocalTimeOffsetTests.java | 2 +- .../indices/breaker/HierarchyCircuitBreakerServiceTests.java | 2 +- .../bucket/AbstractNXYSignificanceHeuristicTestCase.java | 2 +- .../java/org/elasticsearch/blobcache/BlobCacheMetrics.java | 2 +- .../esql/core/async/AsyncTaskManagementServiceTests.java | 2 +- .../xpack/ql/async/AsyncTaskManagementServiceTests.java | 2 +- .../elasticsearch/xpack/rollup/job/RollupJobTaskTests.java | 2 +- .../xpack/sql/execution/search/SourceGenerator.java | 2 +- .../sql/execution/search/extractor/MetricAggExtractor.java | 2 +- 31 files changed, 33 insertions(+), 33 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java index 4724c576066f1..c62089a42b159 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java @@ -39,7 +39,7 @@ * } * } * - * Will copy the entire core Rest API specifications (assuming the project has tests) and any of the the X-pack specs starting with enrich*. + * Will copy the entire core Rest API specifications (assuming the project has tests) and any of the X-pack specs starting with enrich*. * It is recommended (but not required) to also explicitly declare which core specs your project depends on to help optimize the caching * behavior. * For example: @@ -66,7 +66,7 @@ * } * } * - * Will copy any of the the x-pack tests that start with graph, and will copy the X-pack graph specification, as well as the full core + * Will copy any of the x-pack tests that start with graph, and will copy the X-pack graph specification, as well as the full core * Rest API specification. *

    * Additionally you can specify which sourceSetName resources should be copied to. The default is the yamlRestTest source set. diff --git a/docs/reference/connector/docs/connectors-hosted-tutorial-mongo.asciidoc b/docs/reference/connector/docs/connectors-hosted-tutorial-mongo.asciidoc index a1f7048705555..12050ff17e279 100644 --- a/docs/reference/connector/docs/connectors-hosted-tutorial-mongo.asciidoc +++ b/docs/reference/connector/docs/connectors-hosted-tutorial-mongo.asciidoc @@ -90,7 +90,7 @@ Find this by https://www.mongodb.com/docs/atlas/tutorial/connect-to-your-cluster In this example, we'll use the `sample_mflix` database. * *Collection*: The name of the collection you want to sync. In this example, we'll use the `comments` collection of the `sample_mflix` database. -* *Username*: The username you created earlier, in the the setup phase. +* *Username*: The username you created earlier, in the setup phase. * *Password*: The password you created earlier. Keep these details handy! diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index b1582721ad0e0..63b8738266132 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -46,7 +46,7 @@ supports this parameter for CSV responses. `drop_null_columns`:: (Optional, boolean) Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If -`true` the the response will include an extra section under the name +`true` the response will include an extra section under the name `all_columns` which has the name of all columns. `format`:: diff --git a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc index 4ee6c429ce730..33692fd182fa7 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc @@ -209,7 +209,7 @@ value. (string) Reserved for future use, currently set to `anomaly_detector`. `job_version`:: -(string) The {ml} configuration version number at which the the job was created. +(string) The {ml} configuration version number at which the job was created. NOTE: From {es} 8.10.0, a new version number is used to track the configuration and state changes in the {ml} plugin. This new diff --git a/docs/reference/reranking/learning-to-rank-model-training.asciidoc b/docs/reference/reranking/learning-to-rank-model-training.asciidoc index 8e0b3f9ae94ce..9a35573a0879d 100644 --- a/docs/reference/reranking/learning-to-rank-model-training.asciidoc +++ b/docs/reference/reranking/learning-to-rank-model-training.asciidoc @@ -43,7 +43,7 @@ feature_extractors=[ feature_name="title_bm25", query={"match": {"title": "{{query}}"}} ), - # We want to use the the number of matched terms in the title field as a feature: + # We want to use the number of matched terms in the title field as a feature: QueryFeatureExtractor( feature_name="title_matched_term_count", query={ diff --git a/docs/reference/search-application/apis/put-search-application.asciidoc b/docs/reference/search-application/apis/put-search-application.asciidoc index eb559acc8cdc7..dc5e20ec40b7f 100644 --- a/docs/reference/search-application/apis/put-search-application.asciidoc +++ b/docs/reference/search-application/apis/put-search-application.asciidoc @@ -192,7 +192,7 @@ When the above `dictionary` parameter is specified, the <> API will return an error. +If the parameters are not valid, the <> API will return an error. [source,console] ---- POST _application/search_application/my-app/_search diff --git a/docs/reference/security/securing-communications/security-minimal-setup.asciidoc b/docs/reference/security/securing-communications/security-minimal-setup.asciidoc index ee158294df03c..fd54c37d9e8fa 100644 --- a/docs/reference/security/securing-communications/security-minimal-setup.asciidoc +++ b/docs/reference/security/securing-communications/security-minimal-setup.asciidoc @@ -78,7 +78,7 @@ This command resets the password to an auto-generated value. ./bin/elasticsearch-reset-password -u elastic ---- + -If you want to set the password to a specific value, run the command with the +If you want to set the password to a specific value, run the command with the interactive (`-i`) parameter. + [source,shell] @@ -93,7 +93,7 @@ interactive (`-i`) parameter. ./bin/elasticsearch-reset-password -u kibana_system ---- -. Save the new passwords. In the next step, you'll add the the password for the +. Save the new passwords. In the next step, you'll add the password for the `kibana_system` user to {kib}. *Next*: <> diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java index a3b495b9c3e38..909cf808d1f34 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java @@ -370,7 +370,7 @@ private static void skipToListStart(XContentParser parser) throws IOException { } } - // read a list without bounds checks, assuming the the current parser is always on an array start + // read a list without bounds checks, assuming the current parser is always on an array start private static List readListUnsafe(XContentParser parser, Supplier> mapFactory) throws IOException { assert parser.currentToken() == Token.START_ARRAY; ArrayList list = new ArrayList<>(); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 36eb1d61e21d7..1ebd6f920d518 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -335,7 +335,7 @@ public void refresh() { * Customizes {@link com.amazonaws.auth.WebIdentityTokenCredentialsProvider} * *
      - *
    • Reads the the location of the web identity token not from AWS_WEB_IDENTITY_TOKEN_FILE, but from a symlink + *
    • Reads the location of the web identity token not from AWS_WEB_IDENTITY_TOKEN_FILE, but from a symlink * in the plugin directory, so we don't need to create a hardcoded read file permission for the plugin.
    • *
    • Supports customization of the STS endpoint via a system property, so we can test it against a test fixture.
    • *
    • Supports gracefully shutting down the provider and the STS client.
    • diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/FileUtils.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/FileUtils.java index 76140da810c2b..2b8fb0d4e6021 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/FileUtils.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/FileUtils.java @@ -373,7 +373,7 @@ public static String escapePath(Path path) { } /** - * Recursively copy the the source directory to the target directory, preserving permissions. + * Recursively copy the source directory to the target directory, preserving permissions. */ public static void copyDirectory(Path source, Path target) throws IOException { Files.walkFileTree(source, new SimpleFileVisitor<>() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java index fbc4e08b6b78a..de565605ff58a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java @@ -70,7 +70,7 @@ public void testSystemIndexManagerUpgradesMappings() throws Exception { } /** - * Check that if the the SystemIndexManager finds a managed index with mappings that claim to be newer than + * Check that if the SystemIndexManager finds a managed index with mappings that claim to be newer than * what it expects, then those mappings are left alone. */ public void testSystemIndexManagerLeavesNewerMappingsAlone() throws Exception { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index b7777eca86179..6d2e9c37fc625 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -2402,7 +2402,7 @@ public Metadata build(boolean skipNameCollisionChecks) { assert previousIndicesLookup.equals(buildIndicesLookup(dataStreamMetadata(), indicesMap)); indicesLookup = previousIndicesLookup; } else if (skipNameCollisionChecks == false) { - // we have changes to the the entity names so we ensure we have no naming collisions + // we have changes to the entity names so we ensure we have no naming collisions ensureNoNameCollisions(aliasedIndices.keySet(), indicesMap, dataStreamMetadata()); } assert assertDataStreams(indicesMap, dataStreamMetadata()); diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 2abfee670b950..09773ba1de35e 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -242,7 +242,7 @@ public Settings getByPrefix(String prefix) { if (prefix.isEmpty()) { return this; } - // create the the next prefix right after the given prefix, and use it as exclusive upper bound for the sub-map to filter by prefix + // create the next prefix right after the given prefix, and use it as exclusive upper bound for the sub-map to filter by prefix // below char[] toPrefixCharArr = prefix.toCharArray(); toPrefixCharArr[toPrefixCharArr.length - 1]++; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index fbfed8a75a146..30fea41330038 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -954,7 +954,7 @@ private void notifyFailureOnceAllOutstandingRequestAreDone(Exception e) { void createRetentionLease(final long startingSeqNo, ActionListener listener) { updateRetentionLease(syncListener -> { - // Clone the peer recovery retention lease belonging to the source shard. We are retaining history between the the local + // Clone the peer recovery retention lease belonging to the source shard. We are retaining history between the local // checkpoint of the safe commit we're creating and this lease's retained seqno with the retention lock, and by cloning an // existing lease we (approximately) know that all our peers are also retaining history as requested by the cloned lease. If // the recovery now fails before copying enough history over then a subsequent attempt will find this lease, determine it is diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 435bf71e3b2c9..b43fe05a541f6 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -435,7 +435,7 @@ public static String getRepositoryDataBlobName(long repositoryGeneration) { /** * Flag that is set to {@code true} if this instance is started with {@link #metadata} that has a higher value for * {@link RepositoryMetadata#pendingGeneration()} than for {@link RepositoryMetadata#generation()} indicating a full cluster restart - * potentially accounting for the the last {@code index-N} write in the cluster state. + * potentially accounting for the last {@code index-N} write in the cluster state. * Note: While it is true that this value could also be set to {@code true} for an instance on a node that is just joining the cluster * during a new {@code index-N} write, this does not present a problem. The node will still load the correct {@link RepositoryData} in * all cases and simply do a redundant listing of the repository contents if it tries to load {@link RepositoryData} and falls back diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChunkedBlobOutputStream.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChunkedBlobOutputStream.java index bcbbad81f1ca1..0e2066f58e25d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChunkedBlobOutputStream.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChunkedBlobOutputStream.java @@ -128,7 +128,7 @@ protected final void finishPart(T partId) { } /** - * Write the contents of {@link #buffer} to storage. Implementations should call {@link #finishPart} at the end to track the the chunk + * Write the contents of {@link #buffer} to storage. Implementations should call {@link #finishPart} at the end to track the chunk * of data just written and ready {@link #buffer} for the next write. */ protected abstract void flushBuffer() throws IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java index 8ebf55487ed59..cc8ecc74b5ea0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java @@ -132,7 +132,7 @@ final void add(QueryToFilterAdapter filter) throws IOException { } /** - * Build the the adapter or {@code null} if the this isn't a valid rewrite. + * Build the adapter or {@code null} if this isn't a valid rewrite. */ public final T build() throws IOException { if (false == valid || aggCtx.enableRewriteToFilterByFilter() == false) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java index 1ff7529bf3188..080cac9cbfb85 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java @@ -123,7 +123,7 @@ private static SignificantTermsAggregatorSupplier bytesSupplier() { /** * Whether the aggregation will execute. If the main query matches no documents and parent aggregation isn't a global or terms - * aggregation with min_doc_count = 0, the the aggregator will not really execute. In those cases it doesn't make sense to load + * aggregation with min_doc_count = 0, the aggregator will not really execute. In those cases it doesn't make sense to load * global ordinals. *

      * Some searches that will never match can still fall through and we endup running query that will produce no results. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java index 4f939ea294e48..c720f3d9465a3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java @@ -556,7 +556,7 @@ public synchronized void removeReleasable(Aggregator aggregator) { // Removing an aggregator is done after calling Aggregator#buildTopLevel which happens on an executor thread. // We need to synchronize the removal because he AggregatorContext it is shared between executor threads. assert releaseMe.contains(aggregator) - : "removing non-existing aggregator [" + aggregator.name() + "] from the the aggregation context"; + : "removing non-existing aggregator [" + aggregator.name() + "] from the aggregation context"; releaseMe.remove(aggregator); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/package-info.java b/server/src/main/java/org/elasticsearch/snapshots/package-info.java index 694c9c5c9062b..d73a1d9bd701a 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/package-info.java +++ b/server/src/main/java/org/elasticsearch/snapshots/package-info.java @@ -113,7 +113,7 @@ * snapshots, we load the {@link org.elasticsearch.snapshots.SnapshotInfo} for the source snapshot and check for shard snapshot * failures of the relevant indices. *

    • Once all shard counts are known and the health of all source indices data has been verified, we populate the - * {@code SnapshotsInProgress.Entry#clones} map for the clone operation with the the relevant shard clone tasks.
    • + * {@code SnapshotsInProgress.Entry#clones} map for the clone operation with the relevant shard clone tasks. *
    • After the clone tasks have been added to the {@code SnapshotsInProgress.Entry}, master executes them on its snapshot thread-pool * by invoking {@link org.elasticsearch.repositories.Repository#cloneShardSnapshot} for each shard that is to be cloned. Each completed * shard snapshot triggers a call to the {@link org.elasticsearch.snapshots.SnapshotsService#masterServiceTaskQueue} which updates the diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java index c0bf8f7c3bf12..5fa138abca809 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java @@ -33,7 +33,7 @@ public class NodeInfoTests extends ESTestCase { /** - * Check that the the {@link NodeInfo#getInfo(Class)} method returns null + * Check that the {@link NodeInfo#getInfo(Class)} method returns null * for absent info objects, and returns the right thing for present info * objects. */ diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinReasonServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinReasonServiceTests.java index 80aa142069358..dc46f862923e5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinReasonServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinReasonServiceTests.java @@ -144,7 +144,7 @@ public void testCleanup() { discoveryNodes[i] = randomDiscoveryNode(); } - // we stop tracking the the oldest absent node(s) when only 1/3 of the tracked nodes are present + // we stop tracking the oldest absent node(s) when only 1/3 of the tracked nodes are present final int cleanupNodeCount = (discoveryNodes.length - 2) / 3; final DiscoveryNodes.Builder cleanupNodesBuilder = new DiscoveryNodes.Builder().add(masterNode) diff --git a/server/src/test/java/org/elasticsearch/common/LocalTimeOffsetTests.java b/server/src/test/java/org/elasticsearch/common/LocalTimeOffsetTests.java index 9711fb2c0f7fd..e3af6695bde1d 100644 --- a/server/src/test/java/org/elasticsearch/common/LocalTimeOffsetTests.java +++ b/server/src/test/java/org/elasticsearch/common/LocalTimeOffsetTests.java @@ -275,7 +275,7 @@ private static long time(String time, ZoneId zone) { } /** - * The the last "fully defined" transitions in the provided {@linkplain ZoneId}. + * The last "fully defined" transitions in the provided {@linkplain ZoneId}. */ private static ZoneOffsetTransition lastTransitionIn(ZoneId zone) { List transitions = zone.getRules().getTransitions(); diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index 610e87b50d365..0a8fbcf6d56b9 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -747,7 +747,7 @@ public void testAllocationBucketsBreaker() { // make sure used bytes is greater than the total circuit breaker limit breaker.addWithoutBreaking(200); - // make sure that we check on the the following call + // make sure that we check on the following call for (int i = 0; i < 1023; i++) { multiBucketConsumer.accept(0); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractNXYSignificanceHeuristicTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractNXYSignificanceHeuristicTestCase.java index adb9c6f1e4ca0..5e82cb7edfeac 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractNXYSignificanceHeuristicTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractNXYSignificanceHeuristicTestCase.java @@ -27,7 +27,7 @@ protected SignificanceHeuristic getHeuristic() { /** * @param includeNegatives value for this test run, should the scores include negative values. * @param backgroundIsSuperset value for this test run, indicates in NXY significant terms if the background is indeed - * a superset of the the subset, or is instead a disjoint set + * a superset of the subset, or is instead a disjoint set * @return A random instance of an NXY heuristic to test */ protected abstract SignificanceHeuristic getHeuristic(boolean includeNegatives, boolean backgroundIsSuperset); diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java index 075621e8cdccb..a253b6bdd2360 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java @@ -64,7 +64,7 @@ public BlobCacheMetrics(MeterRegistry meterRegistry) { ), meterRegistry.registerDoubleHistogram( "es.blob_cache.population.throughput.histogram", - "The throughput observed when populating the the cache", + "The throughput observed when populating the cache", "MiB/second" ), meterRegistry.registerLongCounter( diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/async/AsyncTaskManagementServiceTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/async/AsyncTaskManagementServiceTests.java index 5361f1e8d1974..2e71c2c0a1ad3 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/async/AsyncTaskManagementServiceTests.java +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/async/AsyncTaskManagementServiceTests.java @@ -313,7 +313,7 @@ public void execute(TestRequest request, TestTask task, ActionListener response = getResponse(responseHolder.get().id, TimeValue.ZERO); if (success) { assertThat(response.getException(), nullValue()); diff --git a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementServiceTests.java b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementServiceTests.java index 65e5efbccb1af..018504785b5eb 100644 --- a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementServiceTests.java +++ b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementServiceTests.java @@ -313,7 +313,7 @@ public void execute(TestRequest request, TestTask task, ActionListener response = getResponse(responseHolder.get().id, TimeValue.ZERO); if (success) { assertThat(response.getException(), nullValue()); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 8b63b76cdf248..7d120e62e0260 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -396,7 +396,7 @@ public void onFailure(Exception e) { }); assertUnblockIn10s(latch2); - // the the client answer + // the client answer unblock.countDown(); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java index 0cbd7f1389188..207f92759fa34 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java @@ -70,7 +70,7 @@ public static SearchSourceBuilder sourceBuilder(QueryContainer container, QueryB // set page size if (size != null) { int sz = container.limit() > 0 ? Math.min(container.limit(), size) : size; - // now take into account the the minimum page (if set) + // now take into account the minimum page (if set) // that is, return the multiple of the minimum page size closer to the set size int minSize = container.minPageSize(); sz = minSize > 0 ? (Math.max(sz / minSize, 1) * minSize) : sz; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java index f76702f5ffe5d..c12a172453941 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java @@ -139,7 +139,7 @@ private Object handleTargetType(Object object) { return DateUtils.asDateTimeWithMillis(((Number) object).longValue(), zoneId); } else if (dataType.isInteger()) { // MIN and MAX need to return the same type as field's and SUM a long for integral types, but ES returns them always as - // floating points -> convert them in the the SELECT pipeline, if needed + // floating points -> convert them in the SELECT pipeline, if needed return convert(object, dataType); } } From a2075f5ddd9c0c5f7fdc9c0374dbd364d7452805 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 31 Oct 2024 11:41:43 -0700 Subject: [PATCH 247/324] Unmute GlobalCheckpointSyncActionIT (#116037) Closes #111124 --- muted-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 9b4b07b5c63d6..670c6d5898ddd 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -20,8 +20,6 @@ tests: - class: org.elasticsearch.nativeaccess.VectorSystemPropertyTests method: testSystemPropertyDisabled issue: https://github.com/elastic/elasticsearch/issues/110949 -- class: org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT - issue: https://github.com/elastic/elasticsearch/issues/111124 - class: org.elasticsearch.packaging.test.DockerTests method: test021InstallPlugin issue: https://github.com/elastic/elasticsearch/issues/110343 From 69245bdee1576dca685d5153557cb2e8f60fd104 Mon Sep 17 00:00:00 2001 From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com> Date: Thu, 31 Oct 2024 14:47:36 -0400 Subject: [PATCH 248/324] [ML] Wait for up to 2 seconds for yellow status before starting search (#115938) * Wait for up to 2 seconds for yellow before starting search * Update docs/changelog/115938.yaml * Update docs/changelog/115938.yaml --------- Co-authored-by: Elastic Machine --- docs/changelog/115938.yaml | 9 ++ .../persistence/TrainedModelProvider.java | 142 +++++++++++------- 2 files changed, 100 insertions(+), 51 deletions(-) create mode 100644 docs/changelog/115938.yaml diff --git a/docs/changelog/115938.yaml b/docs/changelog/115938.yaml new file mode 100644 index 0000000000000..e096d8821a1d7 --- /dev/null +++ b/docs/changelog/115938.yaml @@ -0,0 +1,9 @@ +pr: 115938 +summary: Wait for up to 2 seconds for yellow status before starting search +area: Machine Learning +type: bug +issues: + - 107777 + - 105955 + - 107815 + - 112191 diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java index ff5f37427b18f..5cf349b96a4f7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java @@ -14,6 +14,8 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -28,6 +30,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -43,6 +46,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -105,6 +109,7 @@ import java.util.Objects; import java.util.Set; import java.util.TreeSet; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; @@ -1053,63 +1058,98 @@ public void expandIds( } public void getInferenceStats(String[] modelIds, @Nullable TaskId parentTaskId, ActionListener> listener) { - MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); - Arrays.stream(modelIds).map(TrainedModelProvider::buildStatsSearchRequest).forEach(multiSearchRequest::add); - if (multiSearchRequest.requests().isEmpty()) { - listener.onResponse(Collections.emptyList()); - return; - } - if (parentTaskId != null) { - multiSearchRequest.setParentTask(parentTaskId); - } - executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - ML_ORIGIN, - multiSearchRequest, - ActionListener.wrap(responses -> { - List allStats = new ArrayList<>(modelIds.length); - int modelIndex = 0; - assert responses.getResponses().length == modelIds.length : "mismatch between search response size and models requested"; - for (MultiSearchResponse.Item response : responses.getResponses()) { - if (response.isFailure()) { - if (ExceptionsHelper.unwrapCause(response.getFailure()) instanceof ResourceNotFoundException) { - modelIndex++; - continue; - } + + SubscribableListener.newForked((delegate) -> { + // first wait for the index to be available + executeAsyncWithOrigin( + client.threadPool().getThreadContext(), + ML_ORIGIN, + new ClusterHealthRequest(new TimeValue(2, TimeUnit.SECONDS), MlStatsIndex.indexPattern()).waitForYellowStatus(), + delegate, + client.admin().cluster()::health + ); + }) + .>andThen( + client.threadPool().executor(MachineLearning.UTILITY_THREAD_POOL_NAME), + client.threadPool().getThreadContext(), + (delegate, clusterHealthResponse) -> { + if (clusterHealthResponse.isTimedOut()) { logger.error( - () -> "[" + Strings.arrayToCommaDelimitedString(modelIds) + "] search failed for models", - response.getFailure() - ); - listener.onFailure( - ExceptionsHelper.serverError( - "Searching for stats for models [{}] failed", - response.getFailure(), - Strings.arrayToCommaDelimitedString(modelIds) - ) + "getInferenceStats Timed out waiting for index [{}] to be available, " + + "this will probably cause the request to fail", + MlStatsIndex.indexPattern() ); - return; } - try { - InferenceStats inferenceStats = handleMultiNodeStatsResponse(response.getResponse(), modelIds[modelIndex++]); - if (inferenceStats != null) { - allStats.add(inferenceStats); - } - } catch (Exception e) { - listener.onFailure(e); + + MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); + Arrays.stream(modelIds).map(TrainedModelProvider::buildStatsSearchRequest).forEach(multiSearchRequest::add); + if (multiSearchRequest.requests().isEmpty()) { + delegate.onResponse(Collections.emptyList()); return; } + if (parentTaskId != null) { + multiSearchRequest.setParentTask(parentTaskId); + } + executeAsyncWithOrigin( + client.threadPool().getThreadContext(), + ML_ORIGIN, + multiSearchRequest, + ActionListener.wrap(responses -> { + List allStats = new ArrayList<>(modelIds.length); + int modelIndex = 0; + assert responses.getResponses().length == modelIds.length + : "mismatch between search response size and models requested"; + for (MultiSearchResponse.Item response : responses.getResponses()) { + if (response.isFailure()) { + if (ExceptionsHelper.unwrapCause(response.getFailure()) instanceof ResourceNotFoundException) { + modelIndex++; + continue; + } + logger.error( + () -> "[" + Strings.arrayToCommaDelimitedString(modelIds) + "] search failed for models", + response.getFailure() + ); + delegate.onFailure( + ExceptionsHelper.serverError( + "Searching for stats for models [{}] failed", + response.getFailure(), + Strings.arrayToCommaDelimitedString(modelIds) + ) + ); + return; + } + try { + InferenceStats inferenceStats = handleMultiNodeStatsResponse( + response.getResponse(), + modelIds[modelIndex++] + ); + if (inferenceStats != null) { + allStats.add(inferenceStats); + } + } catch (Exception e) { + delegate.onFailure(e); + return; + } + } + delegate.onResponse(allStats); + }, e -> { + Throwable unwrapped = ExceptionsHelper.unwrapCause(e); + if (unwrapped instanceof ResourceNotFoundException) { + delegate.onResponse(Collections.emptyList()); + return; + } + delegate.onFailure((Exception) unwrapped); + }), + client::multiSearch + ); + } - listener.onResponse(allStats); - }, e -> { - Throwable unwrapped = ExceptionsHelper.unwrapCause(e); - if (unwrapped instanceof ResourceNotFoundException) { - listener.onResponse(Collections.emptyList()); - return; - } - listener.onFailure((Exception) unwrapped); - }), - client::multiSearch - ); + ) + .addListener( + listener, + client.threadPool().executor(MachineLearning.UTILITY_THREAD_POOL_NAME), + client.threadPool().getThreadContext() + ); } private static SearchRequest buildStatsSearchRequest(String modelId) { From 18a57d8a39c4cdb8d9d3ed9178489430b78dc534 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 31 Oct 2024 12:15:47 -0700 Subject: [PATCH 249/324] Remove sync to jdk branch github workflow --- .github/workflows/sync-main-to-jdk-branch.yml | 21 ------------------- 1 file changed, 21 deletions(-) delete mode 100644 .github/workflows/sync-main-to-jdk-branch.yml diff --git a/.github/workflows/sync-main-to-jdk-branch.yml b/.github/workflows/sync-main-to-jdk-branch.yml deleted file mode 100644 index 4e2dcfb0eed76..0000000000000 --- a/.github/workflows/sync-main-to-jdk-branch.yml +++ /dev/null @@ -1,21 +0,0 @@ -# Daily update of JDK update branch with changes from main -name: "Merge main to openjdk23-bundle branch" -on: - schedule: - - cron: "30 17 * * *" - workflow_dispatch: {} - -jobs: - merge-branch: - if: github.repository == 'elastic/elasticsearch' - runs-on: ubuntu-latest - steps: - - name: checkout - uses: actions/checkout@master - - - name: merge - uses: devmasx/merge-branch@1.4.0 - with: - type: "now" - target_branch: openjdk23-bundle - github_token: ${{ secrets.ELASTICSEARCHMACHINE_TOKEN }} From f3d4d575d8107d794d4c209f255e41f00b5d5d38 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 31 Oct 2024 12:48:55 -0700 Subject: [PATCH 250/324] Increase startup timeout in EnrollmentProcessTests (#116039) Closes #114885 --- muted-tests.yml | 3 --- .../elasticsearch/packaging/test/EnrollmentProcessTests.java | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 670c6d5898ddd..32fe87fce01f2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -208,9 +208,6 @@ tests: - class: org.elasticsearch.xpack.remotecluster.RemoteClusterSecurityWithApmTracingRestIT method: testTracingCrossCluster issue: https://github.com/elastic/elasticsearch/issues/112731 -- class: org.elasticsearch.packaging.test.EnrollmentProcessTests - method: test20DockerAutoFormCluster - issue: https://github.com/elastic/elasticsearch/issues/114885 - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114913 diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java index 0f0a599423092..836a6b71c4a19 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java @@ -116,7 +116,7 @@ public void test20DockerAutoFormCluster() throws Exception { makeRequestAsElastic("https://localhost:9200/_cluster/health", "password"), containsString("\"number_of_nodes\":2") ), - 20, + 60, TimeUnit.SECONDS ); From cad71e9a22c3d299ea5aa7be8ca57bdd8496bc52 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 1 Nov 2024 07:45:15 +1100 Subject: [PATCH 251/324] Mute org.elasticsearch.action.admin.HotThreadsIT testHotThreadsDontFail #115754 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 32fe87fce01f2..812f837ba481a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -285,6 +285,9 @@ tests: - class: org.elasticsearch.search.basic.SearchWithRandomExceptionsIT method: testRandomExceptions issue: https://github.com/elastic/elasticsearch/issues/116027 +- class: org.elasticsearch.action.admin.HotThreadsIT + method: testHotThreadsDontFail + issue: https://github.com/elastic/elasticsearch/issues/115754 # Examples: # From e681eef1013078282fb8a18624a03c240dd2fb7b Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 1 Nov 2024 07:56:59 +1100 Subject: [PATCH 252/324] Mute org.elasticsearch.search.functionscore.QueryRescorerIT testScoring #116050 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 812f837ba481a..e977a18032efb 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -288,6 +288,9 @@ tests: - class: org.elasticsearch.action.admin.HotThreadsIT method: testHotThreadsDontFail issue: https://github.com/elastic/elasticsearch/issues/115754 +- class: org.elasticsearch.search.functionscore.QueryRescorerIT + method: testScoring + issue: https://github.com/elastic/elasticsearch/issues/116050 # Examples: # From f0881e62eef24232c8a63df816d3907c29fdd122 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 31 Oct 2024 13:59:59 -0700 Subject: [PATCH 253/324] Unmute SecureHdfsSearchableSnapshotsIT (#116044) Closes #115995 --- muted-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index e977a18032efb..8c96691528599 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -274,8 +274,6 @@ tests: - class: org.elasticsearch.action.search.PointInTimeIT method: testPITTiebreak issue: https://github.com/elastic/elasticsearch/issues/115810 -- class: org.elasticsearch.xpack.searchablesnapshots.hdfs.SecureHdfsSearchableSnapshotsIT - issue: https://github.com/elastic/elasticsearch/issues/115995 - class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT method: testReindexWithShutdown issue: https://github.com/elastic/elasticsearch/issues/115996 From a48925e1ba93d41b3613411fddb53c4b4130ed22 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 1 Nov 2024 08:04:40 +1100 Subject: [PATCH 254/324] Mute org.elasticsearch.indexing.IndexActionIT testAutoGenerateIdNoDuplicates #115716 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 8c96691528599..98a352846ded5 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -289,6 +289,9 @@ tests: - class: org.elasticsearch.search.functionscore.QueryRescorerIT method: testScoring issue: https://github.com/elastic/elasticsearch/issues/116050 +- class: org.elasticsearch.indexing.IndexActionIT + method: testAutoGenerateIdNoDuplicates + issue: https://github.com/elastic/elasticsearch/issues/115716 # Examples: # From d3321ea64c3f4cf8e68470604f10cd370d02c9da Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 31 Oct 2024 16:10:34 -0700 Subject: [PATCH 255/324] Unmute HdfsRepositoryAnalysisRestIT (#116056) Closes #112889 --- muted-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 98a352846ded5..94825bc7d098a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -98,8 +98,6 @@ tests: - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT method: test {case-functions.testUcaseInline3} issue: https://github.com/elastic/elasticsearch/issues/112643 -- class: org.elasticsearch.repositories.blobstore.testkit.analyze.HdfsRepositoryAnalysisRestIT - issue: https://github.com/elastic/elasticsearch/issues/112889 - class: org.elasticsearch.xpack.sql.qa.security.JdbcSqlSpecIT method: test {case-functions.testUcaseInline1} issue: https://github.com/elastic/elasticsearch/issues/112641 From 8eb4d04873966615c8c28f98c5cd64c621c40b31 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 1 Nov 2024 15:47:42 +1100 Subject: [PATCH 256/324] Mute org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT test {yaml=logsdb/10_settings/logsdb with default ignore dynamic beyond limit and subobjects false} #116054 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 94825bc7d098a..d04fc426a7a6b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -290,6 +290,9 @@ tests: - class: org.elasticsearch.indexing.IndexActionIT method: testAutoGenerateIdNoDuplicates issue: https://github.com/elastic/elasticsearch/issues/115716 +- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT + method: test {yaml=logsdb/10_settings/logsdb with default ignore dynamic beyond limit and subobjects false} + issue: https://github.com/elastic/elasticsearch/issues/116054 # Examples: # From fc1d9d09e71e51e4b18e570be09d936e3f1f184c Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Fri, 1 Nov 2024 08:10:15 +0200 Subject: [PATCH 257/324] ReplicationOperation should fail gracefully (#115341) Problem: finishAsFailed could be called asynchronously in the middle of operations like runPostReplicationActions which try to sync the translog. finishAsFailed immediately triggers the failure of the resultListener which releases the index shard primary operation permit. This means that runPostReplicationActions may try to sync the translog without an operation permit. Solution: We refactor the infrastructure of ReplicationOperation regarding pendingActions and the resultListener, by replacing them with a RefCountingListener. This way, if there are async failures, they are aggregated, and the result listener is called once, after all mid-way operations are done. For the specific error we got in issue #97183, this means that a call to onNoLongerPrimary (which can happen if we fail to fail a replica shard or mark it as stale) will not immediately release the primary operation permit and the assertion in the translog sync will be honored. Fixes #97183 --- .../replication/ReplicationOperation.java | 406 +++++++++--------- .../elasticsearch/index/shard/IndexShard.java | 3 +- 2 files changed, 214 insertions(+), 195 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 5c4195bc7b069..43167e206bfb5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.RetryableAction; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -55,17 +56,6 @@ public class ReplicationOperation< private final Request request; private final String opType; private final AtomicInteger totalShards = new AtomicInteger(); - /** - * The number of pending sub-operations in this operation. This is incremented when the following operations start and decremented when - * they complete: - *
        - *
      • The operation on the primary
      • - *
      • The operation on each replica
      • - *
      • Coordination of the operation as a whole. This prevents the operation from terminating early if we haven't started any replica - * operations and the primary finishes.
      • - *
      - */ - private final AtomicInteger pendingActions = new AtomicInteger(); private final AtomicInteger successfulShards = new AtomicInteger(); private final Primary primary; private final Replicas replicasProxy; @@ -74,7 +64,6 @@ public class ReplicationOperation< private final TimeValue retryTimeout; private final long primaryTerm; - // exposed for tests private final ActionListener resultListener; private volatile PrimaryResultT primaryResult = null; @@ -105,29 +94,57 @@ public ReplicationOperation( this.retryTimeout = retryTimeout; } + /** + * The execution is based on a {@link RefCountingListener} that encapsulates the pending sub-operations in this operation. A new + * listener is acquired when the following sub-operations start and triggered when they complete: + *
        + *
      • The operation on the primary
      • + *
      • The operation on each replica
      • + *
      • Coordination of the operation as a whole. This prevents the operation from terminating early if we haven't started any replica + * operations and the primary finishes.
      • + *
      + */ public void execute() throws Exception { - final String activeShardCountFailure = checkActiveShardCount(); - final ShardRouting primaryRouting = primary.routingEntry(); - final ShardId primaryId = primaryRouting.shardId(); - if (activeShardCountFailure != null) { - finishAsFailed( - new UnavailableShardsException( - primaryId, - "{} Timeout: [{}], request: [{}]", - activeShardCountFailure, - request.timeout(), - request + try (var pendingActionsListener = new RefCountingListener(ActionListener.wrap((ignored) -> { + primaryResult.setShardInfo( + ReplicationResponse.ShardInfo.of( + totalShards.get(), + successfulShards.get(), + shardReplicaFailures.toArray(ReplicationResponse.NO_FAILURES) ) ); - return; - } + resultListener.onResponse(primaryResult); + }, resultListener::onFailure))) { + final String activeShardCountFailure = checkActiveShardCount(); + final ShardRouting primaryRouting = primary.routingEntry(); + final ShardId primaryId = primaryRouting.shardId(); + if (activeShardCountFailure != null) { + pendingActionsListener.acquire() + .onFailure( + new UnavailableShardsException( + primaryId, + "{} Timeout: [{}], request: [{}]", + activeShardCountFailure, + request.timeout(), + request + ) + ); + return; + } - totalShards.incrementAndGet(); - pendingActions.incrementAndGet(); // increase by 1 until we finish all primary coordination - primary.perform(request, ActionListener.wrap(this::handlePrimaryResult, this::finishAsFailed)); + totalShards.incrementAndGet(); + var primaryCoordinationPendingActionListener = pendingActionsListener.acquire(); // triggered when we finish all coordination + primary.perform(request, primaryCoordinationPendingActionListener.delegateFailureAndWrap((l, primaryResult) -> { + handlePrimaryResult(primaryResult, l, pendingActionsListener); + })); + } } - private void handlePrimaryResult(final PrimaryResultT primaryResult) { + private void handlePrimaryResult( + final PrimaryResultT primaryResult, + final ActionListener primaryCoordinationPendingActionListener, + final RefCountingListener pendingActionsListener + ) { this.primaryResult = primaryResult; final ReplicaRequest replicaRequest = primaryResult.replicaRequest(); if (replicaRequest != null) { @@ -136,11 +153,11 @@ private void handlePrimaryResult(final PrimaryResultT primaryResult) { } final ReplicationGroup replicationGroup = primary.getReplicationGroup(); - pendingActions.incrementAndGet(); + var primaryOperationPendingActionListener = pendingActionsListener.acquire(); replicasProxy.onPrimaryOperationComplete( replicaRequest, replicationGroup.getRoutingTable(), - ActionListener.wrap(ignored -> decPendingAndFinishIfNeeded(), exception -> { + ActionListener.wrap(ignored -> primaryOperationPendingActionListener.onResponse(null), exception -> { totalShards.incrementAndGet(); shardReplicaFailures.add( new ReplicationResponse.ShardInfo.Failure( @@ -151,7 +168,7 @@ private void handlePrimaryResult(final PrimaryResultT primaryResult) { false ) ); - decPendingAndFinishIfNeeded(); + primaryOperationPendingActionListener.onResponse(null); }) ); @@ -169,8 +186,15 @@ private void handlePrimaryResult(final PrimaryResultT primaryResult) { final long maxSeqNoOfUpdatesOrDeletes = primary.maxSeqNoOfUpdatesOrDeletes(); assert maxSeqNoOfUpdatesOrDeletes != SequenceNumbers.UNASSIGNED_SEQ_NO : "seqno_of_updates still uninitialized"; final PendingReplicationActions pendingReplicationActions = primary.getPendingReplicationActions(); - markUnavailableShardsAsStale(replicaRequest, replicationGroup); - performOnReplicas(replicaRequest, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, replicationGroup, pendingReplicationActions); + markUnavailableShardsAsStale(replicaRequest, replicationGroup, pendingActionsListener); + performOnReplicas( + replicaRequest, + globalCheckpoint, + maxSeqNoOfUpdatesOrDeletes, + replicationGroup, + pendingReplicationActions, + pendingActionsListener + ); } primaryResult.runPostReplicationActions(new ActionListener<>() { @@ -181,7 +205,7 @@ public void onResponse(Void aVoid) { primary.routingEntry(), primary::localCheckpoint, primary::globalCheckpoint, - () -> decPendingAndFinishIfNeeded() + () -> primaryCoordinationPendingActionListener.onResponse(null) ); } @@ -193,20 +217,29 @@ public void onFailure(Exception e) { // We update the checkpoints since a refresh might fail but the operations could be safely persisted, in the case that the // fsync failed the local checkpoint won't advance and the engine will be marked as failed when the next indexing operation // is appended into the translog. - updateCheckPoints(primary.routingEntry(), primary::localCheckpoint, primary::globalCheckpoint, () -> finishAsFailed(e)); + updateCheckPoints( + primary.routingEntry(), + primary::localCheckpoint, + primary::globalCheckpoint, + () -> primaryCoordinationPendingActionListener.onFailure(e) + ); } }); } - private void markUnavailableShardsAsStale(ReplicaRequest replicaRequest, ReplicationGroup replicationGroup) { + private void markUnavailableShardsAsStale( + final ReplicaRequest replicaRequest, + final ReplicationGroup replicationGroup, + final RefCountingListener pendingActionsListener + ) { // if inSyncAllocationIds contains allocation ids of shards that don't exist in RoutingTable, mark copies as stale for (String allocationId : replicationGroup.getUnavailableInSyncShards()) { - pendingActions.incrementAndGet(); + var staleCopyPendingActionListener = pendingActionsListener.acquire(); replicasProxy.markShardCopyAsStaleIfNeeded( replicaRequest.shardId(), allocationId, primaryTerm, - ActionListener.wrap(r -> decPendingAndFinishIfNeeded(), ReplicationOperation.this::onNoLongerPrimary) + staleCopyPendingActionListener.delegateResponse((l, e) -> onNoLongerPrimary(e, l)) ); } } @@ -216,7 +249,8 @@ private void performOnReplicas( final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes, final ReplicationGroup replicationGroup, - final PendingReplicationActions pendingReplicationActions + final PendingReplicationActions pendingReplicationActions, + final RefCountingListener pendingActionsListener ) { // for total stats, add number of unassigned shards and // number of initializing shards that are not ready yet to receive operations (recovery has not opened engine yet on the target) @@ -226,7 +260,14 @@ private void performOnReplicas( for (final ShardRouting shard : replicationGroup.getReplicationTargets()) { if (shard.isSameAllocation(primaryRouting) == false) { - performOnReplica(shard, replicaRequest, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, pendingReplicationActions); + performOnReplica( + shard, + replicaRequest, + globalCheckpoint, + maxSeqNoOfUpdatesOrDeletes, + pendingReplicationActions, + pendingActionsListener + ); } } } @@ -236,94 +277,97 @@ private void performOnReplica( final ReplicaRequest replicaRequest, final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes, - final PendingReplicationActions pendingReplicationActions + final PendingReplicationActions pendingReplicationActions, + final RefCountingListener pendingActionsListener ) { assert shard.isPromotableToPrimary() : "only promotable shards should receive replication requests"; if (logger.isTraceEnabled()) { logger.trace("[{}] sending op [{}] to replica {} for request [{}]", shard.shardId(), opType, shard, replicaRequest); } totalShards.incrementAndGet(); - pendingActions.incrementAndGet(); - final ActionListener replicationListener = new ActionListener<>() { - @Override - public void onResponse(ReplicaResponse response) { - successfulShards.incrementAndGet(); - updateCheckPoints(shard, response::localCheckpoint, response::globalCheckpoint, () -> decPendingAndFinishIfNeeded()); - } + var replicationPendingActionListener = pendingActionsListener.acquire(); + ActionListener.run(replicationPendingActionListener, (listener) -> { + final ActionListener replicationListener = new ActionListener<>() { + @Override + public void onResponse(ReplicaResponse response) { + successfulShards.incrementAndGet(); + updateCheckPoints(shard, response::localCheckpoint, response::globalCheckpoint, () -> listener.onResponse(null)); + } - @Override - public void onFailure(Exception replicaException) { - logger.trace( - () -> format( - "[%s] failure while performing [%s] on replica %s, request [%s]", - shard.shardId(), - opType, - shard, - replicaRequest - ), - replicaException - ); - // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report. - if (TransportActions.isShardNotAvailableException(replicaException) == false) { - RestStatus restStatus = ExceptionsHelper.status(replicaException); - shardReplicaFailures.add( - new ReplicationResponse.ShardInfo.Failure( + @Override + public void onFailure(Exception replicaException) { + logger.trace( + () -> format( + "[%s] failure while performing [%s] on replica %s, request [%s]", shard.shardId(), - shard.currentNodeId(), - replicaException, - restStatus, - false - ) + opType, + shard, + replicaRequest + ), + replicaException + ); + // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report. + if (TransportActions.isShardNotAvailableException(replicaException) == false) { + RestStatus restStatus = ExceptionsHelper.status(replicaException); + shardReplicaFailures.add( + new ReplicationResponse.ShardInfo.Failure( + shard.shardId(), + shard.currentNodeId(), + replicaException, + restStatus, + false + ) + ); + } + String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard); + replicasProxy.failShardIfNeeded( + shard, + primaryTerm, + message, + replicaException, + listener.delegateResponse((l, e) -> onNoLongerPrimary(e, l)) ); } - String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard); - replicasProxy.failShardIfNeeded( - shard, - primaryTerm, - message, - replicaException, - ActionListener.wrap(r -> decPendingAndFinishIfNeeded(), ReplicationOperation.this::onNoLongerPrimary) - ); - } - @Override - public String toString() { - return "[" + replicaRequest + "][" + shard + "]"; - } - }; - - final String allocationId = shard.allocationId().getId(); - final RetryableAction replicationAction = new RetryableAction<>( - logger, - threadPool, - initialRetryBackoffBound, - retryTimeout, - replicationListener, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ) { + @Override + public String toString() { + return "[" + replicaRequest + "][" + shard + "]"; + } + }; + + final String allocationId = shard.allocationId().getId(); + final RetryableAction replicationAction = new RetryableAction<>( + logger, + threadPool, + initialRetryBackoffBound, + retryTimeout, + replicationListener, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ) { - @Override - public void tryAction(ActionListener listener) { - replicasProxy.performOn(shard, replicaRequest, primaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, listener); - } + @Override + public void tryAction(ActionListener listener) { + replicasProxy.performOn(shard, replicaRequest, primaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, listener); + } - @Override - public void onFinished() { - super.onFinished(); - pendingReplicationActions.removeReplicationAction(allocationId, this); - } + @Override + public void onFinished() { + super.onFinished(); + pendingReplicationActions.removeReplicationAction(allocationId, this); + } - @Override - public boolean shouldRetry(Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - return cause instanceof CircuitBreakingException - || cause instanceof EsRejectedExecutionException - || cause instanceof ConnectTransportException; - } - }; + @Override + public boolean shouldRetry(Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + return cause instanceof CircuitBreakingException + || cause instanceof EsRejectedExecutionException + || cause instanceof ConnectTransportException; + } + }; - pendingReplicationActions.addPendingAction(allocationId, replicationAction); - replicationAction.run(); + pendingReplicationActions.addPendingAction(allocationId, replicationAction); + replicationAction.run(); + }); } private void updateCheckPoints( @@ -369,58 +413,60 @@ public void onAfter() { } } - private void onNoLongerPrimary(Exception failure) { - final Throwable cause = ExceptionsHelper.unwrapCause(failure); - final boolean nodeIsClosing = cause instanceof NodeClosedException; - if (nodeIsClosing) { - // We prefer not to fail the primary to avoid unnecessary warning log - // when the node with the primary shard is gracefully shutting down. - finishAsFailed( - new RetryOnPrimaryException( - primary.routingEntry().shardId(), - String.format( - Locale.ROOT, - "node with primary [%s] is shutting down while failing replica shard", - primary.routingEntry() - ), - failure - ) - ); - } else { - assert failure instanceof ShardStateAction.NoLongerPrimaryShardException : failure; - threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() { - @Override - protected void doRun() { - // we are no longer the primary, fail ourselves and start over - final var message = String.format( - Locale.ROOT, - "primary shard [%s] was demoted while failing replica shard", - primary.routingEntry() - ); - primary.failShard(message, failure); - finishAsFailed(new RetryOnPrimaryException(primary.routingEntry().shardId(), message, failure)); - } - - @Override - public boolean isForceExecution() { - return true; - } - - @Override - public void onFailure(Exception e) { - e.addSuppressed(failure); - assert false : e; - logger.error(() -> "unexpected failure while failing primary [" + primary.routingEntry() + "]", e); - finishAsFailed( - new RetryOnPrimaryException( - primary.routingEntry().shardId(), - String.format(Locale.ROOT, "unexpected failure while failing primary [%s]", primary.routingEntry()), - e - ) - ); - } - }); - } + private void onNoLongerPrimary(Exception failure, ActionListener listener) { + ActionListener.run(listener, (l) -> { + final Throwable cause = ExceptionsHelper.unwrapCause(failure); + final boolean nodeIsClosing = cause instanceof NodeClosedException; + if (nodeIsClosing) { + // We prefer not to fail the primary to avoid unnecessary warning log + // when the node with the primary shard is gracefully shutting down. + l.onFailure( + new RetryOnPrimaryException( + primary.routingEntry().shardId(), + String.format( + Locale.ROOT, + "node with primary [%s] is shutting down while failing replica shard", + primary.routingEntry() + ), + failure + ) + ); + } else { + assert failure instanceof ShardStateAction.NoLongerPrimaryShardException : failure; + threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() { + @Override + protected void doRun() { + // we are no longer the primary, fail ourselves and start over + final var message = String.format( + Locale.ROOT, + "primary shard [%s] was demoted while failing replica shard", + primary.routingEntry() + ); + primary.failShard(message, failure); + l.onFailure(new RetryOnPrimaryException(primary.routingEntry().shardId(), message, failure)); + } + + @Override + public boolean isForceExecution() { + return true; + } + + @Override + public void onFailure(Exception e) { + e.addSuppressed(failure); + assert false : e; + logger.error(() -> "unexpected failure while failing primary [" + primary.routingEntry() + "]", e); + l.onFailure( + new RetryOnPrimaryException( + primary.routingEntry().shardId(), + String.format(Locale.ROOT, "unexpected failure while failing primary [%s]", primary.routingEntry()), + e + ) + ); + } + }); + } + }); } /** @@ -461,32 +507,6 @@ protected String checkActiveShardCount() { } } - private void decPendingAndFinishIfNeeded() { - assert pendingActions.get() > 0 : "pending action count goes below 0 for request [" + request + "]"; - if (pendingActions.decrementAndGet() == 0) { - finish(); - } - } - - private void finish() { - if (finished.compareAndSet(false, true)) { - primaryResult.setShardInfo( - ReplicationResponse.ShardInfo.of( - totalShards.get(), - successfulShards.get(), - shardReplicaFailures.toArray(ReplicationResponse.NO_FAILURES) - ) - ); - resultListener.onResponse(primaryResult); - } - } - - private void finishAsFailed(Exception exception) { - if (finished.compareAndSet(false, true)) { - resultListener.onFailure(exception); - } - } - /** * An encapsulation of an operation that is to be performed on the primary shard */ @@ -693,7 +713,7 @@ public interface PrimaryResult> { /** * Run actions to be triggered post replication - * @param listener calllback that is invoked after post replication actions have completed + * @param listener callback that is invoked after post replication actions have completed * */ void runPostReplicationActions(ActionListener listener); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 0dff80ecc2cd6..ee24b8d9a9e91 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -3860,8 +3860,7 @@ public int getActiveOperationsCount() { * listener handles all exception cases internally. */ public final void syncAfterWrite(Translog.Location location, Consumer syncListener) { - // TODO AwaitsFix https://github.com/elastic/elasticsearch/issues/97183 - // assert indexShardOperationPermits.getActiveOperationsCount() != 0; + assert indexShardOperationPermits.getActiveOperationsCount() != 0; verifyNotClosed(); getEngine().asyncEnsureTranslogSynced(location, syncListener); } From 0a5b1c6ece91ab2e35f4f8e738532b27a242d618 Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Fri, 1 Nov 2024 07:18:07 +0100 Subject: [PATCH 258/324] semantic_text as string type in ES|QL - support for functions and operators (#115243) * fix tests * Add CSV tests * Add function tests * Refactor tests * spotless * Use DataType.stringTypes() where possible * Add tests for conditional functions and expressions * Fix tests after merge * Reorder semantic_text evaluators and tests * Re-ordered two more places for SEMANTIC_TEXT after TEXT --------- Co-authored-by: Elastic Machine Co-authored-by: Craig Taverner --- .../xpack/esql/core/type/DataType.java | 5 +- .../main/resources/mapping-semantic_text.json | 4 + .../src/main/resources/semantic_text.csv | 8 +- .../src/main/resources/semantic_text.csv-spec | 943 ++++++++++++++++++ .../xpack/esql/analysis/Verifier.java | 4 + .../function/scalar/convert/ToBoolean.java | 2 + .../scalar/convert/ToCartesianPoint.java | 4 +- .../scalar/convert/ToCartesianShape.java | 4 +- .../function/scalar/convert/ToDateNanos.java | 2 + .../function/scalar/convert/ToDatetime.java | 2 + .../function/scalar/convert/ToDouble.java | 2 + .../function/scalar/convert/ToGeoPoint.java | 4 +- .../function/scalar/convert/ToGeoShape.java | 4 +- .../function/scalar/convert/ToIP.java | 4 +- .../function/scalar/convert/ToInteger.java | 5 +- .../function/scalar/convert/ToLong.java | 2 + .../function/scalar/convert/ToString.java | 2 + .../scalar/convert/ToUnsignedLong.java | 2 + .../function/scalar/convert/ToVersion.java | 4 +- .../predicate/operator/comparison/Equals.java | 1 + .../operator/comparison/GreaterThan.java | 1 + .../comparison/GreaterThanOrEqual.java | 1 + .../operator/comparison/LessThan.java | 1 + .../operator/comparison/LessThanOrEqual.java | 1 + .../operator/comparison/NotEquals.java | 1 + .../esql/type/EsqlDataTypeConverter.java | 4 + .../xpack/esql/analysis/VerifierTests.java | 6 +- .../scalar/conditional/CaseTests.java | 2 +- .../scalar/conditional/GreatestTests.java | 30 +- .../scalar/conditional/LeastTests.java | 30 +- .../scalar/convert/FromBase64Tests.java | 30 +- .../scalar/convert/ToBase64Tests.java | 30 +- .../AbstractMultivalueFunctionTestCase.java | 2 +- .../scalar/multivalue/MvAppendTests.java | 16 + .../scalar/multivalue/MvSliceTests.java | 17 + .../scalar/multivalue/MvSortTests.java | 14 + .../function/scalar/string/ConcatTests.java | 5 +- .../function/scalar/string/LeftTests.java | 14 + .../function/scalar/string/LengthTests.java | 10 + .../function/scalar/string/RLikeTests.java | 2 +- .../function/scalar/string/RepeatTests.java | 16 + .../function/scalar/string/ReverseTests.java | 2 +- .../function/scalar/string/RightTests.java | 13 + .../function/scalar/string/SplitTests.java | 5 +- .../scalar/string/SubstringTests.java | 19 + .../function/scalar/string/ToLowerTests.java | 2 + .../function/scalar/string/ToUpperTests.java | 2 + .../scalar/string/WildcardLikeTests.java | 2 +- .../operator/comparison/EqualsTests.java | 4 +- .../comparison/GreaterThanOrEqualTests.java | 2 +- .../operator/comparison/GreaterThanTests.java | 2 +- .../operator/comparison/InTests.java | 20 +- .../comparison/LessThanOrEqualTests.java | 2 +- .../operator/comparison/LessThanTests.java | 2 +- .../esql/type/EsqlDataTypeConverterTests.java | 4 + 55 files changed, 1223 insertions(+), 99 deletions(-) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 1b1eff8a07b1d..81739536c6572 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -370,6 +370,9 @@ public static boolean isUnsupported(DataType from) { } public static boolean isString(DataType t) { + if (EsqlCorePlugin.SEMANTIC_TEXT_FEATURE_FLAG.isEnabled() && t == SEMANTIC_TEXT) { + return true; + } return t == KEYWORD || t == TEXT; } @@ -585,7 +588,7 @@ static Builder builder() { } public DataType noText() { - return this == TEXT ? KEYWORD : this; + return isString(this) ? KEYWORD : this; } /** diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-semantic_text.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-semantic_text.json index b110d6fd4cdd5..c587b69828170 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-semantic_text.json +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-semantic_text.json @@ -68,6 +68,10 @@ }, "value": { "type": "long" + }, + "st_base64": { + "type": "semantic_text", + "inference_id": "test_sparse_inference" } } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv index c6de9a208e9a7..6cae82cfefa0a 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv @@ -1,4 +1,4 @@ -_id:keyword,semantic_text_field:semantic_text,st_bool:semantic_text,st_cartesian_point:semantic_text,st_cartesian_shape:semantic_text,st_datetime:semantic_text,st_double:semantic_text,st_geopoint:semantic_text,st_geoshape:semantic_text,st_integer:semantic_text,st_ip:semantic_text,st_long:semantic_text,st_unsigned_long:semantic_text,st_version:semantic_text,st_multi_value:semantic_text,st_unicode:semantic_text,host:keyword,description:text,value:long -1,live long and prosper,false,"POINT(4297.11 -1475.53)",,1953-09-02T00:00:00.000Z,5.20128E11,"POINT(42.97109630194 14.7552534413725)","POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10))",23,1.1.1.1,2147483648,2147483648,1.2.3,["Hello there!", "This is a random value", "for testing purposes"],你吃饭了吗,"host1","some description1",1001 -2,all we have to decide is what to do with the time that is given to us,true,"POINT(7580.93 2272.77)",,2023-09-24T15:57:00.000Z,4541.11,"POINT(37.97109630194 21.7552534413725)","POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10))",122,1.1.2.1,123,2147483648.2,9.0.0,["nice to meet you", "bye bye!"],["谢谢", "对不起我的中文不好"],"host2","some description2",1002 -3,be excellent to each other,,,,,,,,,,,,,,,"host3","some description3",1003 +_id:keyword,semantic_text_field:semantic_text,st_bool:semantic_text,st_cartesian_point:semantic_text,st_cartesian_shape:semantic_text,st_datetime:semantic_text,st_double:semantic_text,st_geopoint:semantic_text,st_geoshape:semantic_text,st_integer:semantic_text,st_ip:semantic_text,st_long:semantic_text,st_unsigned_long:semantic_text,st_version:semantic_text,st_multi_value:semantic_text,st_unicode:semantic_text,host:keyword,description:text,value:long,st_base64:semantic_text +1,live long and prosper,false,"POINT(4297.11 -1475.53)",,1953-09-02T00:00:00.000Z,5.20128E11,"POINT(42.97109630194 14.7552534413725)","POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10))",23,1.1.1.1,2147483648,2147483648,1.2.3,["Hello there!", "This is a random value", "for testing purposes"],你吃饭了吗,"host1","some description1",1001,ZWxhc3RpYw== +2,all we have to decide is what to do with the time that is given to us,true,"POINT(7580.93 2272.77)",,2023-09-24T15:57:00.000Z,4541.11,"POINT(37.97109630194 21.7552534413725)","POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10))",122,1.1.2.1,123,2147483648.2,9.0.0,["nice to meet you", "bye bye!"],["谢谢", "对不起我的中文不好"],"host2","some description2",1002,aGVsbG8= +3,be excellent to each other,,,,,,,,,,,,,,,"host3","some description3",1003, diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv-spec index 683bcdc3f7490..de2a79df06a50 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv-spec @@ -173,3 +173,946 @@ host:keyword | semantic_text_field:semantic_text "host2" | all we have to decide is what to do with the time that is given to us "host3" | be excellent to each other ; + +case +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = case(st_ip == "1.1.1.1", "okay", "try again") +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:keyword +1 | okay +2 | try again +3 | try again +; + +coalesce +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = coalesce(st_version, st_ip, semantic_text_field) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:keyword +1 | 1.2.3 +2 | 9.0.0 +3 | be excellent to each other +; + +greatest +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = greatest(semantic_text_field, st_version) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:keyword +1 | live long and prosper +2 | all we have to decide is what to do with the time that is given to us +3 | null +; + +least +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = least(semantic_text_field, st_version) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:keyword +1 | 1.2.3 +2 | 9.0.0 +3 | null +; + +convertToBool +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_bool(st_bool) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:bool +1 | false +2 | true +3 | null +; + +convertToCartesianPoint +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_cartesianpoint(st_cartesian_point) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:cartesian_point +1 | "POINT(4297.11 -1475.53)" +2 | "POINT(7580.93 2272.77)" +3 | null +; + +convertToCartesianShape +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_cartesianshape(st_cartesian_shape) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:cartesian_shape +1 | null +2 | null +3 | null +; + +convertToDatetime +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_datetime(st_datetime) +| KEEP _id, result +| SORT _id, result +; + +_id:keyword|result:datetime +1 | 1953-09-02T00:00:00.000Z +2 | 2023-09-24T15:57:00.000Z +3 | null +; + +convertToDouble +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_double(st_double) +| KEEP _id, result +| SORT _id +; + +_id:keyword|result:double +1 | 5.20128E11 +2 | 4541.11 +3 | null +; + +convertToGeopoint +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_geopoint(st_geopoint) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:geo_point +1 | "POINT(42.97109630194 14.7552534413725)" +2 | "POINT(37.97109630194 21.7552534413725)" +3 | null +; + +convertToGeoshape +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_geoshape(st_geoshape) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:geo_shape +1 | "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))" +2 | "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))" +3 | null +; + +convertToInteger +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_integer(st_integer) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:integer +1 | 23 +2 | 122 +3 | null +; + +convertToIp +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_ip(st_ip) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:ip +1 | 1.1.1.1 +2 | 1.1.2.1 +3 | null +; + +convertToLong +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_long(st_long) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:long +1 | 2147483648 +2 | 123 +3 | null +; + +convertToUnsignedLong +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_unsigned_long(st_unsigned_long) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:unsigned_long +1 | 2147483648 +2 | 2147483648.2 +3 | null +; + +convertToVersion +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_version(st_version) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:version +1 | 1.2.3 +2 | 9.0.0 +3 | null +; + +concat +required_capability: semantic_text_type + +FROM semantic_text +| EVAL result = concat("", semantic_text_field, "") +| KEEP result +| SORT result +; + +result:keyword +all we have to decide is what to do with the time that is given to us +be excellent to each other +live long and prosper +; + +endsWith +required_capability: semantic_text_type + +FROM semantic_text +| WHERE ends_with(semantic_text_field, "er") +| KEEP semantic_text_field +| SORT semantic_text_field +; + +semantic_text_field:semantic_text +be excellent to each other +live long and prosper +; + +fromBase64 +required_capability: semantic_text_type +FROM semantic_text +| EVAL result = from_base64(st_base64) +| SORT result +| KEEP result +; + +result:keyword +elastic +hello +null +; + +left +required_capability: semantic_text_type + +FROM semantic_text +| EVAL result = left(semantic_text_field, 2) +| SORT result +| KEEP result +; + +result:keyword +al +be +li +; + +length +required_capability: semantic_text_type + +FROM semantic_text +| EVAL result = length(st_version) +| KEEP result +| SORT result +; + +result:integer +5 +5 +null +; + +locate +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = locate(semantic_text_field, "all") +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:integer +1 | 0 +2 | 1 +3 | 0 +; + +ltrim +required_capability: semantic_text_type + +FROM semantic_text +| EVAL result = ltrim(semantic_text_field) +| SORT result +| KEEP result +; + +result:keyword +all we have to decide is what to do with the time that is given to us +be excellent to each other +live long and prosper +; + +repeat +required_capability: semantic_text_type + +FROM semantic_text +| EVAL result = repeat(semantic_text_field, 2) +| WHERE length(semantic_text_field) < 25 +| KEEP result +; + +result:keyword +live long and prosperlive long and prosper +; + +replace +required_capability: semantic_text_type + +FROM semantic_text +| EVAL result = replace(semantic_text_field, "excellent", "good") +| WHERE length(semantic_text_field) < 30 +| KEEP result +| SORT result +; + +result:keyword +be good to each other +live long and prosper +; + +right +required_capability: semantic_text_type + +FROM semantic_text +| EVAL result = right(semantic_text_field, 2) +| KEEP result +| SORT result +; + +result:keyword +er +er +us +; + +rtrim +required_capability: semantic_text_type + +FROM semantic_text +| EVAL result = rtrim(semantic_text_field) +| KEEP result +| SORT result +; + +result:keyword +all we have to decide is what to do with the time that is given to us +be excellent to each other +live long and prosper +; + +split +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = split(st_version, ".") +| SORT _id +| KEEP result +; + +result:keyword +["1", "2", "3"] +["9", "0", "0"] +null +; + +startsWith +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = starts_with(semantic_text_field, "be") +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:bool +1 | false +2 | false +3 | true +; + +substring +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = substring(semantic_text_field, 2, 1) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:keyword +1 | i +2 | l +3 | e +; + +toBase64 +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_base64(st_integer) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:keyword +1 | MjM= +2 | MTIy +3 | null +; + +toLower +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_lower(st_cartesian_point) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:keyword +1 | point(4297.11 -1475.53) +2 | point(7580.93 2272.77) +3 | null +; + +toUpper +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = to_upper(semantic_text_field) +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:keyword +1 | LIVE LONG AND PROSPER +2 | ALL WE HAVE TO DECIDE IS WHAT TO DO WITH THE TIME THAT IS GIVEN TO US +3 | BE EXCELLENT TO EACH OTHER +; + +trim +required_capability: semantic_text_type + +FROM semantic_text +| EVAL result = trim(semantic_text_field) +| SORT result +| KEEP result +; + +result:keyword +all we have to decide is what to do with the time that is given to us +be excellent to each other +live long and prosper +; + +mvAppend +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_append(st_multi_value, st_long) +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:keyword +1 | ["Hello there!", "This is a random value", "for testing purposes", "2147483648"] +2 | ["nice to meet you", "bye bye!", "123"] +3 | null +; + +mvConcat +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_concat(st_multi_value, "; ") +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:keyword +1 | Hello there!; This is a random value; for testing purposes +2 | nice to meet you; bye bye! +3 | null +; + +mvCount +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_count(st_multi_value) +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:integer +1 | 3 +2 | 2 +3 | null +; + +mvDedupe +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_dedupe(st_multi_value) +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:keyword +1 | ["Hello there!", "This is a random value", "for testing purposes"] +2 | ["nice to meet you", "bye bye!"] +3 | null +; + +mvFirst +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_first(st_multi_value) +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:keyword +1 | Hello there! +2 | nice to meet you +3 | null +; + +mvLast +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_last(st_multi_value) +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:keyword +1 | for testing purposes +2 | bye bye! +3 | null +; + +mvMax +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_max(st_multi_value) +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:keyword +1 | for testing purposes +2 | nice to meet you +3 | null +; + +mvMin +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_min(st_multi_value) +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:keyword +1 | Hello there! +2 | bye bye! +3 | null +; + +mvSlice +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_slice(st_multi_value, 1, 2) +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:keyword +1 | ["This is a random value", "for testing purposes"] +2 | bye bye! +3 | null +; + +mvSort +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_sort(st_multi_value, "ASC") +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:keyword +1 | ["Hello there!", "This is a random value", "for testing purposes"] +2 | ["bye bye!", "nice to meet you"] +3 | null +; + +mvZip +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = mv_zip(st_multi_value, st_multi_value, " + ") +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:keyword +1 | ["Hello there! + Hello there!", "This is a random value + This is a random value", "for testing purposes + for testing purposes"] +2 | ["nice to meet you + nice to meet you", "bye bye! + bye bye!"] +3 | null +; + +equalityWithConstant +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_ip == "1.1.1.1" +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | true +2 | false +3 | null +; + +equalityBetweenFields +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_long == st_unsigned_long +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | true +2 | false +3 | null +; + +inequalityWithConstant +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_ip != "1.1.1.1" +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | false +2 | true +3 | null +; + +inequalityBetweenFields +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_long != st_unsigned_long +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | false +2 | true +3 | null +; + +lessThanWithConstant +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = semantic_text_field < "bye!" +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | false +2 | true +3 | true +; + +lessThanBetweenFields +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = semantic_text_field < st_version +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | false +2 | false +3 | null +; + + +lessThanOrEqualToWithConstant +required_capability: semantic_text_type + + +FROM semantic_text METADATA _id +| EVAL result = semantic_text_field <= "be excellent to each other" +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | false +2 | true +3 | true +; + +lessThanOrEqualToBetweenFields +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_integer <= st_long +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | false +2 | true +3 | null +; + +greaterThanWithConstant +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = semantic_text_field > "bye!" +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | true +2 | false +3 | false +; + +greaterThanBetweenFields +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = semantic_text_field > st_version +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | true +2 | true +3 | null +; + +greaterThanOrEqualToWithConstant +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = semantic_text_field >= "be excellent to each other" +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | true +2 | false +3 | true +; + +greaterThanOrEqualToBetweenFields +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_integer >= st_long +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | true +2 | false +3 | null +; + +isNull +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_integer IS NULL +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | false +2 | false +3 | true +; + +isNotNull +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_integer IS NOT NULL +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | true +2 | true +3 | false +; + +cast +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_bool::BOOL +| KEEP _id, result +| SORT _id +; + +_id:keyword | result:bool +1 | false +2 | true +3 | null +; + +in +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_integer IN ("123", "23") +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | true +2 | false +3 | null +; + +like +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = semantic_text_field LIKE "all*" +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | false +2 | true +3 | false +; + +rlike +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| EVAL result = st_version RLIKE "[0-9].[0-9].[0-9]" +| KEEP _id, result +| SORT _id +; + +_id: keyword | result:bool +1 | true +2 | true +3 | null +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index fbaf43467a2e7..994ea3ecdbb0d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.analysis; import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.capabilities.Unresolvable; import org.elasticsearch.xpack.esql.core.expression.Alias; @@ -505,6 +506,9 @@ public static Failure validateBinaryComparison(BinaryComparison bc) { List allowed = new ArrayList<>(); allowed.add(DataType.KEYWORD); allowed.add(DataType.TEXT); + if (EsqlCapabilities.Cap.SEMANTIC_TEXT_TYPE.isEnabled()) { + allowed.add(DataType.SEMANTIC_TEXT); + } allowed.add(DataType.IP); allowed.add(DataType.DATETIME); allowed.add(DataType.VERSION); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBoolean.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBoolean.java index 06cc993456433..ad73de7829692 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBoolean.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBoolean.java @@ -28,6 +28,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToBoolean; @@ -44,6 +45,7 @@ public class ToBoolean extends AbstractConvertFunction { Map.entry(BOOLEAN, (field, source) -> field), Map.entry(KEYWORD, ToBooleanFromStringEvaluator.Factory::new), Map.entry(TEXT, ToBooleanFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToBooleanFromStringEvaluator.Factory::new), Map.entry(DOUBLE, ToBooleanFromDoubleEvaluator.Factory::new), Map.entry(LONG, ToBooleanFromLongEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToBooleanFromUnsignedLongEvaluator.Factory::new), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java index 60a25fc91d50d..92ae2cd0ade52 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java @@ -25,6 +25,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToSpatial; @@ -38,7 +39,8 @@ public class ToCartesianPoint extends AbstractConvertFunction { private static final Map EVALUATORS = Map.ofEntries( Map.entry(CARTESIAN_POINT, (fieldEval, source) -> fieldEval), Map.entry(KEYWORD, ToCartesianPointFromStringEvaluator.Factory::new), - Map.entry(TEXT, ToCartesianPointFromStringEvaluator.Factory::new) + Map.entry(TEXT, ToCartesianPointFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToCartesianPointFromStringEvaluator.Factory::new) ); @FunctionInfo( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java index 03ac4bdf48243..83e66e9e3190f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java @@ -26,6 +26,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_SHAPE; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToSpatial; @@ -40,7 +41,8 @@ public class ToCartesianShape extends AbstractConvertFunction { Map.entry(CARTESIAN_POINT, (fieldEval, source) -> fieldEval), Map.entry(CARTESIAN_SHAPE, (fieldEval, source) -> fieldEval), Map.entry(KEYWORD, ToCartesianShapeFromStringEvaluator.Factory::new), - Map.entry(TEXT, ToCartesianShapeFromStringEvaluator.Factory::new) + Map.entry(TEXT, ToCartesianShapeFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToCartesianShapeFromStringEvaluator.Factory::new) ); @FunctionInfo( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanos.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanos.java index 9a6a91b7ccedd..8c4375b424cdc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanos.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanos.java @@ -32,6 +32,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.DEFAULT_DATE_NANOS_FORMATTER; @@ -49,6 +50,7 @@ public class ToDateNanos extends AbstractConvertFunction { Map.entry(LONG, ToDateNanosFromLongEvaluator.Factory::new), Map.entry(KEYWORD, ToDateNanosFromStringEvaluator.Factory::new), Map.entry(TEXT, ToDateNanosFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToDateNanosFromStringEvaluator.Factory::new), Map.entry(DOUBLE, ToDateNanosFromDoubleEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToLongFromUnsignedLongEvaluator.Factory::new) /* diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java index c66ba7f87a1c5..f8fe663b9086c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java @@ -30,6 +30,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.dateTimeToLong; @@ -47,6 +48,7 @@ public class ToDatetime extends AbstractConvertFunction { Map.entry(LONG, (field, source) -> field), Map.entry(KEYWORD, ToDatetimeFromStringEvaluator.Factory::new), Map.entry(TEXT, ToDatetimeFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToDatetimeFromStringEvaluator.Factory::new), Map.entry(DOUBLE, ToLongFromDoubleEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToLongFromUnsignedLongEvaluator.Factory::new), Map.entry(INTEGER, ToLongFromIntEvaluator.Factory::new) // CastIntToLongEvaluator would be a candidate, but not MV'd diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java index de88281e7dbd1..67b7af73576eb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java @@ -30,6 +30,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToDouble; @@ -44,6 +45,7 @@ public class ToDouble extends AbstractConvertFunction { Map.entry(DATETIME, ToDoubleFromLongEvaluator.Factory::new), // CastLongToDoubleEvaluator would be a candidate, but not MV'd Map.entry(KEYWORD, ToDoubleFromStringEvaluator.Factory::new), Map.entry(TEXT, ToDoubleFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToDoubleFromStringEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToDoubleFromUnsignedLongEvaluator.Factory::new), Map.entry(LONG, ToDoubleFromLongEvaluator.Factory::new), // CastLongToDoubleEvaluator would be a candidate, but not MV'd Map.entry(INTEGER, ToDoubleFromIntEvaluator.Factory::new), // CastIntToDoubleEvaluator would be a candidate, but not MV'd diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java index 51cb08137a58c..42af06a40553d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java @@ -25,6 +25,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToSpatial; @@ -38,7 +39,8 @@ public class ToGeoPoint extends AbstractConvertFunction { private static final Map EVALUATORS = Map.ofEntries( Map.entry(GEO_POINT, (fieldEval, source) -> fieldEval), Map.entry(KEYWORD, ToGeoPointFromStringEvaluator.Factory::new), - Map.entry(TEXT, ToGeoPointFromStringEvaluator.Factory::new) + Map.entry(TEXT, ToGeoPointFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToGeoPointFromStringEvaluator.Factory::new) ); @FunctionInfo( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java index 00e9fb3e598f1..b5b6db2752b06 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java @@ -26,6 +26,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_SHAPE; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToSpatial; @@ -40,7 +41,8 @@ public class ToGeoShape extends AbstractConvertFunction { Map.entry(GEO_POINT, (fieldEval, source) -> fieldEval), Map.entry(GEO_SHAPE, (fieldEval, source) -> fieldEval), Map.entry(KEYWORD, ToGeoShapeFromStringEvaluator.Factory::new), - Map.entry(TEXT, ToGeoShapeFromStringEvaluator.Factory::new) + Map.entry(TEXT, ToGeoShapeFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToGeoShapeFromStringEvaluator.Factory::new) ); @FunctionInfo( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java index 6df85948d94ef..cd161744bfc86 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java @@ -25,6 +25,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.IP; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToIP; @@ -34,7 +35,8 @@ public class ToIP extends AbstractConvertFunction { private static final Map EVALUATORS = Map.ofEntries( Map.entry(IP, (field, source) -> field), Map.entry(KEYWORD, ToIPFromStringEvaluator.Factory::new), - Map.entry(TEXT, ToIPFromStringEvaluator.Factory::new) + Map.entry(TEXT, ToIPFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToIPFromStringEvaluator.Factory::new) ); @FunctionInfo( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java index 1785160594a78..d316b6eb46c38 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java @@ -25,11 +25,13 @@ import java.util.Map; import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; +import static org.elasticsearch.xpack.esql.core.type.DataType.COUNTER_INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataTypeConverter.safeToInt; @@ -49,10 +51,11 @@ public class ToInteger extends AbstractConvertFunction { Map.entry(DATETIME, ToIntegerFromLongEvaluator.Factory::new), Map.entry(KEYWORD, ToIntegerFromStringEvaluator.Factory::new), Map.entry(TEXT, ToIntegerFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToIntegerFromStringEvaluator.Factory::new), Map.entry(DOUBLE, ToIntegerFromDoubleEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToIntegerFromUnsignedLongEvaluator.Factory::new), Map.entry(LONG, ToIntegerFromLongEvaluator.Factory::new), - Map.entry(DataType.COUNTER_INTEGER, (fieldEval, source) -> fieldEval) + Map.entry(COUNTER_INTEGER, (fieldEval, source) -> fieldEval) ); @FunctionInfo( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java index e5f138df159cd..dbfb52b408b44 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java @@ -31,6 +31,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataTypeConverter.safeDoubleToLong; @@ -47,6 +48,7 @@ public class ToLong extends AbstractConvertFunction { Map.entry(BOOLEAN, ToLongFromBooleanEvaluator.Factory::new), Map.entry(KEYWORD, ToLongFromStringEvaluator.Factory::new), Map.entry(TEXT, ToLongFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToLongFromStringEvaluator.Factory::new), Map.entry(DOUBLE, ToLongFromDoubleEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToLongFromUnsignedLongEvaluator.Factory::new), Map.entry(INTEGER, ToLongFromIntEvaluator.Factory::new), // CastIntToLongEvaluator would be a candidate, but not MV'd diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java index f9bc15c4d6903..2c8ecd794ef0b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java @@ -36,6 +36,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.IP; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; @@ -60,6 +61,7 @@ public class ToString extends AbstractConvertFunction implements EvaluatorMapper Map.entry(LONG, ToStringFromLongEvaluator.Factory::new), Map.entry(INTEGER, ToStringFromIntEvaluator.Factory::new), Map.entry(TEXT, (fieldEval, source) -> fieldEval), + Map.entry(SEMANTIC_TEXT, (fieldEval, source) -> fieldEval), Map.entry(VERSION, ToStringFromVersionEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToStringFromUnsignedLongEvaluator.Factory::new), Map.entry(GEO_POINT, ToStringFromGeoPointEvaluator.Factory::new), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java index bfbfcf44b3945..ea06793f7adb6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java @@ -30,6 +30,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.booleanToUnsignedLong; @@ -51,6 +52,7 @@ public class ToUnsignedLong extends AbstractConvertFunction { Map.entry(BOOLEAN, ToUnsignedLongFromBooleanEvaluator.Factory::new), Map.entry(KEYWORD, ToUnsignedLongFromStringEvaluator.Factory::new), Map.entry(TEXT, ToUnsignedLongFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToUnsignedLongFromStringEvaluator.Factory::new), Map.entry(DOUBLE, ToUnsignedLongFromDoubleEvaluator.Factory::new), Map.entry(LONG, ToUnsignedLongFromLongEvaluator.Factory::new), Map.entry(INTEGER, ToUnsignedLongFromIntEvaluator.Factory::new) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersion.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersion.java index f6002c3c6bb17..296ddb35c3c41 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersion.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersion.java @@ -24,6 +24,7 @@ import java.util.Map; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToVersion; @@ -38,7 +39,8 @@ public class ToVersion extends AbstractConvertFunction { private static final Map EVALUATORS = Map.ofEntries( Map.entry(VERSION, (fieldEval, source) -> fieldEval), Map.entry(KEYWORD, ToVersionFromStringEvaluator.Factory::new), - Map.entry(TEXT, ToVersionFromStringEvaluator.Factory::new) + Map.entry(TEXT, ToVersionFromStringEvaluator.Factory::new), + Map.entry(SEMANTIC_TEXT, ToVersionFromStringEvaluator.Factory::new) ); @FunctionInfo( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java index 614d9aa3ec920..6bb249385affe 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java @@ -42,6 +42,7 @@ public class Equals extends EsqlBinaryComparison implements Negatable parameters() { builder.expectFlattenedInt(IntStream::max); builder.expectFlattenedLong(LongStream::max); List suppliers = builder.suppliers(); - suppliers.add( - new TestCaseSupplier( - "(a, b)", - List.of(DataType.KEYWORD, DataType.KEYWORD), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("a"), DataType.KEYWORD, "a"), - new TestCaseSupplier.TypedData(new BytesRef("b"), DataType.KEYWORD, "b") - ), - "GreatestBytesRefEvaluator[values=[MvMax[field=Attribute[channel=0]], MvMax[field=Attribute[channel=1]]]]", - DataType.KEYWORD, - equalTo(new BytesRef("b")) + for (DataType stringType : DataType.stringTypes()) { + suppliers.add( + new TestCaseSupplier( + "(a, b)", + List.of(stringType, stringType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("a"), stringType, "a"), + new TestCaseSupplier.TypedData(new BytesRef("b"), stringType, "b") + ), + "GreatestBytesRefEvaluator[values=[MvMax[field=Attribute[channel=0]], MvMax[field=Attribute[channel=1]]]]", + stringType, + equalTo(new BytesRef("b")) + ) ) - ) - ); + ); + } suppliers.add( new TestCaseSupplier( "(a, b)", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java index d95cc79dd22e0..3b24a4cbdc1eb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java @@ -39,21 +39,23 @@ public static Iterable parameters() { builder.expectFlattenedInt(IntStream::min); builder.expectFlattenedLong(LongStream::min); List suppliers = builder.suppliers(); - suppliers.add( - new TestCaseSupplier( - "(a, b)", - List.of(DataType.KEYWORD, DataType.KEYWORD), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("a"), DataType.KEYWORD, "a"), - new TestCaseSupplier.TypedData(new BytesRef("b"), DataType.KEYWORD, "b") - ), - "LeastBytesRefEvaluator[values=[MvMin[field=Attribute[channel=0]], MvMin[field=Attribute[channel=1]]]]", - DataType.KEYWORD, - equalTo(new BytesRef("a")) + for (DataType stringType : DataType.stringTypes()) { + suppliers.add( + new TestCaseSupplier( + "(a, b)", + List.of(stringType, stringType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("a"), stringType, "a"), + new TestCaseSupplier.TypedData(new BytesRef("b"), stringType, "b") + ), + "LeastBytesRefEvaluator[values=[MvMin[field=Attribute[channel=0]], MvMin[field=Attribute[channel=1]]]]", + stringType, + equalTo(new BytesRef("a")) + ) ) - ) - ); + ); + } suppliers.add( new TestCaseSupplier( "(a, b)", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java index f472e5ef5efd9..60901e2a8214f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java @@ -35,25 +35,17 @@ public FromBase64Tests(@Name("TestCase") Supplier tes @ParametersFactory public static Iterable parameters() { List suppliers = new ArrayList<>(); - suppliers.add(new TestCaseSupplier(List.of(DataType.KEYWORD), () -> { - BytesRef input = new BytesRef(randomAlphaOfLength(6)); - return new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(input, DataType.KEYWORD, "string")), - "FromBase64Evaluator[field=Attribute[channel=0]]", - DataType.KEYWORD, - equalTo(new BytesRef(Base64.getDecoder().decode(input.utf8ToString().getBytes(StandardCharsets.UTF_8)))) - ); - })); - - suppliers.add(new TestCaseSupplier(List.of(DataType.TEXT), () -> { - BytesRef input = new BytesRef(randomAlphaOfLength(54)); - return new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(input, DataType.TEXT, "string")), - "FromBase64Evaluator[field=Attribute[channel=0]]", - DataType.KEYWORD, - equalTo(new BytesRef(Base64.getDecoder().decode(input.utf8ToString().getBytes(StandardCharsets.UTF_8)))) - ); - })); + for (DataType dataType : DataType.stringTypes()) { + suppliers.add(new TestCaseSupplier(List.of(dataType), () -> { + BytesRef input = new BytesRef(randomAlphaOfLength(54)); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(input, dataType, "string")), + "FromBase64Evaluator[field=Attribute[channel=0]]", + DataType.KEYWORD, + equalTo(new BytesRef(Base64.getDecoder().decode(input.utf8ToString().getBytes(StandardCharsets.UTF_8)))) + ); + })); + } return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "string"); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java index 06b5af8d7067b..6e6ff7bf52fce 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java @@ -36,25 +36,17 @@ public ToBase64Tests(@Name("TestCase") Supplier testC @ParametersFactory public static Iterable parameters() { List suppliers = new ArrayList<>(); - suppliers.add(new TestCaseSupplier(List.of(DataType.KEYWORD), () -> { - BytesRef input = (BytesRef) randomLiteral(DataType.KEYWORD).value(); - return new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(input, DataType.KEYWORD, "string")), - "ToBase64Evaluator[field=Attribute[channel=0]]", - DataType.KEYWORD, - equalTo(new BytesRef(Base64.getEncoder().encode(input.utf8ToString().getBytes(StandardCharsets.UTF_8)))) - ); - })); - - suppliers.add(new TestCaseSupplier(List.of(DataType.TEXT), () -> { - BytesRef input = (BytesRef) randomLiteral(DataType.TEXT).value(); - return new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(input, DataType.TEXT, "string")), - "ToBase64Evaluator[field=Attribute[channel=0]]", - DataType.KEYWORD, - equalTo(new BytesRef(Base64.getEncoder().encode(input.utf8ToString().getBytes(StandardCharsets.UTF_8)))) - ); - })); + for (DataType dataType : DataType.stringTypes()) { + suppliers.add(new TestCaseSupplier(List.of(dataType), () -> { + BytesRef input = (BytesRef) randomLiteral(dataType).value(); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(input, dataType, "string")), + "ToBase64Evaluator[field=Attribute[channel=0]]", + DataType.KEYWORD, + equalTo(new BytesRef(Base64.getEncoder().encode(input.utf8ToString().getBytes(StandardCharsets.UTF_8)))) + ); + })); + } return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "string"); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java index 13920fac26f5b..65f5653f27e1a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java @@ -123,7 +123,7 @@ protected static void bytesRefs( Function expectedDataType, BiFunction, Matcher> matcher ) { - for (DataType type : new DataType[] { DataType.KEYWORD, DataType.TEXT, DataType.IP, DataType.VERSION }) { + for (DataType type : new DataType[] { DataType.KEYWORD, DataType.TEXT, DataType.SEMANTIC_TEXT, DataType.IP, DataType.VERSION }) { if (type != DataType.IP) { cases.add( new TestCaseSupplier( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java index 37f4464c8b3ca..33733d5e70c61 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java @@ -171,6 +171,22 @@ private static void bytesRefs(List suppliers) { ); })); + suppliers.add(new TestCaseSupplier(List.of(DataType.SEMANTIC_TEXT, DataType.SEMANTIC_TEXT), () -> { + List field1 = randomList(1, 10, () -> randomLiteral(DataType.SEMANTIC_TEXT).value()); + List field2 = randomList(1, 10, () -> randomLiteral(DataType.SEMANTIC_TEXT).value()); + var result = new ArrayList<>(field1); + result.addAll(field2); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field1, DataType.SEMANTIC_TEXT, "field1"), + new TestCaseSupplier.TypedData(field2, DataType.SEMANTIC_TEXT, "field2") + ), + "MvAppendBytesRefEvaluator[field1=Attribute[channel=0], field2=Attribute[channel=1]]", + DataType.SEMANTIC_TEXT, + equalTo(result) + ); + })); + suppliers.add(new TestCaseSupplier(List.of(DataType.IP, DataType.IP), () -> { List field1 = randomList(1, 10, () -> randomLiteral(DataType.IP).value()); List field2 = randomList(1, 10, () -> randomLiteral(DataType.IP).value()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java index 859c79090d62f..d5284602bf40c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java @@ -255,6 +255,23 @@ private static void bytesRefs(List suppliers) { ); })); + suppliers.add(new TestCaseSupplier(List.of(DataType.SEMANTIC_TEXT, DataType.INTEGER, DataType.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataType.SEMANTIC_TEXT).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataType.SEMANTIC_TEXT, "field"), + new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataType.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataType.SEMANTIC_TEXT, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + suppliers.add(new TestCaseSupplier(List.of(DataType.IP, DataType.INTEGER, DataType.INTEGER), () -> { List field = randomList(1, 10, () -> randomLiteral(DataType.IP).value()); int length = field.size(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java index 63f538059dddf..e5f240c811bd0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java @@ -171,6 +171,20 @@ private static void bytesRefs(List suppliers) { ); })); + suppliers.add(new TestCaseSupplier(List.of(DataType.SEMANTIC_TEXT, DataType.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataType.SEMANTIC_TEXT).value()); + BytesRef order = new BytesRef("ASC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataType.SEMANTIC_TEXT, "field"), + new TestCaseSupplier.TypedData(order, DataType.KEYWORD, "order").forceLiteral() + ), + "MvSortBytesRef[field=Attribute[channel=0], order=true]", + DataType.SEMANTIC_TEXT, + equalTo(field.size() == 1 ? field.iterator().next() : field.stream().sorted().toList()) + ); + })); + suppliers.add(new TestCaseSupplier(List.of(DataType.IP, DataType.KEYWORD), () -> { List field = randomList(1, 10, () -> randomLiteral(DataType.IP).value()); BytesRef order = new BytesRef("DESC"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java index 2ad953c9296b7..42c6284a3c25a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java @@ -48,7 +48,7 @@ public static Iterable parameters() { for (int length = 4; length < 100; length++) { suppliers(suppliers, length); } - Set supported = Set.of(DataType.NULL, DataType.KEYWORD, DataType.TEXT); + Set supported = Set.of(DataType.NULL, DataType.KEYWORD, DataType.TEXT, DataType.SEMANTIC_TEXT); List> supportedPerPosition = List.of(supported, supported); for (DataType lhs : DataType.types()) { if (lhs == DataType.NULL || DataType.isRepresentable(lhs) == false) { @@ -72,6 +72,7 @@ private static void suppliers(List suppliers, int length) { if (length > 3) { suppliers.add(supplier("ascii", DataType.KEYWORD, length, () -> randomAlphaOfLengthBetween(1, 10))); suppliers.add(supplier("unicode", DataType.TEXT, length, () -> randomRealisticUnicodeOfLengthBetween(1, 10))); + suppliers.add(supplier("unicode", DataType.SEMANTIC_TEXT, length, () -> randomRealisticUnicodeOfLengthBetween(1, 10))); } else { add(suppliers, "ascii", length, () -> randomAlphaOfLengthBetween(1, 10)); add(suppliers, "unicode", length, () -> randomRealisticUnicodeOfLengthBetween(1, 10)); @@ -99,7 +100,7 @@ private static TestCaseSupplier supplier(String name, DataType type, int length, private static void add(List suppliers, String name, int length, Supplier valueSupplier) { Map>> permutations = new HashMap>>(); - List supportedDataTypes = List.of(DataType.KEYWORD, DataType.TEXT); + List supportedDataTypes = DataType.stringTypes().stream().toList(); permutations.put(0, List.of(List.of(DataType.KEYWORD), List.of(DataType.TEXT))); for (int v = 0; v < length - 1; v++) { List> current = permutations.get(v); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java index 627c46da025ea..e4e54a9e0935f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java @@ -167,6 +167,20 @@ public static Iterable parameters() { ); })); + suppliers.add(new TestCaseSupplier("semantic_text as input", List.of(DataType.SEMANTIC_TEXT, DataType.INTEGER), () -> { + String text = randomUnicodeOfLengthBetween(1, 64); + int length = between(1, text.length()); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataType.SEMANTIC_TEXT, "str"), + new TestCaseSupplier.TypedData(length, DataType.INTEGER, "length") + ), + "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]", + DataType.KEYWORD, + equalTo(new BytesRef(unicodeLeftSubstring(text, length))) + ); + })); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> switch (p) { case 0 -> "string"; case 1 -> "integer"; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java index 6ae5a9d961398..ba4c8c8ce1ea4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java @@ -73,6 +73,16 @@ private static List makeTestCases(String title, Supplier new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(new BytesRef(text.get()), DataType.SEMANTIC_TEXT, "f")), + "LengthEvaluator[val=Attribute[channel=0]]", + DataType.INTEGER, + equalTo(expectedLength) + ) ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java index dab2fca212ff4..4f8adf3abaae6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java @@ -125,7 +125,7 @@ private static void casesForString( } private static void cases(List cases, String title, Supplier textAndPattern, boolean expected) { - for (DataType type : new DataType[] { DataType.KEYWORD, DataType.TEXT }) { + for (DataType type : DataType.stringTypes()) { cases.add(new TestCaseSupplier(title + " with " + type.esType(), List.of(type, type, DataType.BOOLEAN), () -> { TextAndPattern v = textAndPattern.get(); return new TestCaseSupplier.TestCase( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatTests.java index 4d97a2f629c23..8b4ea066fdccb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatTests.java @@ -62,6 +62,22 @@ public static Iterable parameters() { ); })); + cases.add( + new TestCaseSupplier("Repeat basic test with semantic_text input", List.of(DataType.SEMANTIC_TEXT, DataType.INTEGER), () -> { + String text = randomAlphaOfLength(10); + int number = between(0, 10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataType.SEMANTIC_TEXT, "str"), + new TestCaseSupplier.TypedData(number, DataType.INTEGER, "number") + ), + "RepeatEvaluator[str=Attribute[channel=0], number=Attribute[channel=1]]", + DataType.KEYWORD, + equalTo(new BytesRef(text.repeat(number))) + ); + }) + ); + cases.add(new TestCaseSupplier("Repeat with number zero", List.of(DataType.KEYWORD, DataType.INTEGER), () -> { String text = randomAlphaOfLength(10); int number = 0; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReverseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReverseTests.java index 2873f18d53957..58d52cc02b548 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReverseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReverseTests.java @@ -33,7 +33,7 @@ public ReverseTests(@Name("TestCase") Supplier testCa public static Iterable parameters() { List suppliers = new ArrayList<>(); - for (DataType stringType : new DataType[] { DataType.KEYWORD, DataType.TEXT }) { + for (DataType stringType : DataType.stringTypes()) { for (var supplier : TestCaseSupplier.stringCases(stringType)) { suppliers.add(makeSupplier(supplier)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java index a1ef77a62b67c..bf93ef42ed6ad 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java @@ -166,6 +166,19 @@ public static Iterable parameters() { equalTo(new BytesRef(unicodeRightSubstring(text, length))) ); })); + suppliers.add(new TestCaseSupplier("ascii as semantic_text", List.of(DataType.SEMANTIC_TEXT, DataType.INTEGER), () -> { + String text = randomAlphaOfLengthBetween(1, 64); + int length = between(1, text.length()); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataType.SEMANTIC_TEXT, "str"), + new TestCaseSupplier.TypedData(length, DataType.INTEGER, "length") + ), + "RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]", + DataType.KEYWORD, + equalTo(new BytesRef(unicodeRightSubstring(text, length))) + ); + })); return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> switch (p) { case 0 -> "string"; case 1 -> "integer"; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java index b5560f37914a9..098be8e1fda37 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java @@ -42,9 +42,8 @@ public SplitTests(@Name("TestCase") Supplier testCase @ParametersFactory public static Iterable parameters() { List suppliers = new ArrayList<>(); - List supportedDataTyes = List.of(DataType.KEYWORD, DataType.TEXT); - for (DataType sType : supportedDataTyes) { - for (DataType dType : supportedDataTyes) { + for (DataType sType : DataType.stringTypes()) { + for (DataType dType : DataType.stringTypes()) { suppliers.add(new TestCaseSupplier("split test " + sType.toString() + " " + dType.toString(), List.of(sType, dType), () -> { String delimiter = randomAlphaOfLength(1); List strings = IntStream.range(0, between(1, 5)) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java index 6b934aae775df..ae8a2a1840dfb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java @@ -72,6 +72,25 @@ public static Iterable parameters() { ); } ), + new TestCaseSupplier( + "Substring basic test with semantic_text input", + List.of(DataType.SEMANTIC_TEXT, DataType.INTEGER, DataType.INTEGER), + () -> { + int start = between(1, 8); + int length = between(1, 10 - start); + String text = randomAlphaOfLength(10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataType.SEMANTIC_TEXT, "str"), + new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), + new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") + ), + "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", + DataType.KEYWORD, + equalTo(new BytesRef(text.substring(start - 1, start + length - 1))) + ); + } + ), new TestCaseSupplier("Substring empty string", List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER), () -> { int start = between(1, 8); int length = between(1, 10 - start); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java index 1f564ecb87f1e..69dbe023bde66 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -45,6 +45,8 @@ public static Iterable parameters() { suppliers.add(supplier("keyword unicode", DataType.KEYWORD, () -> randomUnicodeOfLengthBetween(1, 10))); suppliers.add(supplier("text ascii", DataType.TEXT, () -> randomAlphaOfLengthBetween(1, 10))); suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); + suppliers.add(supplier("semantic_text ascii", DataType.SEMANTIC_TEXT, () -> randomAlphaOfLengthBetween(1, 10))); + suppliers.add(supplier("semantic_text unicode", DataType.SEMANTIC_TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "string"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java index 7c136c3bb83c2..33d6f929503b3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -45,6 +45,8 @@ public static Iterable parameters() { suppliers.add(supplier("keyword unicode", DataType.KEYWORD, () -> randomUnicodeOfLengthBetween(1, 10))); suppliers.add(supplier("text ascii", DataType.TEXT, () -> randomAlphaOfLengthBetween(1, 10))); suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); + suppliers.add(supplier("semantic_text ascii", DataType.SEMANTIC_TEXT, () -> randomAlphaOfLengthBetween(1, 10))); + suppliers.add(supplier("semantic_text unicode", DataType.SEMANTIC_TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "string"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java index 53b3a99f97b9c..eed2c7379e9e1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java @@ -53,7 +53,7 @@ public static Iterable parameters() { } private static void addCases(List suppliers) { - for (DataType type : new DataType[] { DataType.KEYWORD, DataType.TEXT }) { + for (DataType type : new DataType[] { DataType.KEYWORD, DataType.TEXT, DataType.SEMANTIC_TEXT }) { suppliers.add(new TestCaseSupplier(" with " + type.esType(), List.of(type, type), () -> { BytesRef str = new BytesRef(randomAlphaOfLength(5)); String patternString = randomAlphaOfLength(2); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java index 66edd56e32f20..0fb416584b472 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java @@ -217,8 +217,8 @@ public static Iterable parameters() { } private static String typeErrorString = - "boolean, cartesian_point, cartesian_shape, datetime, date_nanos, double, geo_point, geo_shape, integer, ip, keyword, long, text, " - + "unsigned_long or version"; + "boolean, cartesian_point, cartesian_shape, datetime, date_nanos, double, geo_point, geo_shape, integer, ip, keyword, long," + + " semantic_text, text, unsigned_long or version"; @Override protected Expression build(Source source, List args) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java index a4d1bf69796e0..a4f1a19e135ef 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java @@ -150,7 +150,7 @@ public static Iterable parameters() { o, v, t, - (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, text, unsigned_long or version" + (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, semantic_text, text, unsigned_long or version" ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java index d3fede5c2e2ce..86a4676e35009 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java @@ -150,7 +150,7 @@ public static Iterable parameters() { o, v, t, - (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, text, unsigned_long or version" + (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, semantic_text, text, unsigned_long or version" ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InTests.java index 2a1dfb098a3a4..b004adca351ab 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InTests.java @@ -187,8 +187,24 @@ private static void bytesRefs(List suppliers, int items) { ); })); - for (DataType type1 : new DataType[] { DataType.KEYWORD, DataType.TEXT }) { - for (DataType type2 : new DataType[] { DataType.KEYWORD, DataType.TEXT }) { + suppliers.add(new TestCaseSupplier("semantic_text", List.of(DataType.SEMANTIC_TEXT, DataType.SEMANTIC_TEXT), () -> { + List inlist = randomList(items, items, () -> randomLiteral(DataType.SEMANTIC_TEXT).value()); + Object field = inlist.get(0); + List args = new ArrayList<>(inlist.size() + 1); + for (Object i : inlist) { + args.add(new TestCaseSupplier.TypedData(i, DataType.SEMANTIC_TEXT, "inlist" + i)); + } + args.add(new TestCaseSupplier.TypedData(field, DataType.SEMANTIC_TEXT, "field")); + return new TestCaseSupplier.TestCase( + args, + matchesPattern("InBytesRefEvaluator.*"), + DataType.BOOLEAN, + equalTo(inlist.contains(field)) + ); + })); + + for (DataType type1 : DataType.stringTypes()) { + for (DataType type2 : DataType.stringTypes()) { if (type1 == type2 || items > 1) continue; suppliers.add(new TestCaseSupplier(type1 + " " + type2, List.of(type1, type2), () -> { List inlist = randomList(items, items, () -> randomLiteral(type1).value()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java index 3b8270c1576fd..5793f26ecd447 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java @@ -150,7 +150,7 @@ public static Iterable parameters() { o, v, t, - (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, text, unsigned_long or version" + (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, semantic_text, text, unsigned_long or version" ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java index 647988fe35326..e8f9f26a76f43 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java @@ -150,7 +150,7 @@ public static Iterable parameters() { o, v, t, - (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, text, unsigned_long or version" + (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, semantic_text, text, unsigned_long or version" ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java index babb9fc8c0bd1..b2228b5543ef2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java @@ -30,11 +30,13 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.HALF_FLOAT; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.IP; +import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; import static org.elasticsearch.xpack.esql.core.type.DataType.OBJECT; import static org.elasticsearch.xpack.esql.core.type.DataType.PARTIAL_AGG; import static org.elasticsearch.xpack.esql.core.type.DataType.SCALED_FLOAT; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.SHORT; import static org.elasticsearch.xpack.esql.core.type.DataType.SOURCE; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; @@ -69,6 +71,8 @@ public void testCommonTypeStrings() { } else if ((isString(dataType1) && isString(dataType2))) { if (dataType1 == dataType2) { assertEqualsCommonType(dataType1, dataType2, dataType1); + } else if (dataType1 == SEMANTIC_TEXT || dataType2 == SEMANTIC_TEXT) { + assertEqualsCommonType(dataType1, dataType2, KEYWORD); } else { assertEqualsCommonType(dataType1, dataType2, TEXT); } From a36075796864f57108f7c3d1c44738d10118e99d Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 1 Nov 2024 09:46:06 +0200 Subject: [PATCH 259/324] [TEST] Replace _source.mode with index.mapping.source.mode in integration tests (#115926) * Replace _source.mode with index.mapping.source.mode in integration tests * fix tests * revert 40_source_mode_setting.yml --- .../test/aggregations/top_hits.yml | 5 +- .../test/painless/50_script_doc_values.yml | 8 +- .../test/match_only_text/10_basic.yml | 10 +- .../test/rank_feature/30_synthetic_source.yml | 5 +- .../rank_features/20_synthetic_source.yml | 5 +- .../30_synthetic_source.yml | 5 +- .../test/token_count/10_basic.yml | 5 +- .../test/60_synthetic_source.yml | 5 +- .../resources/rest-api-spec/test/10_basic.yml | 5 +- .../test/reindex/110_synthetic_source.yml | 5 +- .../update_by_query/100_synthetic_source.yml | 5 +- .../runtime_fields/270_synthetic_source.yml | 10 +- .../20_synthetic_source.yml | 35 ++-- .../test/mapper_murmur3/10_basic.yml | 5 +- .../test/get/100_synthetic_source.yml | 79 +++++---- .../indices.create/20_synthetic_source.yml | 158 ++++++++++-------- .../21_synthetic_source_stored.yml | 96 ++++++----- .../test/indices.put_mapping/10_basic.yml | 28 +--- .../test/logsdb/20_source_mapping.yml | 12 +- .../test/mget/90_synthetic_source.yml | 19 ++- .../search.highlight/50_synthetic_source.yml | 5 +- .../test/search.vectors/90_sparse_vector.yml | 5 +- .../test/search/350_binary_field.yml | 5 +- .../test/search/400_synthetic_source.yml | 43 ++--- .../540_ignore_above_synthetic_source.yml | 12 +- .../rest-api-spec/test/tsdb/20_mapping.yml | 8 +- .../test/update/100_synthetic_source.yml | 10 +- .../rest-api-spec/test/20_ignored_source.yml | 5 +- .../test/20_synthetic_source.yml | 5 +- .../test/80_synthetic_source.yml | 15 +- .../test/40_synthetic_source.yml | 10 +- .../100_synthetic_source.yml | 10 +- .../test/analytics/histogram.yml | 15 +- .../test/enrich/40_synthetic_source.yml | 5 +- .../rest-api-spec/test/esql/30_types.yml | 10 +- .../rest-api-spec/test/esql/80_text.yml | 10 +- .../20_synthetic_source.yml | 4 +- ..._field_level_security_synthetic_source.yml | 23 +-- ...cument_level_security_synthetic_source.yml | 25 +-- .../rest-api-spec/test/snapshot/10_basic.yml | 5 +- .../test/spatial/140_synthetic_source.yml | 40 +++-- .../preview_transforms_synthetic_source.yml | 5 +- .../30_ignore_above_synthetic_source.yml | 3 +- .../test/30_synthetic_source.yml | 10 +- 44 files changed, 436 insertions(+), 357 deletions(-) diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml index ed24e1cc8404c..91e9b04e35860 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml @@ -356,9 +356,10 @@ synthetic _source: indices.create: index: test_synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: page: type: keyword diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml index ee6803d809087..4e31c1ba36601 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -180,9 +180,9 @@ setup: body: settings: number_of_shards: 1 + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: boolean: type: boolean @@ -5630,9 +5630,9 @@ version and sequence number synthetic _source: body: settings: number_of_shards: 1 + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: keyword: type: keyword diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml index b4ee226f72692..4a6810faab215 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml @@ -285,9 +285,10 @@ synthetic_source: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: foo: type: match_only_text @@ -362,9 +363,10 @@ synthetic_source with copy_to: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: foo: type: match_only_text diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml index 1e0b90ebb9e0f..e6df05189b6f9 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml @@ -7,9 +7,10 @@ setup: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: pagerank: type: rank_feature diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml index c64e35cc2cea4..6ffe896408079 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml @@ -7,9 +7,10 @@ setup: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: tags: type: rank_features diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml index 75397bd9e0fe9..b764e14c8ced1 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml @@ -7,9 +7,10 @@ setup: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: a_field: type: search_as_you_type diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml index 03b72a2623497..a870cd4d01abc 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml @@ -42,9 +42,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: count: type: token_count diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml index 12d0f1bbae6c7..5eb5739222905 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml @@ -7,9 +7,10 @@ supported: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: join_field: type: join diff --git a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml index a5576d203314f..93ecaaa66bb91 100644 --- a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml +++ b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml @@ -137,9 +137,10 @@ indices.create: index: queries_index body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: query: type: percolator diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml index 9ae2153f89ca5..db78af6719c1f 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml @@ -3,9 +3,10 @@ setup: indices.create: index: synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml index 4329bf8ed471a..a17f29a2d52e7 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml @@ -3,9 +3,10 @@ update: indices.create: index: synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml index 8832b3230910c..0a3e372d2d8e4 100644 --- a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml @@ -8,9 +8,10 @@ keywords: indices.create: index: index1 body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: field1: type: keyword @@ -76,9 +77,10 @@ doubles: indices.create: index: index1 body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: field1: type: double diff --git a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml index 4aac881700e15..1d5f1daad9958 100644 --- a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml +++ b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml @@ -10,9 +10,10 @@ stored annotated_text field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -40,9 +41,10 @@ annotated_text field with keyword multi-field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -72,9 +74,10 @@ multiple values in stored annotated_text field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -102,9 +105,10 @@ multiple values in annotated_text field with keyword multi-field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -135,9 +139,10 @@ multiple values in annotated_text field with stored keyword multi-field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -169,9 +174,10 @@ multiple values in stored annotated_text field with keyword multi-field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -202,9 +208,10 @@ fallback synthetic source: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text diff --git a/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml b/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml index 12b23fb3b0395..6873145d39d5c 100644 --- a/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml +++ b/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml @@ -134,9 +134,10 @@ setup: indices.create: index: test_synthetic_source body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: foo: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml index a7600da575cd3..7ca7580b0d5ca 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml @@ -7,9 +7,10 @@ keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -48,9 +49,8 @@ fetch without refresh also produces synthetic source: settings: index: refresh_interval: -1 + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: obj: properties: @@ -90,9 +90,10 @@ force_synthetic_source_ok: indices.create: index: test body: + settings: + index: + mapping.source.mode: stored mappings: - _source: - mode: stored properties: obj: properties: @@ -139,9 +140,10 @@ stored text: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: text: type: text @@ -212,9 +214,10 @@ stored keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -253,9 +256,10 @@ doc values keyword with ignore_above: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -336,9 +340,10 @@ stored keyword with ignore_above: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -421,9 +426,10 @@ indexed dense vectors: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -465,9 +471,10 @@ non-indexed dense vectors: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -508,9 +515,10 @@ _source filtering: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -550,9 +558,9 @@ _doc_count: indices.create: index: test body: - mappings: - _source: - mode: synthetic + settings: + index: + mapping.source.mode: synthetic # with _doc_count - do: @@ -679,9 +687,10 @@ fields with ignore_malformed: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: ip: type: ip @@ -914,9 +923,10 @@ flattened field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: flattened: type: flattened @@ -1006,9 +1016,10 @@ flattened field with ignore_above: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: field: type: flattened @@ -1061,9 +1072,10 @@ flattened field with ignore_above and arrays: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: field: type: flattened @@ -1117,9 +1129,10 @@ completion: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: completion: type: completion diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index cc5fd0e08e695..15a712d77a7ef 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -11,13 +11,11 @@ object with unmapped fields: settings: index: mapping: + source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 1 - mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -64,13 +62,12 @@ unmapped arrays: settings: index: mapping: + source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 1 mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -111,13 +108,12 @@ nested object with unmapped fields: settings: index: mapping: + source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 3 mappings: - _source: - mode: synthetic properties: path: properties: @@ -163,13 +159,12 @@ empty object with unmapped fields: settings: index: mapping: + source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 3 mappings: - _source: - mode: synthetic properties: path: properties: @@ -205,9 +200,10 @@ disabled root object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic enabled: false - do: @@ -242,9 +238,10 @@ disabled object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: enabled: false @@ -279,9 +276,10 @@ disabled object contains array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: enabled: false @@ -319,9 +317,10 @@ disabled subobject: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: properties: @@ -357,9 +356,10 @@ disabled subobject with array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: properties: @@ -396,9 +396,10 @@ mixed disabled and enabled objects: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: properties: @@ -442,9 +443,10 @@ object with dynamic override: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path_no: dynamic: false @@ -489,9 +491,10 @@ subobject with dynamic override: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: properties: @@ -537,9 +540,10 @@ object array in object with dynamic override: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -591,9 +595,10 @@ value array in object with dynamic override: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path_no: dynamic: false @@ -634,9 +639,10 @@ nested object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: nested_field: type: nested @@ -679,9 +685,10 @@ nested object next to regular: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: properties: @@ -725,9 +732,10 @@ nested object with disabled: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: obj_field: properties: @@ -813,9 +821,10 @@ doubly nested object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: obj_field: properties: @@ -908,9 +917,10 @@ subobjects auto: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic subobjects: auto properties: id: @@ -996,9 +1006,10 @@ synthetic_source with copy_to: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: number: type: integer @@ -1132,9 +1143,10 @@ synthetic_source with disabled doc_values: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: number: type: integer @@ -1215,9 +1227,10 @@ fallback synthetic_source for text field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: text: type: text @@ -1249,9 +1262,10 @@ synthetic_source with copy_to and ignored values: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1317,9 +1331,10 @@ synthetic_source with copy_to field having values in source: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1380,9 +1395,10 @@ synthetic_source with ignored source field using copy_to: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1444,9 +1460,10 @@ synthetic_source with copy_to field from dynamic template having values in sourc indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic dynamic_templates: - copy_template: match: "k" @@ -1541,9 +1558,10 @@ synthetic_source with copy_to and invalid values for copy: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1577,9 +1595,10 @@ synthetic_source with copy_to pointing inside object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1681,9 +1700,10 @@ synthetic_source with copy_to pointing to ambiguous field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: k: type: keyword @@ -1728,9 +1748,10 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects false: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic subobjects: false properties: k: @@ -1776,9 +1797,10 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects auto: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic subobjects: auto properties: k: @@ -1825,9 +1847,10 @@ synthetic_source with copy_to pointing at dynamic field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1911,9 +1934,10 @@ synthetic_source with copy_to pointing inside dynamic object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml index f3545bb0a3f0e..4f55b52224a38 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -8,9 +8,10 @@ object param - store complex object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -72,9 +73,10 @@ object param - object array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -136,9 +138,10 @@ object param - object array within array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: stored: synthetic_source_keep: arrays @@ -179,9 +182,10 @@ object param - no object array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: stored: synthetic_source_keep: arrays @@ -221,9 +225,10 @@ object param - field ordering in object array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: a: type: keyword @@ -270,9 +275,10 @@ object param - nested object array next to other fields: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: a: type: keyword @@ -326,9 +332,10 @@ object param - nested object with stored array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -378,9 +385,10 @@ index param - nested array within array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -428,9 +436,9 @@ index param - nested array within array - disabled second pass: index: synthetic_source: enable_second_doc_parsing_pass: false + mapping.source.mode: synthetic + mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -478,9 +486,8 @@ stored field under object with store_array_source: index: sort.field: "name" sort.order: "asc" + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -525,9 +532,10 @@ field param - keep root array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -582,9 +590,10 @@ field param - keep nested array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -650,9 +659,10 @@ field param - keep root singleton fields: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -739,9 +749,10 @@ field param - keep nested singleton fields: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -820,9 +831,10 @@ field param - nested array within array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -866,10 +878,9 @@ index param - root arrays: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: id: type: integer @@ -945,10 +956,9 @@ index param - dynamic root arrays: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: id: type: integer @@ -998,10 +1008,9 @@ index param - object array within array: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: stored: properties: @@ -1048,10 +1057,9 @@ index param - no object array: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: stored: properties: @@ -1093,10 +1101,9 @@ index param - field ordering: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: a: type: keyword @@ -1144,10 +1151,9 @@ index param - nested arrays: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: a: type: keyword @@ -1212,10 +1218,9 @@ index param - nested object with stored array: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1264,10 +1269,9 @@ index param - flattened fields: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: name: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml index 75d282d524607..67419cd18ad99 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml @@ -145,28 +145,6 @@ - is_false: test_index.mappings.properties.foo.meta.bar - match: { test_index.mappings.properties.foo.meta.baz: "quux" } ---- -"disabling synthetic source fails": - - requires: - cluster_features: ["gte_v8.4.0"] - reason: "Added in 8.4.0" - - - do: - indices.create: - index: test_index - body: - mappings: - _source: - mode: synthetic - - - do: - catch: /Cannot update parameter \[mode\] from \[synthetic\] to \[stored\]/ - indices.put_mapping: - index: test_index - body: - _source: - mode: stored - --- "enabling synthetic source from explicit succeeds": - requires: @@ -177,9 +155,9 @@ indices.create: index: test_index body: - mappings: - _source: - mode: stored + settings: + index: + mapping.source.mode: stored - do: indices.put_mapping: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml index b4709a4e4d176..e77f4a4c912b7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml @@ -30,9 +30,7 @@ stored _source mode is supported: settings: index: mode: logsdb - mappings: - _source: - mode: stored + mapping.source.mode: stored - do: indices.get: index: test-stored-source @@ -69,9 +67,7 @@ disabled _source is not supported: settings: index: mode: logsdb - mappings: - _source: - mode: disabled + mapping.source.mode: disabled - match: { error.type: "mapper_parsing_exception" } - match: { error.root_cause.0.type: "mapper_parsing_exception" } @@ -120,9 +116,9 @@ include/exclude is supported with stored _source: settings: index: mode: logsdb + mapping.source.mode: stored mappings: _source: - mode: stored includes: [a] - do: @@ -139,9 +135,9 @@ include/exclude is supported with stored _source: settings: index: mode: logsdb + mapping.source.mode: stored mappings: _source: - mode: stored excludes: [b] - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml index 2f3d2fa2f974d..c728252d98201 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml @@ -7,9 +7,10 @@ keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -62,9 +63,9 @@ keyword with normalizer: type: custom filter: - lowercase + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: keyword: type: keyword @@ -144,9 +145,10 @@ stored text: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: text: type: text @@ -193,9 +195,10 @@ force_synthetic_source_ok: indices.create: index: test body: + settings: + index: + mapping.source.mode: stored mappings: - _source: - mode: stored properties: obj: properties: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml index a2fd448f5044d..024ce48562281 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml @@ -9,9 +9,9 @@ setup: body: settings: number_of_shards: 1 + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: foo: type: keyword @@ -21,6 +21,7 @@ setup: index_options: positions vectors: type: text + store: false term_vector: with_positions_offsets positions: type: text diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml index 27f12f394c6a4..f4a55341dff08 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml @@ -394,9 +394,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: ml.tokens: type: sparse_vector diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml index 455d06ba2a984..ced9c4fe0825d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml @@ -55,9 +55,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: binary: type: binary diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml index 0cc1796bb47de..21338c12415d8 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml @@ -7,9 +7,10 @@ keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -44,9 +45,10 @@ stored text: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: text: type: text @@ -83,8 +85,6 @@ stored keyword: index: test body: mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -120,9 +120,10 @@ stored keyword without sibling fields: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -165,9 +166,10 @@ force_synthetic_source_ok: indices.create: index: test body: + settings: + index: + mapping.source.mode: stored mappings: - _source: - mode: stored properties: obj: properties: @@ -218,9 +220,10 @@ doc values keyword with ignore_above: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -286,9 +289,10 @@ stored keyword with ignore_above: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -356,9 +360,10 @@ _source filtering: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -397,9 +402,9 @@ _doc_count: indices.create: index: test body: - mappings: - _source: - mode: synthetic + settings: + index: + mapping.source.mode: synthetic - do: index: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml index 435cda637cca6..8950a378c7203 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml @@ -10,10 +10,9 @@ ignore_above mapping level setting: settings: index: mapping: + source.mode: synthetic ignore_above: 10 mappings: - _source: - mode: synthetic properties: keyword: type: keyword @@ -53,10 +52,9 @@ ignore_above mapping level setting on arrays: settings: index: mapping: + source.mode: synthetic ignore_above: 10 mappings: - _source: - mode: synthetic properties: keyword: type: keyword @@ -97,10 +95,9 @@ ignore_above mapping overrides setting: settings: index: mapping: + source.mode: synthetic ignore_above: 10 mappings: - _source: - mode: synthetic properties: keyword: type: keyword @@ -143,10 +140,9 @@ ignore_above mapping overrides setting on arrays: settings: index: mapping: + source.mode: synthetic ignore_above: 10 mappings: - _source: - mode: synthetic properties: keyword: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index c5669cd6414b1..4fe4a7d2e2f51 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -472,9 +472,9 @@ stored source is supported: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z + mapping: + source.mode: stored mappings: - _source: - mode: stored properties: "@timestamp": type: date @@ -510,9 +510,9 @@ disabled source is not supported: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z + mapping: + source.mode: disabled mappings: - _source: - mode: disabled properties: "@timestamp": type: date diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml index f74fde7eb2a24..c3f013395ea36 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml @@ -7,9 +7,10 @@ keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -65,9 +66,10 @@ stored text: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: text: type: text diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml index 2f111d579ebb1..ff7f01fccaa3c 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml @@ -6,9 +6,10 @@ setup: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: object: enabled: false diff --git a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml index b64fb7b822713..6cd30f42c52e9 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml +++ b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml @@ -7,9 +7,10 @@ constant_keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: const_kwd: type: constant_keyword diff --git a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml index b88fca3c478a9..e8b86231b7196 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml +++ b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml @@ -7,9 +7,10 @@ synthetic source: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -52,9 +53,10 @@ synthetic source with copy_to: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -111,9 +113,10 @@ synthetic source with disabled doc_values: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword diff --git a/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml b/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml index 1ec91f5fde8d1..c96eeeb943831 100644 --- a/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml +++ b/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml @@ -7,9 +7,10 @@ setup: indices.create: index: test1 body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: ver: type: version @@ -76,9 +77,10 @@ synthetic source with copy_to: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: ver: type: version diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml index cc0e8aff9b239..0937d24217e31 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml @@ -7,9 +7,10 @@ aggregate_metric_double: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: metric: type: aggregate_metric_double @@ -62,9 +63,10 @@ aggregate_metric_double with ignore_malformed: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: metric: type: aggregate_metric_double diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml index 726b9d153025e..88445cbad1dc8 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml @@ -182,9 +182,10 @@ histogram with synthetic source: indices.create: index: histo_synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: latency: type: histogram @@ -228,9 +229,10 @@ histogram with synthetic source and zero counts: indices.create: index: histo_synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: latency: type: histogram @@ -317,9 +319,10 @@ histogram with synthetic source and ignore_malformed: indices.create: index: histo_synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: latency: type: histogram diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml index 1c2e1cd922a65..1aaa39a0f13b7 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml @@ -4,9 +4,10 @@ setup: indices.create: index: source body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: baz: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml index cfc7f2e4036fb..9658412f150fd 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml @@ -809,9 +809,10 @@ synthetic _source text stored: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: card: type: text @@ -840,9 +841,10 @@ synthetic _source text with parent keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: card: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml index 55bd39bdd73cc..ded66b9453452 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml @@ -481,9 +481,10 @@ setup: indices.create: index: test2 body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: "emp_no": type: long @@ -526,9 +527,10 @@ setup: indices.create: index: test2 body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: "emp_no": type: long diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml index b1ab120fff441..df3c905408a87 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml @@ -11,9 +11,9 @@ setup: settings: number_of_shards: 1 number_of_replicas: 0 + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: obj: properties: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml index b971c246ac50a..5e636aebc0271 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml @@ -13,9 +13,10 @@ Filter single field: indices.create: index: index_fls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -75,9 +76,10 @@ Filter fields in object: indices.create: index: index_fls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -142,9 +144,10 @@ Fields under a disabled object - uses _ignored_source: indices.create: index: index_fls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -236,12 +239,11 @@ Dynamic fields beyond limit - uses _ignored_source: settings: index: mapping: + source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 2 mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -301,9 +303,10 @@ Field with ignored_malformed: indices.create: index: index_fls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml index 52abe0a3d83d7..37e78d86f6667 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml @@ -13,9 +13,10 @@ Filter on single field: indices.create: index: index_dls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -95,9 +96,10 @@ Filter on nested field: indices.create: index: index_dls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -178,9 +180,10 @@ Filter on object with stored source: indices.create: index: index_dls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -258,9 +261,10 @@ Filter on field within a disabled object: indices.create: index: index_dls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -335,9 +339,10 @@ Filter on field with ignored_malformed: indices.create: index: index_dls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml index e1b297f1b5d78..6ccd24ae84af9 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml @@ -100,12 +100,11 @@ setup: indices.create: index: test_synthetic body: - mappings: - _source: - mode: synthetic settings: number_of_shards: 1 number_of_replicas: 0 + index: + mapping.source.mode: synthetic - do: snapshot.create: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml index 700142cec9987..17517640f2aa5 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml @@ -8,9 +8,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: shape: type: geo_shape @@ -74,9 +75,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: shape: type: geo_shape @@ -157,9 +159,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: shape: type: shape @@ -223,9 +226,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: shape: type: shape @@ -306,9 +310,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: point: type: geo_point @@ -422,9 +427,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: geo_point: type: geo_point @@ -501,9 +507,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: point: type: point @@ -597,9 +604,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: point: type: point diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml index 08055946a7831..ee5ec824fd212 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml @@ -6,9 +6,10 @@ simple: indices.create: index: airline-data body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: time: type: date diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml index 2e3ba773fb0f2..d844bf9de9129 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml @@ -10,10 +10,9 @@ wildcard field type ignore_above: settings: index: mapping: + source.mode: synthetic ignore_above: 10 mappings: - _source: - mode: synthetic properties: a_wildcard: type: wildcard diff --git a/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml b/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml index ffa76f7433985..20472669d4d77 100644 --- a/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml +++ b/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml @@ -7,9 +7,10 @@ synthetic source: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -48,9 +49,10 @@ synthetic source with copy_to: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword From 889f01599f9ed5ddbdf6c1f2ece9f89d9e8dce5d Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Fri, 1 Nov 2024 09:48:05 +0100 Subject: [PATCH 260/324] Prevent multiple sets copies while adding index aliases (#115934) --- .../cluster/metadata/Metadata.java | 85 +++---------------- 1 file changed, 14 insertions(+), 71 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 6d2e9c37fc625..60dee79d403d6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1796,7 +1796,6 @@ public static class Builder { private DiffableStringMap hashesOfConsistentSettings = DiffableStringMap.EMPTY; private final ImmutableOpenMap.Builder indices; - private final ImmutableOpenMap.Builder> aliasedIndices; private final ImmutableOpenMap.Builder templates; private final ImmutableOpenMap.Builder customs; @@ -1826,7 +1825,6 @@ public Builder() { this.hashesOfConsistentSettings = metadata.hashesOfConsistentSettings; this.version = metadata.version; this.indices = ImmutableOpenMap.builder(metadata.indices); - this.aliasedIndices = ImmutableOpenMap.builder(metadata.aliasedIndices); this.templates = ImmutableOpenMap.builder(metadata.templates); this.customs = ImmutableOpenMap.builder(metadata.customs); this.previousIndicesLookup = metadata.indicesLookup; @@ -1839,7 +1837,6 @@ public Builder() { private Builder(Map mappingsByHash, int indexCountHint) { clusterUUID = UNKNOWN_CLUSTER_UUID; indices = ImmutableOpenMap.builder(indexCountHint); - aliasedIndices = ImmutableOpenMap.builder(); templates = ImmutableOpenMap.builder(); customs = ImmutableOpenMap.builder(); reservedStateMetadata = new HashMap<>(); @@ -1854,7 +1851,6 @@ public Builder put(IndexMetadata.Builder indexMetadataBuilder) { dedupeMapping(indexMetadataBuilder); IndexMetadata indexMetadata = indexMetadataBuilder.build(); IndexMetadata previous = indices.put(indexMetadata.getIndex().getName(), indexMetadata); - updateAliases(previous, indexMetadata); if (unsetPreviousIndicesLookup(previous, indexMetadata)) { previousIndicesLookup = null; } @@ -1879,7 +1875,6 @@ public Builder put(IndexMetadata indexMetadata, boolean incrementVersion) { return this; } } - updateAliases(previous, indexMetadata); if (unsetPreviousIndicesLookup(previous, indexMetadata)) { previousIndicesLookup = null; } @@ -1954,8 +1949,7 @@ public IndexMetadata getSafe(Index index) { public Builder remove(String index) { previousIndicesLookup = null; checkForUnusedMappings = true; - IndexMetadata previous = indices.remove(index); - updateAliases(previous, null); + indices.remove(index); return this; } @@ -1965,7 +1959,6 @@ public Builder removeAllIndices() { indices.clear(); mappingsByHash.clear(); - aliasedIndices.clear(); return this; } @@ -1976,67 +1969,6 @@ public Builder indices(Map indices) { return this; } - void updateAliases(IndexMetadata previous, IndexMetadata current) { - if (previous == null && current != null) { - for (var key : current.getAliases().keySet()) { - putAlias(key, current.getIndex()); - } - } else if (previous != null && current == null) { - for (var key : previous.getAliases().keySet()) { - removeAlias(key, previous.getIndex()); - } - } else if (previous != null && current != null) { - if (Objects.equals(previous.getAliases(), current.getAliases())) { - return; - } - - for (var key : current.getAliases().keySet()) { - if (previous.getAliases().containsKey(key) == false) { - putAlias(key, current.getIndex()); - } - } - for (var key : previous.getAliases().keySet()) { - if (current.getAliases().containsKey(key) == false) { - removeAlias(key, current.getIndex()); - } - } - } - } - - private Builder putAlias(String alias, Index index) { - Objects.requireNonNull(alias); - Objects.requireNonNull(index); - - Set indices = new HashSet<>(aliasedIndices.getOrDefault(alias, Set.of())); - if (indices.add(index) == false) { - return this; // indices already contained this index - } - aliasedIndices.put(alias, Collections.unmodifiableSet(indices)); - return this; - } - - private Builder removeAlias(String alias, Index index) { - Objects.requireNonNull(alias); - Objects.requireNonNull(index); - - Set indices = aliasedIndices.get(alias); - if (indices == null || indices.isEmpty()) { - throw new IllegalStateException("Cannot remove non-existent alias [" + alias + "] for index [" + index.getName() + "]"); - } - - indices = new HashSet<>(indices); - if (indices.remove(index) == false) { - throw new IllegalStateException("Cannot remove non-existent alias [" + alias + "] for index [" + index.getName() + "]"); - } - - if (indices.isEmpty()) { - aliasedIndices.remove(alias); // for consistency, we don't store empty sets, so null it out - } else { - aliasedIndices.put(alias, Collections.unmodifiableSet(indices)); - } - return this; - } - public Builder put(IndexTemplateMetadata.Builder template) { return put(template.build()); } @@ -2358,6 +2290,7 @@ public Metadata build(boolean skipNameCollisionChecks) { int totalNumberOfShards = 0; int totalOpenIndexShards = 0; + ImmutableOpenMap.Builder> aliasedIndicesBuilder = ImmutableOpenMap.builder(); final String[] allIndicesArray = new String[indicesMap.size()]; int i = 0; final Set sha256HashesInUse = checkForUnusedMappings ? Sets.newHashSetWithExpectedSize(mappingsByHash.size()) : null; @@ -2389,9 +2322,19 @@ public Metadata build(boolean skipNameCollisionChecks) { sha256HashesInUse.add(mapping.getSha256()); } } + for (var alias : indexMetadata.getAliases().keySet()) { + var indices = aliasedIndicesBuilder.get(alias); + if (indices == null) { + indices = new HashSet<>(); + aliasedIndicesBuilder.put(alias, indices); + } + indices.add(indexMetadata.getIndex()); + } } - - var aliasedIndices = this.aliasedIndices.build(); + for (String alias : aliasedIndicesBuilder.keys()) { + aliasedIndicesBuilder.put(alias, Collections.unmodifiableSet(aliasedIndicesBuilder.get(alias))); + } + var aliasedIndices = aliasedIndicesBuilder.build(); for (var entry : aliasedIndices.entrySet()) { List aliasIndices = entry.getValue().stream().map(idx -> indicesMap.get(idx.getName())).toList(); validateAlias(entry.getKey(), aliasIndices); From e8bf344a28c79f71c2db2fff61525d283a04ef56 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 1 Nov 2024 10:53:08 +0200 Subject: [PATCH 261/324] =?UTF-8?q?Revert=20"[TEST]=20Replace=20=5Fsource.?= =?UTF-8?q?mode=20with=20index.mapping.source.mode=20in=20integra=E2=80=A6?= =?UTF-8?q?"=20(#116069)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit a36075796864f57108f7c3d1c44738d10118e99d. --- .../test/aggregations/top_hits.yml | 5 +- .../test/painless/50_script_doc_values.yml | 8 +- .../test/match_only_text/10_basic.yml | 10 +- .../test/rank_feature/30_synthetic_source.yml | 5 +- .../rank_features/20_synthetic_source.yml | 5 +- .../30_synthetic_source.yml | 5 +- .../test/token_count/10_basic.yml | 5 +- .../test/60_synthetic_source.yml | 5 +- .../resources/rest-api-spec/test/10_basic.yml | 5 +- .../test/reindex/110_synthetic_source.yml | 5 +- .../update_by_query/100_synthetic_source.yml | 5 +- .../runtime_fields/270_synthetic_source.yml | 10 +- .../20_synthetic_source.yml | 35 ++-- .../test/mapper_murmur3/10_basic.yml | 5 +- .../test/get/100_synthetic_source.yml | 79 ++++----- .../indices.create/20_synthetic_source.yml | 158 ++++++++---------- .../21_synthetic_source_stored.yml | 96 +++++------ .../test/indices.put_mapping/10_basic.yml | 28 +++- .../test/logsdb/20_source_mapping.yml | 12 +- .../test/mget/90_synthetic_source.yml | 19 +-- .../search.highlight/50_synthetic_source.yml | 5 +- .../test/search.vectors/90_sparse_vector.yml | 5 +- .../test/search/350_binary_field.yml | 5 +- .../test/search/400_synthetic_source.yml | 43 +++-- .../540_ignore_above_synthetic_source.yml | 12 +- .../rest-api-spec/test/tsdb/20_mapping.yml | 8 +- .../test/update/100_synthetic_source.yml | 10 +- .../rest-api-spec/test/20_ignored_source.yml | 5 +- .../test/20_synthetic_source.yml | 5 +- .../test/80_synthetic_source.yml | 15 +- .../test/40_synthetic_source.yml | 10 +- .../100_synthetic_source.yml | 10 +- .../test/analytics/histogram.yml | 15 +- .../test/enrich/40_synthetic_source.yml | 5 +- .../rest-api-spec/test/esql/30_types.yml | 10 +- .../rest-api-spec/test/esql/80_text.yml | 10 +- .../20_synthetic_source.yml | 4 +- ..._field_level_security_synthetic_source.yml | 23 ++- ...cument_level_security_synthetic_source.yml | 25 ++- .../rest-api-spec/test/snapshot/10_basic.yml | 5 +- .../test/spatial/140_synthetic_source.yml | 40 ++--- .../preview_transforms_synthetic_source.yml | 5 +- .../30_ignore_above_synthetic_source.yml | 3 +- .../test/30_synthetic_source.yml | 10 +- 44 files changed, 357 insertions(+), 436 deletions(-) diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml index 91e9b04e35860..ed24e1cc8404c 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml @@ -356,10 +356,9 @@ synthetic _source: indices.create: index: test_synthetic body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: page: type: keyword diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml index 4e31c1ba36601..ee6803d809087 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -180,9 +180,9 @@ setup: body: settings: number_of_shards: 1 - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: boolean: type: boolean @@ -5630,9 +5630,9 @@ version and sequence number synthetic _source: body: settings: number_of_shards: 1 - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: keyword: type: keyword diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml index 4a6810faab215..b4ee226f72692 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml @@ -285,10 +285,9 @@ synthetic_source: indices.create: index: synthetic_source_test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: foo: type: match_only_text @@ -363,10 +362,9 @@ synthetic_source with copy_to: indices.create: index: synthetic_source_test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: foo: type: match_only_text diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml index e6df05189b6f9..1e0b90ebb9e0f 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml @@ -7,10 +7,9 @@ setup: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: pagerank: type: rank_feature diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml index 6ffe896408079..c64e35cc2cea4 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml @@ -7,10 +7,9 @@ setup: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: tags: type: rank_features diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml index b764e14c8ced1..75397bd9e0fe9 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml @@ -7,10 +7,9 @@ setup: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: a_field: type: search_as_you_type diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml index a870cd4d01abc..03b72a2623497 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml @@ -42,10 +42,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: count: type: token_count diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml index 5eb5739222905..12d0f1bbae6c7 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml @@ -7,10 +7,9 @@ supported: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: join_field: type: join diff --git a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml index 93ecaaa66bb91..a5576d203314f 100644 --- a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml +++ b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml @@ -137,10 +137,9 @@ indices.create: index: queries_index body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: query: type: percolator diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml index db78af6719c1f..9ae2153f89ca5 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml @@ -3,10 +3,9 @@ setup: indices.create: index: synthetic body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml index a17f29a2d52e7..4329bf8ed471a 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml @@ -3,10 +3,9 @@ update: indices.create: index: synthetic body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml index 0a3e372d2d8e4..8832b3230910c 100644 --- a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml @@ -8,10 +8,9 @@ keywords: indices.create: index: index1 body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: field1: type: keyword @@ -77,10 +76,9 @@ doubles: indices.create: index: index1 body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: field1: type: double diff --git a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml index 1d5f1daad9958..4aac881700e15 100644 --- a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml +++ b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml @@ -10,10 +10,9 @@ stored annotated_text field: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: annotated_text: type: annotated_text @@ -41,10 +40,9 @@ annotated_text field with keyword multi-field: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: annotated_text: type: annotated_text @@ -74,10 +72,9 @@ multiple values in stored annotated_text field: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: annotated_text: type: annotated_text @@ -105,10 +102,9 @@ multiple values in annotated_text field with keyword multi-field: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: annotated_text: type: annotated_text @@ -139,10 +135,9 @@ multiple values in annotated_text field with stored keyword multi-field: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: annotated_text: type: annotated_text @@ -174,10 +169,9 @@ multiple values in stored annotated_text field with keyword multi-field: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: annotated_text: type: annotated_text @@ -208,10 +202,9 @@ fallback synthetic source: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: annotated_text: type: annotated_text diff --git a/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml b/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml index 6873145d39d5c..12b23fb3b0395 100644 --- a/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml +++ b/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml @@ -134,10 +134,9 @@ setup: indices.create: index: test_synthetic_source body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: foo: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml index 7ca7580b0d5ca..a7600da575cd3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml @@ -7,10 +7,9 @@ keyword: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -49,8 +48,9 @@ fetch without refresh also produces synthetic source: settings: index: refresh_interval: -1 - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: obj: properties: @@ -90,10 +90,9 @@ force_synthetic_source_ok: indices.create: index: test body: - settings: - index: - mapping.source.mode: stored mappings: + _source: + mode: stored properties: obj: properties: @@ -140,10 +139,9 @@ stored text: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: text: type: text @@ -214,10 +212,9 @@ stored keyword: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -256,10 +253,9 @@ doc values keyword with ignore_above: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -340,10 +336,9 @@ stored keyword with ignore_above: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -426,10 +421,9 @@ indexed dense vectors: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -471,10 +465,9 @@ non-indexed dense vectors: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -515,10 +508,9 @@ _source filtering: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -558,9 +550,9 @@ _doc_count: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic + mappings: + _source: + mode: synthetic # with _doc_count - do: @@ -687,10 +679,9 @@ fields with ignore_malformed: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: ip: type: ip @@ -923,10 +914,9 @@ flattened field: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: flattened: type: flattened @@ -1016,10 +1006,9 @@ flattened field with ignore_above: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: field: type: flattened @@ -1072,10 +1061,9 @@ flattened field with ignore_above and arrays: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: field: type: flattened @@ -1129,10 +1117,9 @@ completion: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: completion: type: completion diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 15a712d77a7ef..cc5fd0e08e695 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -11,11 +11,13 @@ object with unmapped fields: settings: index: mapping: - source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 1 + mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -62,12 +64,13 @@ unmapped arrays: settings: index: mapping: - source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 1 mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -108,12 +111,13 @@ nested object with unmapped fields: settings: index: mapping: - source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 3 mappings: + _source: + mode: synthetic properties: path: properties: @@ -159,12 +163,13 @@ empty object with unmapped fields: settings: index: mapping: - source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 3 mappings: + _source: + mode: synthetic properties: path: properties: @@ -200,10 +205,9 @@ disabled root object: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic enabled: false - do: @@ -238,10 +242,9 @@ disabled object: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: path: enabled: false @@ -276,10 +279,9 @@ disabled object contains array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: path: enabled: false @@ -317,10 +319,9 @@ disabled subobject: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: path: properties: @@ -356,10 +357,9 @@ disabled subobject with array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: path: properties: @@ -396,10 +396,9 @@ mixed disabled and enabled objects: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: path: properties: @@ -443,10 +442,9 @@ object with dynamic override: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: path_no: dynamic: false @@ -491,10 +489,9 @@ subobject with dynamic override: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: path: properties: @@ -540,10 +537,9 @@ object array in object with dynamic override: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: id: type: integer @@ -595,10 +591,9 @@ value array in object with dynamic override: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: path_no: dynamic: false @@ -639,10 +634,9 @@ nested object: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: nested_field: type: nested @@ -685,10 +679,9 @@ nested object next to regular: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: path: properties: @@ -732,10 +725,9 @@ nested object with disabled: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: obj_field: properties: @@ -821,10 +813,9 @@ doubly nested object: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: obj_field: properties: @@ -917,10 +908,9 @@ subobjects auto: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic subobjects: auto properties: id: @@ -1006,10 +996,9 @@ synthetic_source with copy_to: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: number: type: integer @@ -1143,10 +1132,9 @@ synthetic_source with disabled doc_values: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: number: type: integer @@ -1227,10 +1215,9 @@ fallback synthetic_source for text field: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: text: type: text @@ -1262,10 +1249,9 @@ synthetic_source with copy_to and ignored values: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -1331,10 +1317,9 @@ synthetic_source with copy_to field having values in source: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -1395,10 +1380,9 @@ synthetic_source with ignored source field using copy_to: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -1460,10 +1444,9 @@ synthetic_source with copy_to field from dynamic template having values in sourc indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic dynamic_templates: - copy_template: match: "k" @@ -1558,10 +1541,9 @@ synthetic_source with copy_to and invalid values for copy: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -1595,10 +1577,9 @@ synthetic_source with copy_to pointing inside object: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -1700,10 +1681,9 @@ synthetic_source with copy_to pointing to ambiguous field: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: k: type: keyword @@ -1748,10 +1728,9 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects false: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic subobjects: false properties: k: @@ -1797,10 +1776,9 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects auto: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic subobjects: auto properties: k: @@ -1847,10 +1825,9 @@ synthetic_source with copy_to pointing at dynamic field: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -1934,10 +1911,9 @@ synthetic_source with copy_to pointing inside dynamic object: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml index 4f55b52224a38..f3545bb0a3f0e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -8,10 +8,9 @@ object param - store complex object: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: id: type: integer @@ -73,10 +72,9 @@ object param - object array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: id: type: integer @@ -138,10 +136,9 @@ object param - object array within array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: stored: synthetic_source_keep: arrays @@ -182,10 +179,9 @@ object param - no object array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: stored: synthetic_source_keep: arrays @@ -225,10 +221,9 @@ object param - field ordering in object array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: a: type: keyword @@ -275,10 +270,9 @@ object param - nested object array next to other fields: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: a: type: keyword @@ -332,10 +326,9 @@ object param - nested object with stored array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -385,10 +378,9 @@ index param - nested array within array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -436,9 +428,9 @@ index param - nested array within array - disabled second pass: index: synthetic_source: enable_second_doc_parsing_pass: false - mapping.source.mode: synthetic - mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -486,8 +478,9 @@ stored field under object with store_array_source: index: sort.field: "name" sort.order: "asc" - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -532,10 +525,9 @@ field param - keep root array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: id: type: integer @@ -590,10 +582,9 @@ field param - keep nested array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: id: type: integer @@ -659,10 +650,9 @@ field param - keep root singleton fields: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: id: type: integer @@ -749,10 +739,9 @@ field param - keep nested singleton fields: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: id: type: integer @@ -831,10 +820,9 @@ field param - nested array within array: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -878,9 +866,10 @@ index param - root arrays: settings: index: mapping: - source.mode: synthetic synthetic_source_keep: arrays mappings: + _source: + mode: synthetic properties: id: type: integer @@ -956,9 +945,10 @@ index param - dynamic root arrays: settings: index: mapping: - source.mode: synthetic synthetic_source_keep: arrays mappings: + _source: + mode: synthetic properties: id: type: integer @@ -1008,9 +998,10 @@ index param - object array within array: settings: index: mapping: - source.mode: synthetic synthetic_source_keep: arrays mappings: + _source: + mode: synthetic properties: stored: properties: @@ -1057,9 +1048,10 @@ index param - no object array: settings: index: mapping: - source.mode: synthetic synthetic_source_keep: arrays mappings: + _source: + mode: synthetic properties: stored: properties: @@ -1101,9 +1093,10 @@ index param - field ordering: settings: index: mapping: - source.mode: synthetic synthetic_source_keep: arrays mappings: + _source: + mode: synthetic properties: a: type: keyword @@ -1151,9 +1144,10 @@ index param - nested arrays: settings: index: mapping: - source.mode: synthetic synthetic_source_keep: arrays mappings: + _source: + mode: synthetic properties: a: type: keyword @@ -1218,9 +1212,10 @@ index param - nested object with stored array: settings: index: mapping: - source.mode: synthetic synthetic_source_keep: arrays mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -1269,9 +1264,10 @@ index param - flattened fields: settings: index: mapping: - source.mode: synthetic synthetic_source_keep: arrays mappings: + _source: + mode: synthetic properties: name: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml index 67419cd18ad99..75d282d524607 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml @@ -145,6 +145,28 @@ - is_false: test_index.mappings.properties.foo.meta.bar - match: { test_index.mappings.properties.foo.meta.baz: "quux" } +--- +"disabling synthetic source fails": + - requires: + cluster_features: ["gte_v8.4.0"] + reason: "Added in 8.4.0" + + - do: + indices.create: + index: test_index + body: + mappings: + _source: + mode: synthetic + + - do: + catch: /Cannot update parameter \[mode\] from \[synthetic\] to \[stored\]/ + indices.put_mapping: + index: test_index + body: + _source: + mode: stored + --- "enabling synthetic source from explicit succeeds": - requires: @@ -155,9 +177,9 @@ indices.create: index: test_index body: - settings: - index: - mapping.source.mode: stored + mappings: + _source: + mode: stored - do: indices.put_mapping: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml index e77f4a4c912b7..b4709a4e4d176 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml @@ -30,7 +30,9 @@ stored _source mode is supported: settings: index: mode: logsdb - mapping.source.mode: stored + mappings: + _source: + mode: stored - do: indices.get: index: test-stored-source @@ -67,7 +69,9 @@ disabled _source is not supported: settings: index: mode: logsdb - mapping.source.mode: disabled + mappings: + _source: + mode: disabled - match: { error.type: "mapper_parsing_exception" } - match: { error.root_cause.0.type: "mapper_parsing_exception" } @@ -116,9 +120,9 @@ include/exclude is supported with stored _source: settings: index: mode: logsdb - mapping.source.mode: stored mappings: _source: + mode: stored includes: [a] - do: @@ -135,9 +139,9 @@ include/exclude is supported with stored _source: settings: index: mode: logsdb - mapping.source.mode: stored mappings: _source: + mode: stored excludes: [b] - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml index c728252d98201..2f3d2fa2f974d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml @@ -7,10 +7,9 @@ keyword: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -63,9 +62,9 @@ keyword with normalizer: type: custom filter: - lowercase - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: keyword: type: keyword @@ -145,10 +144,9 @@ stored text: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: text: type: text @@ -195,10 +193,9 @@ force_synthetic_source_ok: indices.create: index: test body: - settings: - index: - mapping.source.mode: stored mappings: + _source: + mode: stored properties: obj: properties: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml index 024ce48562281..a2fd448f5044d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml @@ -9,9 +9,9 @@ setup: body: settings: number_of_shards: 1 - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: foo: type: keyword @@ -21,7 +21,6 @@ setup: index_options: positions vectors: type: text - store: false term_vector: with_positions_offsets positions: type: text diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml index f4a55341dff08..27f12f394c6a4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml @@ -394,10 +394,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: ml.tokens: type: sparse_vector diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml index ced9c4fe0825d..455d06ba2a984 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml @@ -55,10 +55,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: binary: type: binary diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml index 21338c12415d8..0cc1796bb47de 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml @@ -7,10 +7,9 @@ keyword: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -45,10 +44,9 @@ stored text: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: text: type: text @@ -85,6 +83,8 @@ stored keyword: index: test body: mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -120,10 +120,9 @@ stored keyword without sibling fields: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -166,10 +165,9 @@ force_synthetic_source_ok: indices.create: index: test body: - settings: - index: - mapping.source.mode: stored mappings: + _source: + mode: stored properties: obj: properties: @@ -220,10 +218,9 @@ doc values keyword with ignore_above: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -289,10 +286,9 @@ stored keyword with ignore_above: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -360,10 +356,9 @@ _source filtering: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -402,9 +397,9 @@ _doc_count: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic + mappings: + _source: + mode: synthetic - do: index: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml index 8950a378c7203..435cda637cca6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml @@ -10,9 +10,10 @@ ignore_above mapping level setting: settings: index: mapping: - source.mode: synthetic ignore_above: 10 mappings: + _source: + mode: synthetic properties: keyword: type: keyword @@ -52,9 +53,10 @@ ignore_above mapping level setting on arrays: settings: index: mapping: - source.mode: synthetic ignore_above: 10 mappings: + _source: + mode: synthetic properties: keyword: type: keyword @@ -95,9 +97,10 @@ ignore_above mapping overrides setting: settings: index: mapping: - source.mode: synthetic ignore_above: 10 mappings: + _source: + mode: synthetic properties: keyword: type: keyword @@ -140,9 +143,10 @@ ignore_above mapping overrides setting on arrays: settings: index: mapping: - source.mode: synthetic ignore_above: 10 mappings: + _source: + mode: synthetic properties: keyword: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index 4fe4a7d2e2f51..c5669cd6414b1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -472,9 +472,9 @@ stored source is supported: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - mapping: - source.mode: stored mappings: + _source: + mode: stored properties: "@timestamp": type: date @@ -510,9 +510,9 @@ disabled source is not supported: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - mapping: - source.mode: disabled mappings: + _source: + mode: disabled properties: "@timestamp": type: date diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml index c3f013395ea36..f74fde7eb2a24 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml @@ -7,10 +7,9 @@ keyword: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: kwd: type: keyword @@ -66,10 +65,9 @@ stored text: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: text: type: text diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml index ff7f01fccaa3c..2f111d579ebb1 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml @@ -6,10 +6,9 @@ setup: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: object: enabled: false diff --git a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml index 6cd30f42c52e9..b64fb7b822713 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml +++ b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml @@ -7,10 +7,9 @@ constant_keyword: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: const_kwd: type: constant_keyword diff --git a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml index e8b86231b7196..b88fca3c478a9 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml +++ b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml @@ -7,10 +7,9 @@ synthetic source: indices.create: index: synthetic_source_test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -53,10 +52,9 @@ synthetic source with copy_to: indices.create: index: synthetic_source_test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -113,10 +111,9 @@ synthetic source with disabled doc_values: indices.create: index: synthetic_source_test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword diff --git a/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml b/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml index c96eeeb943831..1ec91f5fde8d1 100644 --- a/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml +++ b/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml @@ -7,10 +7,9 @@ setup: indices.create: index: test1 body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: ver: type: version @@ -77,10 +76,9 @@ synthetic source with copy_to: indices.create: index: synthetic_source_test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: ver: type: version diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml index 0937d24217e31..cc0e8aff9b239 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml @@ -7,10 +7,9 @@ aggregate_metric_double: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: metric: type: aggregate_metric_double @@ -63,10 +62,9 @@ aggregate_metric_double with ignore_malformed: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: metric: type: aggregate_metric_double diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml index 88445cbad1dc8..726b9d153025e 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml @@ -182,10 +182,9 @@ histogram with synthetic source: indices.create: index: histo_synthetic body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: latency: type: histogram @@ -229,10 +228,9 @@ histogram with synthetic source and zero counts: indices.create: index: histo_synthetic body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: latency: type: histogram @@ -319,10 +317,9 @@ histogram with synthetic source and ignore_malformed: indices.create: index: histo_synthetic body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: latency: type: histogram diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml index 1aaa39a0f13b7..1c2e1cd922a65 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml @@ -4,10 +4,9 @@ setup: indices.create: index: source body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: baz: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml index 9658412f150fd..cfc7f2e4036fb 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml @@ -809,10 +809,9 @@ synthetic _source text stored: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: card: type: text @@ -841,10 +840,9 @@ synthetic _source text with parent keyword: indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: card: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml index ded66b9453452..55bd39bdd73cc 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml @@ -481,10 +481,9 @@ setup: indices.create: index: test2 body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: "emp_no": type: long @@ -527,10 +526,9 @@ setup: indices.create: index: test2 body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: "emp_no": type: long diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml index df3c905408a87..b1ab120fff441 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml @@ -11,9 +11,9 @@ setup: settings: number_of_shards: 1 number_of_replicas: 0 - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: obj: properties: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml index 5e636aebc0271..b971c246ac50a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml @@ -13,10 +13,9 @@ Filter single field: indices.create: index: index_fls body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -76,10 +75,9 @@ Filter fields in object: indices.create: index: index_fls body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -144,10 +142,9 @@ Fields under a disabled object - uses _ignored_source: indices.create: index: index_fls body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -239,11 +236,12 @@ Dynamic fields beyond limit - uses _ignored_source: settings: index: mapping: - source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 2 mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -303,10 +301,9 @@ Field with ignored_malformed: indices.create: index: index_fls body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml index 37e78d86f6667..52abe0a3d83d7 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml @@ -13,10 +13,9 @@ Filter on single field: indices.create: index: index_dls body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -96,10 +95,9 @@ Filter on nested field: indices.create: index: index_dls body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -180,10 +178,9 @@ Filter on object with stored source: indices.create: index: index_dls body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -261,10 +258,9 @@ Filter on field within a disabled object: indices.create: index: index_dls body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -339,10 +335,9 @@ Filter on field with ignored_malformed: indices.create: index: index_dls body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml index 6ccd24ae84af9..e1b297f1b5d78 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml @@ -100,11 +100,12 @@ setup: indices.create: index: test_synthetic body: + mappings: + _source: + mode: synthetic settings: number_of_shards: 1 number_of_replicas: 0 - index: - mapping.source.mode: synthetic - do: snapshot.create: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml index 17517640f2aa5..700142cec9987 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml @@ -8,10 +8,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: shape: type: geo_shape @@ -75,10 +74,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: shape: type: geo_shape @@ -159,10 +157,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: shape: type: shape @@ -226,10 +223,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: shape: type: shape @@ -310,10 +306,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: point: type: geo_point @@ -427,10 +422,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: geo_point: type: geo_point @@ -507,10 +501,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: point: type: point @@ -604,10 +597,9 @@ indices.create: index: test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: point: type: point diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml index ee5ec824fd212..08055946a7831 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml @@ -6,10 +6,9 @@ simple: indices.create: index: airline-data body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: time: type: date diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml index d844bf9de9129..2e3ba773fb0f2 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml @@ -10,9 +10,10 @@ wildcard field type ignore_above: settings: index: mapping: - source.mode: synthetic ignore_above: 10 mappings: + _source: + mode: synthetic properties: a_wildcard: type: wildcard diff --git a/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml b/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml index 20472669d4d77..ffa76f7433985 100644 --- a/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml +++ b/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml @@ -7,10 +7,9 @@ synthetic source: indices.create: index: synthetic_source_test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword @@ -49,10 +48,9 @@ synthetic source with copy_to: indices.create: index: synthetic_source_test body: - settings: - index: - mapping.source.mode: synthetic mappings: + _source: + mode: synthetic properties: name: type: keyword From 64e4c3870889562fe6952d068c05eaea4302ac8a Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Fri, 1 Nov 2024 09:29:07 +0000 Subject: [PATCH 262/324] Remove code for `?verbose` in `_segments` API (#116030) A change made in 8.0 intended to deprecate this parameter. However, because the new code only checked for the presence of the parameter and never consumed it, the effect was actually to remove support for the parameter. This code therefore basically does nothing and can be removed. --- .../resources/rest-api-spec/api/indices.segments.json | 9 --------- .../admin/indices/RestIndicesSegmentsAction.java | 11 ----------- 2 files changed, 20 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.segments.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.segments.json index e48b51f3e24e1..5e75caaae4bc6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.segments.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.segments.json @@ -51,15 +51,6 @@ ], "default":"open", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "verbose":{ - "type":"boolean", - "description":"Includes detailed memory usage by Lucene.", - "default":false, - "deprecated" : { - "version" : "8.0.0", - "description" : "lucene no longer keeps track of segment memory overhead as it is largely off-heap" - } } } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java index c6c9d162cd83d..8545f281bbea9 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java @@ -13,8 +13,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; @@ -30,8 +28,6 @@ @ServerlessScope(Scope.INTERNAL) public class RestIndicesSegmentsAction extends BaseRestHandler { - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestIndicesSegmentsAction.class); - public RestIndicesSegmentsAction() {} @Override @@ -49,13 +45,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndicesSegmentsRequest indicesSegmentsRequest = new IndicesSegmentsRequest( Strings.splitStringByCommaToArray(request.param("index")) ).withVectorFormatsInfo(request.paramAsBoolean("vector_formats", false)); - if (request.hasParam("verbose")) { - DEPRECATION_LOGGER.warn( - DeprecationCategory.INDICES, - "indices_segments_action_verbose", - "The [verbose] query parameter for [indices_segments_action] has no effect and is deprecated" - ); - } indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions())); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .indices() From 71dfb0689b116db2061d37a34ff46a0b65ea077a Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Fri, 1 Nov 2024 10:10:19 +0000 Subject: [PATCH 263/324] Enable _tier based coordinator rewrites for all indices (not just mounted indices) (#115797) As part of https://github.com/elastic/elasticsearch/pull/114990 we enabled using the `_tier` field as part of the coordinator rewrite in order to skip shards that do not match a `_tier` filter, but only for fully/partially mounted indices. This PR enhances the previous work by allowing a coordinator rewrite to skip shards that will not match the `_tier` query for all indices (irrespective of their lifecycle state i.e. hot and warm indices can now skip shards based on the `_tier` query) Note however, that hot/warm indices will not automatically take advantage of the `can_match` coordinator rewrite (like read only indices do) but only the search requests that surpass the `pre_filter_shard_size` threshold will. Relates to [#114910](https://github.com/elastic/elasticsearch/issues/114910) --- docs/changelog/115797.yaml | 6 + .../query/CoordinatorRewriteContext.java | 13 ++- .../CoordinatorRewriteContextProvider.java | 40 ++++--- .../CanMatchPreFilterSearchPhaseTests.java | 95 +++++++++++++++ .../index/query/QueryRewriteContextTests.java | 35 +++--- .../CanMatchDataTierCoordinatorRewriteIT.java | 108 ++++++++++++++++++ 6 files changed, 258 insertions(+), 39 deletions(-) create mode 100644 docs/changelog/115797.yaml create mode 100644 x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/search/CanMatchDataTierCoordinatorRewriteIT.java diff --git a/docs/changelog/115797.yaml b/docs/changelog/115797.yaml new file mode 100644 index 0000000000000..8adf51887c28a --- /dev/null +++ b/docs/changelog/115797.yaml @@ -0,0 +1,6 @@ +pr: 115797 +summary: Enable `_tier` based coordinator rewrites for all indices (not just mounted + indices) +area: Search +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index 964358610e074..b0d3065ba3a3f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -39,7 +39,7 @@ public class CoordinatorRewriteContext extends QueryRewriteContext { public static final String TIER_FIELD_NAME = "_tier"; - private static final ConstantFieldType TIER_FIELD_TYPE = new ConstantFieldType(TIER_FIELD_NAME, Map.of()) { + static final ConstantFieldType TIER_FIELD_TYPE = new ConstantFieldType(TIER_FIELD_NAME, Map.of()) { @Override public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { throw new UnsupportedOperationException("fetching field values is not supported on the coordinator node"); @@ -69,6 +69,7 @@ public Query existsQuery(SearchExecutionContext context) { } }; + @Nullable private final DateFieldRangeInfo dateFieldRangeInfo; private final String tier; @@ -85,7 +86,7 @@ public CoordinatorRewriteContext( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, - DateFieldRangeInfo dateFieldRangeInfo, + @Nullable DateFieldRangeInfo dateFieldRangeInfo, String tier ) { super( @@ -116,9 +117,9 @@ public CoordinatorRewriteContext( */ @Nullable public MappedFieldType getFieldType(String fieldName) { - if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + if (dateFieldRangeInfo != null && DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { return dateFieldRangeInfo.timestampFieldType(); - } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + } else if (dateFieldRangeInfo != null && IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { return dateFieldRangeInfo.eventIngestedFieldType(); } else if (TIER_FIELD_NAME.equals(fieldName)) { return TIER_FIELD_TYPE; @@ -133,9 +134,9 @@ public MappedFieldType getFieldType(String fieldName) { */ @Nullable public IndexLongFieldRange getFieldRange(String fieldName) { - if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + if (dateFieldRangeInfo != null && DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { return dateFieldRangeInfo.timestampRange(); - } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + } else if (dateFieldRangeInfo != null && IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { return dateFieldRangeInfo.eventIngestedRange(); } else { return null; diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java index e48d7699d03ef..d59655700fcf3 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java @@ -52,35 +52,37 @@ public CoordinatorRewriteContext getCoordinatorRewriteContext(Index index) { return null; } DateFieldRangeInfo dateFieldRangeInfo = mappingSupplier.apply(index); - // we've now added a coordinator rewrite based on the _tier field so the requirement - // for the timestamps fields to be present is artificial (we could do a coordinator - // rewrite only based on the _tier field) and we might decide to remove this artificial - // limitation to enable coordinator rewrites based on _tier for hot and warm indices - // (currently the _tier coordinator rewrite is only available for mounted and partially mounted - // indices) - if (dateFieldRangeInfo == null) { - return null; - } - DateFieldMapper.DateFieldType timestampFieldType = dateFieldRangeInfo.timestampFieldType(); IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + DateFieldMapper.DateFieldType timestampFieldType = null; + if (dateFieldRangeInfo != null) { + timestampFieldType = dateFieldRangeInfo.timestampFieldType(); - if (timestampRange.containsAllShardRanges() == false) { - // if @timestamp range is not present or not ready in cluster state, fallback to using time series range (if present) - timestampRange = indexMetadata.getTimeSeriesTimestampRange(timestampFieldType); - // if timestampRange in the time series is null AND the eventIngestedRange is not ready for use, return null (no coord rewrite) - if (timestampRange == null && eventIngestedRange.containsAllShardRanges() == false) { - return null; + if (timestampRange.containsAllShardRanges() == false) { + // if @timestamp range is not present or not ready in cluster state, fallback to using time series range (if present) + timestampRange = indexMetadata.getTimeSeriesTimestampRange(timestampFieldType); + // if timestampRange in the time series is null AND the eventIngestedRange is not ready for use, return null (no coord + // rewrite) + if (timestampRange == null && eventIngestedRange.containsAllShardRanges() == false) { + return null; + } } } - // the DateFieldRangeInfo from the mappingSupplier only has field types, but not ranges - // so create a new object with ranges pulled from cluster state return new CoordinatorRewriteContext( parserConfig, client, nowInMillis, - new DateFieldRangeInfo(timestampFieldType, timestampRange, dateFieldRangeInfo.eventIngestedFieldType(), eventIngestedRange), + dateFieldRangeInfo == null + ? null + // the DateFieldRangeInfo from the mappingSupplier only has field types, but not ranges + // so create a new object with ranges pulled from cluster state + : new DateFieldRangeInfo( + timestampFieldType, + timestampRange, + dateFieldRangeInfo.eventIngestedFieldType(), + eventIngestedRange + ), indexMetadata.getTierPreference().isEmpty() == false ? indexMetadata.getTierPreference().getFirst() : "" ); } diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 8eebb98c348b1..c1119ee5973f4 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; @@ -31,8 +32,10 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.CoordinatorRewriteContext; import org.elasticsearch.index.query.CoordinatorRewriteContextProvider; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; @@ -476,6 +479,98 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingEventIngested doCanMatchFilteringOnCoordinatorThatCanBeSkipped(IndexMetadata.EVENT_INGESTED_FIELD_NAME); } + public void testCanMatchFilteringOnCoordinatorSkipsBasedOnTier() throws Exception { + // we'll test that we're executing _tier coordinator rewrite for indices (data stream backing or regular) without any @timestamp + // or event.ingested fields + // for both data stream backing and regular indices we'll have one index in hot and one in warm. the warm indices will be skipped as + // our queries will filter based on _tier: hot + + Map indexNameToSettings = new HashMap<>(); + ClusterState state = ClusterState.EMPTY_STATE; + + String dataStreamName = randomAlphaOfLengthBetween(10, 20); + Index warmDataStreamIndex = new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1), UUIDs.base64UUID()); + indexNameToSettings.put( + warmDataStreamIndex, + settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, warmDataStreamIndex.getUUID()) + .put(DataTier.TIER_PREFERENCE, "data_warm,data_hot") + ); + Index hotDataStreamIndex = new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 2), UUIDs.base64UUID()); + indexNameToSettings.put( + hotDataStreamIndex, + settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, hotDataStreamIndex.getUUID()) + .put(DataTier.TIER_PREFERENCE, "data_hot") + ); + DataStream dataStream = DataStreamTestHelper.newInstance(dataStreamName, List.of(warmDataStreamIndex, hotDataStreamIndex)); + + Index warmRegularIndex = new Index("warm-index", UUIDs.base64UUID()); + indexNameToSettings.put( + warmRegularIndex, + settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, warmRegularIndex.getUUID()) + .put(DataTier.TIER_PREFERENCE, "data_warm,data_hot") + ); + Index hotRegularIndex = new Index("hot-index", UUIDs.base64UUID()); + indexNameToSettings.put( + hotRegularIndex, + settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, hotRegularIndex.getUUID()) + .put(DataTier.TIER_PREFERENCE, "data_hot") + ); + + List allIndices = new ArrayList<>(4); + allIndices.addAll(dataStream.getIndices()); + allIndices.add(warmRegularIndex); + allIndices.add(hotRegularIndex); + + List hotIndices = List.of(hotRegularIndex, hotDataStreamIndex); + List warmIndices = List.of(warmRegularIndex, warmDataStreamIndex); + + for (Index index : allIndices) { + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) + .settings(indexNameToSettings.get(index)) + .numberOfShards(1) + .numberOfReplicas(0); + Metadata.Builder metadataBuilder = Metadata.builder(state.metadata()).put(indexMetadataBuilder); + state = ClusterState.builder(state).metadata(metadataBuilder).build(); + } + + ClusterState finalState = state; + CoordinatorRewriteContextProvider coordinatorRewriteContextProvider = new CoordinatorRewriteContextProvider( + parserConfig(), + mock(Client.class), + System::currentTimeMillis, + () -> finalState, + (index) -> null + ); + + BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(CoordinatorRewriteContext.TIER_FIELD_NAME, "data_hot")); + + assignShardsAndExecuteCanMatchPhase( + List.of(dataStream), + List.of(hotRegularIndex, warmRegularIndex), + coordinatorRewriteContextProvider, + boolQueryBuilder, + List.of(), + null, + (updatedSearchShardIterators, requests) -> { + var skippedShards = updatedSearchShardIterators.stream().filter(SearchShardIterator::skip).toList(); + var nonSkippedShards = updatedSearchShardIterators.stream() + .filter(searchShardIterator -> searchShardIterator.skip() == false) + .toList(); + + boolean allSkippedShardAreFromWarmIndices = skippedShards.stream() + .allMatch(shardIterator -> warmIndices.contains(shardIterator.shardId().getIndex())); + assertThat(allSkippedShardAreFromWarmIndices, equalTo(true)); + boolean allNonSkippedShardAreHotIndices = nonSkippedShards.stream() + .allMatch(shardIterator -> hotIndices.contains(shardIterator.shardId().getIndex())); + assertThat(allNonSkippedShardAreHotIndices, equalTo(true)); + boolean allRequestMadeToHotIndices = requests.stream() + .allMatch(request -> hotIndices.contains(request.shardId().getIndex())); + assertThat(allRequestMadeToHotIndices, equalTo(true)); + } + ); + } + public void doCanMatchFilteringOnCoordinatorThatCanBeSkipped(String timestampField) throws Exception { Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java index 0b2a8ab4856b3..d07bcf54fdf09 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java @@ -21,6 +21,7 @@ import java.util.Collections; +import static org.elasticsearch.index.query.CoordinatorRewriteContext.TIER_FIELD_TYPE; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -86,13 +87,6 @@ public void testGetTierPreference() { { // coordinator rewrite context - IndexMetadata metadata = newIndexMeta( - "index", - Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(DataTier.TIER_PREFERENCE, "data_cold,data_warm,data_hot") - .build() - ); CoordinatorRewriteContext coordinatorRewriteContext = new CoordinatorRewriteContext( parserConfig(), null, @@ -103,15 +97,9 @@ public void testGetTierPreference() { assertThat(coordinatorRewriteContext.getTierPreference(), is("data_frozen")); } + { // coordinator rewrite context empty tier - IndexMetadata metadata = newIndexMeta( - "index", - Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(DataTier.TIER_PREFERENCE, "data_cold,data_warm,data_hot") - .build() - ); CoordinatorRewriteContext coordinatorRewriteContext = new CoordinatorRewriteContext( parserConfig(), null, @@ -122,6 +110,25 @@ public void testGetTierPreference() { assertThat(coordinatorRewriteContext.getTierPreference(), is(nullValue())); } + + { + // null date field range info + CoordinatorRewriteContext coordinatorRewriteContext = new CoordinatorRewriteContext( + parserConfig(), + null, + System::currentTimeMillis, + null, + "data_frozen" + ); + assertThat(coordinatorRewriteContext.getFieldRange(IndexMetadata.EVENT_INGESTED_FIELD_NAME), is(nullValue())); + assertThat(coordinatorRewriteContext.getFieldRange(IndexMetadata.EVENT_INGESTED_FIELD_NAME), is(nullValue())); + // tier field doesn't have a range + assertThat(coordinatorRewriteContext.getFieldRange(CoordinatorRewriteContext.TIER_FIELD_NAME), is(nullValue())); + assertThat(coordinatorRewriteContext.getFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), is(nullValue())); + assertThat(coordinatorRewriteContext.getFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), is(nullValue())); + // _tier field type should still work even without the data field info + assertThat(coordinatorRewriteContext.getFieldType(CoordinatorRewriteContext.TIER_FIELD_NAME), is(TIER_FIELD_TYPE)); + } } public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/search/CanMatchDataTierCoordinatorRewriteIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/search/CanMatchDataTierCoordinatorRewriteIT.java new file mode 100644 index 0000000000000..59803456104db --- /dev/null +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/search/CanMatchDataTierCoordinatorRewriteIT.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.action.search; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.CoordinatorRewriteContext; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) +public class CanMatchDataTierCoordinatorRewriteIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(LocalStateCompositeXPackPlugin.class); + } + + @Before + public void setUpMasterNode() { + internalCluster().startMasterOnlyNode(); + } + + public void testTierFiledCoordinatorRewrite() throws Exception { + startHotOnlyNode(); + String warmNode = startWarmOnlyNode(); + ensureGreen(); + + String hotIndex = "hot-index"; + String warmIndex = "warm-index"; + createIndexWithTierPreference(hotIndex, DataTier.DATA_HOT); + createIndexWithTierPreference(warmIndex, DataTier.DATA_WARM); + + ensureGreen(hotIndex, warmIndex); + // index 2 docs in the hot index and 1 doc in the warm index + indexDoc(hotIndex, "1", "field", "value"); + indexDoc(hotIndex, "2", "field", "value"); + indexDoc(warmIndex, "3", "field2", "valuee"); + + refresh(hotIndex, warmIndex); + + internalCluster().stopNode(warmNode); + + ensureRed(warmIndex); + + BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(CoordinatorRewriteContext.TIER_FIELD_NAME, "data_hot")); + + final SearchRequest searchRequest = new SearchRequest(); + // we set the pre filter shard size to 1 automatically for mounted indices however, + // we do have to explicitly make sure the can_match phase runs for hot/warm indices by lowering + // the threshold for the pre filter shard size + searchRequest.setPreFilterShardSize(1); + searchRequest.indices(hotIndex, warmIndex); + searchRequest.source(SearchSourceBuilder.searchSource().query(boolQueryBuilder)); + + assertResponse(client().search(searchRequest), searchResponse -> { + // we're only querying the hot tier which is available so we shouldn't get any failures + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // we should be receiving the 2 docs from the index that's in the data_hot tier + assertNotNull(searchResponse.getHits().getTotalHits()); + assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(2L)); + }); + } + + public String startHotOnlyNode() { + Settings.Builder nodeSettings = Settings.builder() + .putList("node.roles", Arrays.asList("master", "data_hot", "ingest")) + .put("node.attr.box", "hot"); + + return internalCluster().startNode(nodeSettings.build()); + } + + public String startWarmOnlyNode() { + Settings.Builder nodeSettings = Settings.builder() + .putList("node.roles", Arrays.asList("master", "data_warm", "ingest")) + .put("node.attr.box", "warm"); + + return internalCluster().startNode(nodeSettings.build()); + } + + private void createIndexWithTierPreference(String indexName, String tierPreference) { + + indicesAdmin().prepareCreate(indexName) + .setWaitForActiveShards(0) + .setSettings(Settings.builder().put(DataTier.TIER_PREFERENCE, tierPreference).put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)) + .get(); + } +} From 8b9cbcab11477843ed5d98bc7b28ef7bfab1fab5 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 1 Nov 2024 21:12:45 +1100 Subject: [PATCH 264/324] Mute org.elasticsearch.indices.state.CloseIndexIT testConcurrentClose #116073 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index d04fc426a7a6b..ec7f9a631f9b1 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -293,6 +293,9 @@ tests: - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT method: test {yaml=logsdb/10_settings/logsdb with default ignore dynamic beyond limit and subobjects false} issue: https://github.com/elastic/elasticsearch/issues/116054 +- class: org.elasticsearch.indices.state.CloseIndexIT + method: testConcurrentClose + issue: https://github.com/elastic/elasticsearch/issues/116073 # Examples: # From e100f88343152c2021526dffb0ec49c62e89f60b Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Fri, 1 Nov 2024 11:29:50 +0000 Subject: [PATCH 265/324] Tidy up `ClusterStatsNodeRequest` (#116036) There was an `@UpdateForV9` on this class saying that it could be replaced with `TransportRequest.Empty`. But that was removed by https://github.com/elastic/elasticsearch/pull/109790, and all its non-test usages replaced by trivial implementations of `TransportRequest`. This change therefore just simplifies the class a bit and removes that annotation. --- .../admin/cluster/stats/TransportClusterStatsAction.java | 8 -------- 1 file changed, 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index b132a1177d98a..36b018b5002eb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -35,11 +35,9 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CancellableSingleObjectCache; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.seqno.RetentionLeaseStats; @@ -307,7 +305,6 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq ); } - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) // this can be replaced with TransportRequest.Empty in v9 public static class ClusterStatsNodeRequest extends TransportRequest { ClusterStatsNodeRequest() {} @@ -320,11 +317,6 @@ public ClusterStatsNodeRequest(StreamInput in) throws IOException { public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { return new CancellableTask(id, type, action, "", parentTaskId, headers); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } } private static class MetadataStatsCache extends CancellableSingleObjectCache { From 70afe1b204bb642c15060bbc7288fac5cd2c9054 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Fri, 1 Nov 2024 12:50:55 +0100 Subject: [PATCH 266/324] Fix file settings service restart IT (#115917) Copy of https://github.com/elastic/elasticsearch/pull/115835 since that branch and PR are stuck processing the latest updates after repo unavailability. This PR is already reviewed. --- muted-tests.yml | 3 - .../FileSettingsRoleMappingsRestartIT.java | 94 ++++++++----------- 2 files changed, 41 insertions(+), 56 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index ec7f9a631f9b1..49465cc4fc09f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -215,9 +215,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultE5 issue: https://github.com/elastic/elasticsearch/issues/115361 -- class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT - method: testFileSettingsReprocessedOnRestartWithoutVersionChange - issue: https://github.com/elastic/elasticsearch/issues/115450 - class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT method: testDeploymentSurvivesRestart {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/115528 diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java index 97a5f080cee4e..15892c8d021f0 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.core.Tuple; import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.SecurityIntegTestCase; @@ -36,6 +37,7 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class FileSettingsRoleMappingsRestartIT extends SecurityIntegTestCase { + private static final int MAX_WAIT_TIME_SECONDS = 20; private final AtomicLong versionCounter = new AtomicLong(1); @Before @@ -115,10 +117,9 @@ public void testReservedStatePersistsOnRestart() throws Exception { awaitFileSettingsWatcher(); logger.info("--> write some role mappings, no other file settings"); writeJSONFile(masterNode, testJSONOnlyRoleMappings, logger, versionCounter); - boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); - assertTrue(awaitSuccessful); - assertRoleMappingsInClusterState( + assertRoleMappingsInClusterStateWithAwait( + savedClusterState, new ExpressionRoleMapping( "everyone_kibana_alone", new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), @@ -175,11 +176,8 @@ public void testReservedStatePersistsOnRestart() throws Exception { ) ); - // now remove the role mappings via the same settings file - cleanupClusterState(masterNode); - - // no role mappings - assertRoleMappingsInClusterState(); + // now remove the role mappings via an empty settings file + cleanupClusterStateAndAssertNoMappings(masterNode); // and restart the master to confirm the role mappings are all gone logger.info("--> restart master again"); @@ -195,14 +193,13 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex final String masterNode = internalCluster().getMasterName(); - var savedClusterState = setupClusterStateListener(masterNode, "everyone_kibana_alone"); + Tuple savedClusterState = setupClusterStateListener(masterNode, "everyone_kibana_alone"); awaitFileSettingsWatcher(); logger.info("--> write some role mappings, no other file settings"); writeJSONFile(masterNode, testJSONOnlyRoleMappings, logger, versionCounter); - boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); - assertTrue(awaitSuccessful); - assertRoleMappingsInClusterState( + assertRoleMappingsInClusterStateWithAwait( + savedClusterState, new ExpressionRoleMapping( "everyone_kibana_alone", new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), @@ -228,41 +225,8 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex ) ); - final CountDownLatch latch = new CountDownLatch(1); - final FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); - fileSettingsService.addFileChangedListener(latch::countDown); - // Don't increment version but write new file contents to test re-processing on restart + // write without version increment and assert that change gets applied on restart writeJSONFileWithoutVersionIncrement(masterNode, testJSONOnlyUpdatedRoleMappings, logger, versionCounter); - // Make sure we saw a file settings update so that we know it got processed, but it did not affect cluster state - assertTrue(latch.await(20, TimeUnit.SECONDS)); - - // Nothing changed yet because version is the same and there was no restart - assertRoleMappingsInClusterState( - new ExpressionRoleMapping( - "everyone_kibana_alone", - new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), - List.of("kibana_user"), - List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", METADATA_NAME_FIELD, "everyone_kibana_alone"), - true - ), - new ExpressionRoleMapping( - "everyone_fleet_alone", - new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), - List.of("fleet_user"), - List.of(), - Map.of( - "uuid", - "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", - "_foo", - "something_else", - METADATA_NAME_FIELD, - "everyone_fleet_alone" - ), - false - ) - ); - logger.info("--> restart master"); internalCluster().restartNode(masterNode); ensureGreen(); @@ -286,28 +250,52 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex ), true ) - ) + ), + MAX_WAIT_TIME_SECONDS, + TimeUnit.SECONDS ); - cleanupClusterState(masterNode); + cleanupClusterStateAndAssertNoMappings(masterNode); } - private void assertRoleMappingsInClusterState(ExpressionRoleMapping... expectedRoleMappings) { - var clusterState = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet().getState(); + private void assertRoleMappingsInClusterStateWithAwait( + Tuple latchWithClusterStateVersion, + ExpressionRoleMapping... expectedRoleMappings + ) throws InterruptedException { + boolean awaitSuccessful = latchWithClusterStateVersion.v1().await(MAX_WAIT_TIME_SECONDS, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + var clusterState = clusterAdmin().state( + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(latchWithClusterStateVersion.v2().get()) + ).actionGet().getState(); + assertRoleMappingsInClusterState(clusterState, expectedRoleMappings); + } + + private void assertRoleMappingsInClusterState(ClusterState clusterState, ExpressionRoleMapping... expectedRoleMappings) { String[] expectedRoleMappingNames = Arrays.stream(expectedRoleMappings).map(ExpressionRoleMapping::getName).toArray(String[]::new); assertRoleMappingReservedMetadata(clusterState, expectedRoleMappingNames); var actualRoleMappings = new ArrayList<>(RoleMappingMetadata.getFromClusterState(clusterState).getRoleMappings()); assertThat(actualRoleMappings, containsInAnyOrder(expectedRoleMappings)); } - private void cleanupClusterState(String masterNode) throws Exception { - // now remove the role mappings via the same settings file + private void assertRoleMappingsInClusterState(ExpressionRoleMapping... expectedRoleMappings) { + assertRoleMappingsInClusterState( + clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet().getState(), + expectedRoleMappings + ); + } + + private void cleanupClusterStateAndAssertNoMappings(String masterNode) throws Exception { var savedClusterState = setupClusterStateListenerForCleanup(masterNode); awaitFileSettingsWatcher(); logger.info("--> remove the role mappings with an empty settings file"); writeJSONFile(masterNode, emptyJSON, logger, versionCounter); - boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + boolean awaitSuccessful = savedClusterState.v1().await(MAX_WAIT_TIME_SECONDS, TimeUnit.SECONDS); assertTrue(awaitSuccessful); + // ensure cluster-state update got propagated to expected version + var clusterState = clusterAdmin().state( + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(savedClusterState.v2().get()) + ).actionGet(); + assertRoleMappingsInClusterState(clusterState.getState()); } private void assertRoleMappingReservedMetadata(ClusterState clusterState, String... names) { From 2275894ca04a5be196432c8ceee92737717f8ea0 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Fri, 1 Nov 2024 12:04:55 +0000 Subject: [PATCH 267/324] ES|QL Add full-text search to the functions docs page (#116024) Now that the match and qstr functions are Tech Previewing, we should add them to the top-level functions doc page. Co-authored-by: Craig Taverner --- .../esql/esql-functions-operators.asciidoc | 7 +++++++ docs/reference/esql/esql-limitations.asciidoc | 14 +++++++++++--- .../esql/functions/search-functions.asciidoc | 16 ++++++++++++++++ 3 files changed, 34 insertions(+), 3 deletions(-) create mode 100644 docs/reference/esql/functions/search-functions.asciidoc diff --git a/docs/reference/esql/esql-functions-operators.asciidoc b/docs/reference/esql/esql-functions-operators.asciidoc index 61f2ac6566d27..9743fd59f5e98 100644 --- a/docs/reference/esql/esql-functions-operators.asciidoc +++ b/docs/reference/esql/esql-functions-operators.asciidoc @@ -48,6 +48,12 @@ include::functions/ip-functions.asciidoc[tag=ip_list] include::functions/math-functions.asciidoc[tag=math_list] ==== +.*Search functions* +[%collapsible] +==== +include::functions/search-functions.asciidoc[tag=search_list] +==== + .*Spatial functions* [%collapsible] ==== @@ -89,6 +95,7 @@ include::functions/conditional-functions-and-expressions.asciidoc[] include::functions/date-time-functions.asciidoc[] include::functions/ip-functions.asciidoc[] include::functions/math-functions.asciidoc[] +include::functions/search-functions.asciidoc[] include::functions/spatial-functions.asciidoc[] include::functions/string-functions.asciidoc[] include::functions/type-conversion-functions.asciidoc[] diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index 8accc7550edbb..72c960c1b9699 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -102,11 +102,19 @@ is currently experimental. [discrete] [[esql-limitations-full-text-search]] -=== Full-text search is not supported +=== Full-text search + +experimental:[] {esql}'s support for <> +is currently in Technical Preview. One limitation of full-text search is that +it is necessary to use the search function, like <>, in a <> command +directly after the <> source command, or close enough to it. +Otherwise, the query will fail with a validation error. +Another limitation is that any <> command containing a full-text search function +cannot also use disjunctions (`OR`). Because of <>, -full-text search is not yet supported. Queries on `text` fields are like queries -on `keyword` fields: they are case-sensitive and need to match the full string. +queries on `text` fields are like queries on `keyword` fields: they are +case-sensitive and need to match the full string. For example, after indexing a field of type `text` with the value `Elasticsearch query language`, the following `WHERE` clause does not match because the `LIKE` diff --git a/docs/reference/esql/functions/search-functions.asciidoc b/docs/reference/esql/functions/search-functions.asciidoc new file mode 100644 index 0000000000000..943a262497d4c --- /dev/null +++ b/docs/reference/esql/functions/search-functions.asciidoc @@ -0,0 +1,16 @@ +[[esql-search-functions]] +==== {esql} Full-text search functions + +++++ +Full-text Search functions +++++ + +{esql} supports these full-text search functions: + +// tag::search_list[] +* experimental:[] <> +* experimental:[] <> +// end::search_list[] + +include::layout/match.asciidoc[] +include::layout/qstr.asciidoc[] From 3cc748a2633c3e50a24c33551251e2eef7b850da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Fri, 1 Nov 2024 13:08:10 +0100 Subject: [PATCH 268/324] [KQL query] Implement the KQL AST builder (#115084) --- .../index/query/TermsQueryBuilderTests.java | 3 + .../test/AbstractBuilderTestCase.java | 12 +- .../test/AbstractQueryTestCase.java | 2 + x-pack/plugin/kql/src/main/antlr/KqlBase.g4 | 18 +- .../xpack/kql/parser/KqlAstBuilder.java | 208 ++++++- .../xpack/kql/parser/KqlBase.interp | 3 +- .../xpack/kql/parser/KqlBaseBaseListener.java | 16 +- .../xpack/kql/parser/KqlBaseBaseVisitor.java | 8 +- .../xpack/kql/parser/KqlBaseListener.java | 22 +- .../xpack/kql/parser/KqlBaseParser.java | 563 ++++++++++-------- .../xpack/kql/parser/KqlBaseVisitor.java | 13 +- .../xpack/kql/parser/KqlParser.java | 11 +- .../kql/parser/KqlParserExecutionContext.java | 79 +++ .../kql/parser/AbstractKqlParserTestCase.java | 172 ++++++ .../parser/KqlParserBooleanQueryTests.java | 210 +++++++ .../kql/parser/KqlParserExistsQueryTests.java | 63 ++ .../kql/parser/KqlParserFieldQueryTests.java | 312 ++++++++++ .../parser/KqlParserFieldlessQueryTests.java | 95 +++ .../kql/parser/KqlParserRangeQueryTests.java | 84 +++ .../xpack/kql/parser/KqlParserTests.java | 104 ++-- .../kql/src/test/resources/supported-queries | 7 +- .../src/test/resources/unsupported-queries | 3 + 22 files changed, 1630 insertions(+), 378 deletions(-) create mode 100644 x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParserExecutionContext.java create mode 100644 x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java create mode 100644 x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserBooleanQueryTests.java create mode 100644 x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java create mode 100644 x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserFieldQueryTests.java create mode 100644 x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserFieldlessQueryTests.java create mode 100644 x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserRangeQueryTests.java diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index 2faee7bc89eb5..1cf2ff5614eff 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -71,8 +71,10 @@ protected TermsQueryBuilder doCreateTestQueryBuilder() { // make between 0 and 5 different values of the same type String fieldName = randomValueOtherThanMany( choice -> choice.equals(GEO_POINT_FIELD_NAME) + || choice.equals(BINARY_FIELD_NAME) || choice.equals(GEO_POINT_ALIAS_FIELD_NAME) || choice.equals(INT_RANGE_FIELD_NAME) + || choice.equals(DATE_ALIAS_FIELD_NAME) || choice.equals(DATE_RANGE_FIELD_NAME) || choice.equals(DATE_NANOS_FIELD_NAME), // TODO: needs testing for date_nanos type AbstractQueryTestCase::getRandomFieldName @@ -115,6 +117,7 @@ protected void doAssertLuceneQuery(TermsQueryBuilder queryBuilder, Query query, // we only do the check below for string fields (otherwise we'd have to decode the values) if (queryBuilder.fieldName().equals(INT_FIELD_NAME) + || queryBuilder.fieldName().equals(INT_ALIAS_FIELD_NAME) || queryBuilder.fieldName().equals(DOUBLE_FIELD_NAME) || queryBuilder.fieldName().equals(BOOLEAN_FIELD_NAME) || queryBuilder.fieldName().equals(DATE_FIELD_NAME)) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 0543bc7a78f8b..ef6600032ca1b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -136,28 +136,36 @@ public abstract class AbstractBuilderTestCase extends ESTestCase { protected static final String[] MAPPED_FIELD_NAMES = new String[] { TEXT_FIELD_NAME, TEXT_ALIAS_FIELD_NAME, + KEYWORD_FIELD_NAME, INT_FIELD_NAME, + INT_ALIAS_FIELD_NAME, INT_RANGE_FIELD_NAME, DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_NANOS_FIELD_NAME, DATE_FIELD_NAME, + DATE_ALIAS_FIELD_NAME, DATE_RANGE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, - GEO_POINT_ALIAS_FIELD_NAME }; + GEO_POINT_ALIAS_FIELD_NAME, + BINARY_FIELD_NAME }; protected static final String[] MAPPED_LEAF_FIELD_NAMES = new String[] { TEXT_FIELD_NAME, TEXT_ALIAS_FIELD_NAME, + KEYWORD_FIELD_NAME, INT_FIELD_NAME, + INT_ALIAS_FIELD_NAME, INT_RANGE_FIELD_NAME, DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_NANOS_FIELD_NAME, DATE_FIELD_NAME, + DATE_ALIAS_FIELD_NAME, DATE_RANGE_FIELD_NAME, GEO_POINT_FIELD_NAME, - GEO_POINT_ALIAS_FIELD_NAME }; + GEO_POINT_ALIAS_FIELD_NAME, + BINARY_FIELD_NAME }; private static final Map ALIAS_TO_CONCRETE_FIELD_NAME = new HashMap<>(); static { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index e8ca68f41854d..0d505dab40fed 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -751,6 +751,7 @@ protected static Object getRandomValueForFieldName(String fieldName) { } break; case INT_FIELD_NAME: + case INT_ALIAS_FIELD_NAME: value = randomIntBetween(0, 10); break; case DOUBLE_FIELD_NAME: @@ -815,6 +816,7 @@ protected static String getRandomRewriteMethod() { protected static Fuzziness randomFuzziness(String fieldName) { switch (fieldName) { case INT_FIELD_NAME: + case INT_ALIAS_FIELD_NAME: case DOUBLE_FIELD_NAME: case DATE_FIELD_NAME: case DATE_NANOS_FIELD_NAME: diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 index dbf7c1979796a..52a70b9d4c018 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 +++ b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 @@ -26,13 +26,13 @@ topLevelQuery ; query - : query operator=(AND | OR) query #booleanQuery - | NOT subQuery=simpleQuery #notQuery - | simpleQuery #defaultQuery + : query operator=(AND|OR) query #booleanQuery + | simpleQuery #defaultQuery ; simpleQuery - : nestedQuery + : notQuery + | nestedQuery | parenthesizedQuery | matchAllQuery | existsQuery @@ -41,6 +41,10 @@ simpleQuery | fieldLessQuery ; +notQuery: + NOT subQuery=simpleQuery + ; + nestedQuery : fieldName COLON LEFT_CURLY_BRACKET query RIGHT_CURLY_BRACKET ; @@ -77,9 +81,9 @@ fieldLessQuery ; fieldQueryValue - : (AND|OR)? (UNQUOTED_LITERAL | WILDCARD )+ - | (UNQUOTED_LITERAL | WILDCARD )+ (AND|OR)? - | (NOT|AND|OR) + : (AND|OR|NOT)? (UNQUOTED_LITERAL|WILDCARD)+ (NOT|AND|OR)? + | (AND|OR) (AND|OR|NOT)? + | NOT (AND|OR)? | QUOTED_STRING ; diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java index 4ee7bdc1c7f21..a6de28104e313 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java @@ -8,22 +8,222 @@ package org.elasticsearch.xpack.kql.parser; import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.RangeQueryBuilder; + +import java.util.function.BiConsumer; +import java.util.function.BiFunction; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.kql.parser.KqlParserExecutionContext.isDateField; +import static org.elasticsearch.xpack.kql.parser.KqlParserExecutionContext.isKeywordField; +import static org.elasticsearch.xpack.kql.parser.KqlParserExecutionContext.isRuntimeField; +import static org.elasticsearch.xpack.kql.parser.ParserUtils.escapeLuceneQueryString; +import static org.elasticsearch.xpack.kql.parser.ParserUtils.hasWildcard; class KqlAstBuilder extends KqlBaseBaseVisitor { - private final SearchExecutionContext searchExecutionContext; + private final KqlParserExecutionContext kqlParserExecutionContext; - KqlAstBuilder(SearchExecutionContext searchExecutionContext) { - this.searchExecutionContext = searchExecutionContext; + KqlAstBuilder(KqlParserExecutionContext kqlParserExecutionContext) { + this.kqlParserExecutionContext = kqlParserExecutionContext; } public QueryBuilder toQueryBuilder(ParserRuleContext ctx) { if (ctx instanceof KqlBaseParser.TopLevelQueryContext topLeveQueryContext) { + if (topLeveQueryContext.query() != null) { + return ParserUtils.typedParsing(this, topLeveQueryContext.query(), QueryBuilder.class); + } + return new MatchAllQueryBuilder(); } throw new IllegalArgumentException("context should be of type TopLevelQueryContext"); } + + @Override + public QueryBuilder visitBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { + assert ctx.operator != null; + return isAndQuery(ctx) ? visitAndBooleanQuery(ctx) : visitOrBooleanQuery(ctx); + } + + public QueryBuilder visitAndBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { + BoolQueryBuilder builder = QueryBuilders.boolQuery(); + + // TODO: KQLContext has an option to wrap the clauses into a filter instead of a must clause. Do we need it? + for (ParserRuleContext subQueryCtx : ctx.query()) { + if (subQueryCtx instanceof KqlBaseParser.BooleanQueryContext booleanSubQueryCtx && isAndQuery(booleanSubQueryCtx)) { + ParserUtils.typedParsing(this, subQueryCtx, BoolQueryBuilder.class).must().forEach(builder::must); + } else { + builder.must(ParserUtils.typedParsing(this, subQueryCtx, QueryBuilder.class)); + } + } + + return rewriteConjunctionQuery(builder); + } + + public QueryBuilder visitOrBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { + BoolQueryBuilder builder = QueryBuilders.boolQuery().minimumShouldMatch(1); + + for (ParserRuleContext subQueryCtx : ctx.query()) { + if (subQueryCtx instanceof KqlBaseParser.BooleanQueryContext booleanSubQueryCtx && isOrQuery(booleanSubQueryCtx)) { + ParserUtils.typedParsing(this, subQueryCtx, BoolQueryBuilder.class).should().forEach(builder::should); + } else { + builder.should(ParserUtils.typedParsing(this, subQueryCtx, QueryBuilder.class)); + } + } + + return rewriteDisjunctionQuery(builder); + } + + @Override + public QueryBuilder visitNotQuery(KqlBaseParser.NotQueryContext ctx) { + return QueryBuilders.boolQuery().mustNot(ParserUtils.typedParsing(this, ctx.simpleQuery(), QueryBuilder.class)); + } + + @Override + public QueryBuilder visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { + return ParserUtils.typedParsing(this, ctx.query(), QueryBuilder.class); + } + + @Override + public QueryBuilder visitNestedQuery(KqlBaseParser.NestedQueryContext ctx) { + // TODO: implementation + return new MatchNoneQueryBuilder(); + } + + @Override + public QueryBuilder visitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx) { + return new MatchAllQueryBuilder(); + } + + @Override + public QueryBuilder visitExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { + assert ctx.fieldName() != null; // Should not happen since the grammar does not allow the fieldname to be null. + + BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery().minimumShouldMatch(1); + withFields(ctx.fieldName(), (fieldName, mappedFieldType) -> { + if (isRuntimeField(mappedFieldType) == false) { + boolQueryBuilder.should(QueryBuilders.existsQuery(fieldName)); + } + }); + + return rewriteDisjunctionQuery(boolQueryBuilder); + } + + @Override + public QueryBuilder visitRangeQuery(KqlBaseParser.RangeQueryContext ctx) { + BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery().minimumShouldMatch(1); + + String queryText = ParserUtils.extractText(ctx.rangeQueryValue()); + BiFunction rangeOperation = rangeOperation(ctx.operator); + + withFields(ctx.fieldName(), (fieldName, mappedFieldType) -> { + RangeQueryBuilder rangeQuery = rangeOperation.apply(QueryBuilders.rangeQuery(fieldName), queryText); + // TODO: add timezone for date fields + boolQueryBuilder.should(rangeQuery); + }); + + return rewriteDisjunctionQuery(boolQueryBuilder); + } + + @Override + public QueryBuilder visitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx) { + String queryText = ParserUtils.extractText(ctx.fieldQueryValue()); + + if (hasWildcard(ctx.fieldQueryValue())) { + // TODO: set default fields. + return QueryBuilders.queryStringQuery(escapeLuceneQueryString(queryText, true)); + } + + boolean isPhraseMatch = ctx.fieldQueryValue().QUOTED_STRING() != null; + + return QueryBuilders.multiMatchQuery(queryText) + // TODO: add default fields? + .type(isPhraseMatch ? MultiMatchQueryBuilder.Type.PHRASE : MultiMatchQueryBuilder.Type.BEST_FIELDS) + .lenient(true); + } + + @Override + public QueryBuilder visitFieldQuery(KqlBaseParser.FieldQueryContext ctx) { + + BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery().minimumShouldMatch(1); + String queryText = ParserUtils.extractText(ctx.fieldQueryValue()); + boolean hasWildcard = hasWildcard(ctx.fieldQueryValue()); + + withFields(ctx.fieldName(), (fieldName, mappedFieldType) -> { + QueryBuilder fieldQuery = null; + + if (hasWildcard && isKeywordField(mappedFieldType)) { + fieldQuery = QueryBuilders.wildcardQuery(fieldName, queryText) + .caseInsensitive(kqlParserExecutionContext.isCaseSensitive() == false); + } else if (hasWildcard) { + fieldQuery = QueryBuilders.queryStringQuery(escapeLuceneQueryString(queryText, true)).field(fieldName); + } else if (isDateField(mappedFieldType)) { + // TODO: add timezone + fieldQuery = QueryBuilders.rangeQuery(fieldName).gte(queryText).lte(queryText); + } else if (isKeywordField(mappedFieldType)) { + fieldQuery = QueryBuilders.termQuery(fieldName, queryText) + .caseInsensitive(kqlParserExecutionContext.isCaseSensitive() == false); + } else if (ctx.fieldQueryValue().QUOTED_STRING() != null) { + fieldQuery = QueryBuilders.matchPhraseQuery(fieldName, queryText); + } else { + fieldQuery = QueryBuilders.matchQuery(fieldName, queryText); + } + + if (fieldQuery != null) { + boolQueryBuilder.should(fieldQuery); + } + }); + + return rewriteDisjunctionQuery(boolQueryBuilder); + } + + private static boolean isAndQuery(KqlBaseParser.BooleanQueryContext ctx) { + return ctx.operator.getType() == KqlBaseParser.AND; + } + + private static boolean isOrQuery(KqlBaseParser.BooleanQueryContext ctx) { + return ctx.operator.getType() == KqlBaseParser.OR; + } + + private void withFields(KqlBaseParser.FieldNameContext ctx, BiConsumer fieldConsummer) { + kqlParserExecutionContext.resolveFields(ctx).forEach(fieldDef -> fieldConsummer.accept(fieldDef.v1(), fieldDef.v2())); + } + + private QueryBuilder rewriteDisjunctionQuery(BoolQueryBuilder boolQueryBuilder) { + assert boolQueryBuilder.must().isEmpty() && boolQueryBuilder.filter().isEmpty() && boolQueryBuilder.mustNot().isEmpty(); + + if (boolQueryBuilder.should().isEmpty()) { + return new MatchNoneQueryBuilder(); + } + + return boolQueryBuilder.should().size() == 1 ? boolQueryBuilder.should().getFirst() : boolQueryBuilder; + } + + private QueryBuilder rewriteConjunctionQuery(BoolQueryBuilder boolQueryBuilder) { + assert boolQueryBuilder.should().isEmpty() && boolQueryBuilder.filter().isEmpty() && boolQueryBuilder.mustNot().isEmpty(); + + if (boolQueryBuilder.must().isEmpty()) { + return new MatchNoneQueryBuilder(); + } + + return boolQueryBuilder.must().size() == 1 ? boolQueryBuilder.must().getFirst() : boolQueryBuilder; + } + + private BiFunction rangeOperation(Token operator) { + return switch (operator.getType()) { + case KqlBaseParser.OP_LESS -> RangeQueryBuilder::lt; + case KqlBaseParser.OP_LESS_EQ -> RangeQueryBuilder::lte; + case KqlBaseParser.OP_MORE -> RangeQueryBuilder::gt; + case KqlBaseParser.OP_MORE_EQ -> RangeQueryBuilder::gte; + default -> throw new IllegalArgumentException(format(null, "Invalid range operator {}\"", operator.getText())); + }; + } } diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp index 111cac6d641b9..2b09dd52e95b0 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp @@ -40,6 +40,7 @@ rule names: topLevelQuery query simpleQuery +notQuery nestedQuery matchAllQuery parenthesizedQuery @@ -53,4 +54,4 @@ fieldName atn: -[4, 1, 16, 135, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 3, 0, 28, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 36, 8, 1, 1, 1, 1, 1, 1, 1, 5, 1, 41, 8, 1, 10, 1, 12, 1, 44, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 53, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 3, 4, 63, 8, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 4, 7, 76, 8, 7, 11, 7, 12, 7, 77, 1, 7, 3, 7, 81, 8, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 3, 9, 97, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 104, 8, 10, 1, 11, 3, 11, 107, 8, 11, 1, 11, 4, 11, 110, 8, 11, 11, 11, 12, 11, 111, 1, 11, 4, 11, 115, 8, 11, 11, 11, 12, 11, 116, 1, 11, 3, 11, 120, 8, 11, 1, 11, 1, 11, 3, 11, 124, 8, 11, 1, 12, 4, 12, 127, 8, 12, 11, 12, 12, 12, 128, 1, 12, 1, 12, 3, 12, 133, 8, 12, 1, 12, 0, 1, 2, 13, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 0, 4, 1, 0, 2, 3, 1, 0, 6, 9, 2, 0, 14, 14, 16, 16, 1, 0, 2, 4, 145, 0, 27, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 52, 1, 0, 0, 0, 6, 54, 1, 0, 0, 0, 8, 62, 1, 0, 0, 0, 10, 66, 1, 0, 0, 0, 12, 70, 1, 0, 0, 0, 14, 80, 1, 0, 0, 0, 16, 82, 1, 0, 0, 0, 18, 96, 1, 0, 0, 0, 20, 103, 1, 0, 0, 0, 22, 123, 1, 0, 0, 0, 24, 132, 1, 0, 0, 0, 26, 28, 3, 2, 1, 0, 27, 26, 1, 0, 0, 0, 27, 28, 1, 0, 0, 0, 28, 29, 1, 0, 0, 0, 29, 30, 5, 0, 0, 1, 30, 1, 1, 0, 0, 0, 31, 32, 6, 1, -1, 0, 32, 33, 5, 4, 0, 0, 33, 36, 3, 4, 2, 0, 34, 36, 3, 4, 2, 0, 35, 31, 1, 0, 0, 0, 35, 34, 1, 0, 0, 0, 36, 42, 1, 0, 0, 0, 37, 38, 10, 3, 0, 0, 38, 39, 7, 0, 0, 0, 39, 41, 3, 2, 1, 3, 40, 37, 1, 0, 0, 0, 41, 44, 1, 0, 0, 0, 42, 40, 1, 0, 0, 0, 42, 43, 1, 0, 0, 0, 43, 3, 1, 0, 0, 0, 44, 42, 1, 0, 0, 0, 45, 53, 3, 6, 3, 0, 46, 53, 3, 10, 5, 0, 47, 53, 3, 8, 4, 0, 48, 53, 3, 16, 8, 0, 49, 53, 3, 12, 6, 0, 50, 53, 3, 18, 9, 0, 51, 53, 3, 20, 10, 0, 52, 45, 1, 0, 0, 0, 52, 46, 1, 0, 0, 0, 52, 47, 1, 0, 0, 0, 52, 48, 1, 0, 0, 0, 52, 49, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 52, 51, 1, 0, 0, 0, 53, 5, 1, 0, 0, 0, 54, 55, 3, 24, 12, 0, 55, 56, 5, 5, 0, 0, 56, 57, 5, 12, 0, 0, 57, 58, 3, 2, 1, 0, 58, 59, 5, 13, 0, 0, 59, 7, 1, 0, 0, 0, 60, 61, 5, 16, 0, 0, 61, 63, 5, 5, 0, 0, 62, 60, 1, 0, 0, 0, 62, 63, 1, 0, 0, 0, 63, 64, 1, 0, 0, 0, 64, 65, 5, 16, 0, 0, 65, 9, 1, 0, 0, 0, 66, 67, 5, 10, 0, 0, 67, 68, 3, 2, 1, 0, 68, 69, 5, 11, 0, 0, 69, 11, 1, 0, 0, 0, 70, 71, 3, 24, 12, 0, 71, 72, 7, 1, 0, 0, 72, 73, 3, 14, 7, 0, 73, 13, 1, 0, 0, 0, 74, 76, 7, 2, 0, 0, 75, 74, 1, 0, 0, 0, 76, 77, 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 77, 78, 1, 0, 0, 0, 78, 81, 1, 0, 0, 0, 79, 81, 5, 15, 0, 0, 80, 75, 1, 0, 0, 0, 80, 79, 1, 0, 0, 0, 81, 15, 1, 0, 0, 0, 82, 83, 3, 24, 12, 0, 83, 84, 5, 5, 0, 0, 84, 85, 5, 16, 0, 0, 85, 17, 1, 0, 0, 0, 86, 87, 3, 24, 12, 0, 87, 88, 5, 5, 0, 0, 88, 89, 3, 22, 11, 0, 89, 97, 1, 0, 0, 0, 90, 91, 3, 24, 12, 0, 91, 92, 5, 5, 0, 0, 92, 93, 5, 10, 0, 0, 93, 94, 3, 22, 11, 0, 94, 95, 5, 11, 0, 0, 95, 97, 1, 0, 0, 0, 96, 86, 1, 0, 0, 0, 96, 90, 1, 0, 0, 0, 97, 19, 1, 0, 0, 0, 98, 104, 3, 22, 11, 0, 99, 100, 5, 10, 0, 0, 100, 101, 3, 22, 11, 0, 101, 102, 5, 11, 0, 0, 102, 104, 1, 0, 0, 0, 103, 98, 1, 0, 0, 0, 103, 99, 1, 0, 0, 0, 104, 21, 1, 0, 0, 0, 105, 107, 7, 0, 0, 0, 106, 105, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 109, 1, 0, 0, 0, 108, 110, 7, 2, 0, 0, 109, 108, 1, 0, 0, 0, 110, 111, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 111, 112, 1, 0, 0, 0, 112, 124, 1, 0, 0, 0, 113, 115, 7, 2, 0, 0, 114, 113, 1, 0, 0, 0, 115, 116, 1, 0, 0, 0, 116, 114, 1, 0, 0, 0, 116, 117, 1, 0, 0, 0, 117, 119, 1, 0, 0, 0, 118, 120, 7, 0, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 124, 1, 0, 0, 0, 121, 124, 7, 3, 0, 0, 122, 124, 5, 15, 0, 0, 123, 106, 1, 0, 0, 0, 123, 114, 1, 0, 0, 0, 123, 121, 1, 0, 0, 0, 123, 122, 1, 0, 0, 0, 124, 23, 1, 0, 0, 0, 125, 127, 5, 14, 0, 0, 126, 125, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 128, 129, 1, 0, 0, 0, 129, 133, 1, 0, 0, 0, 130, 133, 5, 15, 0, 0, 131, 133, 5, 16, 0, 0, 132, 126, 1, 0, 0, 0, 132, 130, 1, 0, 0, 0, 132, 131, 1, 0, 0, 0, 133, 25, 1, 0, 0, 0, 16, 27, 35, 42, 52, 62, 77, 80, 96, 103, 106, 111, 116, 119, 123, 128, 132] \ No newline at end of file +[4, 1, 16, 140, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 1, 0, 3, 0, 30, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 40, 8, 1, 10, 1, 12, 1, 43, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 53, 8, 2, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 3, 5, 66, 8, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 4, 8, 79, 8, 8, 11, 8, 12, 8, 80, 1, 8, 3, 8, 84, 8, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 100, 8, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 3, 11, 107, 8, 11, 1, 12, 3, 12, 110, 8, 12, 1, 12, 4, 12, 113, 8, 12, 11, 12, 12, 12, 114, 1, 12, 3, 12, 118, 8, 12, 1, 12, 1, 12, 3, 12, 122, 8, 12, 1, 12, 1, 12, 3, 12, 126, 8, 12, 1, 12, 3, 12, 129, 8, 12, 1, 13, 4, 13, 132, 8, 13, 11, 13, 12, 13, 133, 1, 13, 1, 13, 3, 13, 138, 8, 13, 1, 13, 0, 1, 2, 14, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 0, 4, 1, 0, 2, 3, 1, 0, 6, 9, 2, 0, 14, 14, 16, 16, 1, 0, 2, 4, 150, 0, 29, 1, 0, 0, 0, 2, 33, 1, 0, 0, 0, 4, 52, 1, 0, 0, 0, 6, 54, 1, 0, 0, 0, 8, 57, 1, 0, 0, 0, 10, 65, 1, 0, 0, 0, 12, 69, 1, 0, 0, 0, 14, 73, 1, 0, 0, 0, 16, 83, 1, 0, 0, 0, 18, 85, 1, 0, 0, 0, 20, 99, 1, 0, 0, 0, 22, 106, 1, 0, 0, 0, 24, 128, 1, 0, 0, 0, 26, 137, 1, 0, 0, 0, 28, 30, 3, 2, 1, 0, 29, 28, 1, 0, 0, 0, 29, 30, 1, 0, 0, 0, 30, 31, 1, 0, 0, 0, 31, 32, 5, 0, 0, 1, 32, 1, 1, 0, 0, 0, 33, 34, 6, 1, -1, 0, 34, 35, 3, 4, 2, 0, 35, 41, 1, 0, 0, 0, 36, 37, 10, 2, 0, 0, 37, 38, 7, 0, 0, 0, 38, 40, 3, 2, 1, 2, 39, 36, 1, 0, 0, 0, 40, 43, 1, 0, 0, 0, 41, 39, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 41, 1, 0, 0, 0, 44, 53, 3, 6, 3, 0, 45, 53, 3, 8, 4, 0, 46, 53, 3, 12, 6, 0, 47, 53, 3, 10, 5, 0, 48, 53, 3, 18, 9, 0, 49, 53, 3, 14, 7, 0, 50, 53, 3, 20, 10, 0, 51, 53, 3, 22, 11, 0, 52, 44, 1, 0, 0, 0, 52, 45, 1, 0, 0, 0, 52, 46, 1, 0, 0, 0, 52, 47, 1, 0, 0, 0, 52, 48, 1, 0, 0, 0, 52, 49, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 52, 51, 1, 0, 0, 0, 53, 5, 1, 0, 0, 0, 54, 55, 5, 4, 0, 0, 55, 56, 3, 4, 2, 0, 56, 7, 1, 0, 0, 0, 57, 58, 3, 26, 13, 0, 58, 59, 5, 5, 0, 0, 59, 60, 5, 12, 0, 0, 60, 61, 3, 2, 1, 0, 61, 62, 5, 13, 0, 0, 62, 9, 1, 0, 0, 0, 63, 64, 5, 16, 0, 0, 64, 66, 5, 5, 0, 0, 65, 63, 1, 0, 0, 0, 65, 66, 1, 0, 0, 0, 66, 67, 1, 0, 0, 0, 67, 68, 5, 16, 0, 0, 68, 11, 1, 0, 0, 0, 69, 70, 5, 10, 0, 0, 70, 71, 3, 2, 1, 0, 71, 72, 5, 11, 0, 0, 72, 13, 1, 0, 0, 0, 73, 74, 3, 26, 13, 0, 74, 75, 7, 1, 0, 0, 75, 76, 3, 16, 8, 0, 76, 15, 1, 0, 0, 0, 77, 79, 7, 2, 0, 0, 78, 77, 1, 0, 0, 0, 79, 80, 1, 0, 0, 0, 80, 78, 1, 0, 0, 0, 80, 81, 1, 0, 0, 0, 81, 84, 1, 0, 0, 0, 82, 84, 5, 15, 0, 0, 83, 78, 1, 0, 0, 0, 83, 82, 1, 0, 0, 0, 84, 17, 1, 0, 0, 0, 85, 86, 3, 26, 13, 0, 86, 87, 5, 5, 0, 0, 87, 88, 5, 16, 0, 0, 88, 19, 1, 0, 0, 0, 89, 90, 3, 26, 13, 0, 90, 91, 5, 5, 0, 0, 91, 92, 3, 24, 12, 0, 92, 100, 1, 0, 0, 0, 93, 94, 3, 26, 13, 0, 94, 95, 5, 5, 0, 0, 95, 96, 5, 10, 0, 0, 96, 97, 3, 24, 12, 0, 97, 98, 5, 11, 0, 0, 98, 100, 1, 0, 0, 0, 99, 89, 1, 0, 0, 0, 99, 93, 1, 0, 0, 0, 100, 21, 1, 0, 0, 0, 101, 107, 3, 24, 12, 0, 102, 103, 5, 10, 0, 0, 103, 104, 3, 24, 12, 0, 104, 105, 5, 11, 0, 0, 105, 107, 1, 0, 0, 0, 106, 101, 1, 0, 0, 0, 106, 102, 1, 0, 0, 0, 107, 23, 1, 0, 0, 0, 108, 110, 7, 3, 0, 0, 109, 108, 1, 0, 0, 0, 109, 110, 1, 0, 0, 0, 110, 112, 1, 0, 0, 0, 111, 113, 7, 2, 0, 0, 112, 111, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 112, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 117, 1, 0, 0, 0, 116, 118, 7, 3, 0, 0, 117, 116, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 129, 1, 0, 0, 0, 119, 121, 7, 0, 0, 0, 120, 122, 7, 3, 0, 0, 121, 120, 1, 0, 0, 0, 121, 122, 1, 0, 0, 0, 122, 129, 1, 0, 0, 0, 123, 125, 5, 4, 0, 0, 124, 126, 7, 0, 0, 0, 125, 124, 1, 0, 0, 0, 125, 126, 1, 0, 0, 0, 126, 129, 1, 0, 0, 0, 127, 129, 5, 15, 0, 0, 128, 109, 1, 0, 0, 0, 128, 119, 1, 0, 0, 0, 128, 123, 1, 0, 0, 0, 128, 127, 1, 0, 0, 0, 129, 25, 1, 0, 0, 0, 130, 132, 5, 14, 0, 0, 131, 130, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 131, 1, 0, 0, 0, 133, 134, 1, 0, 0, 0, 134, 138, 1, 0, 0, 0, 135, 138, 5, 15, 0, 0, 136, 138, 5, 16, 0, 0, 137, 131, 1, 0, 0, 0, 137, 135, 1, 0, 0, 0, 137, 136, 1, 0, 0, 0, 138, 27, 1, 0, 0, 0, 16, 29, 41, 52, 65, 80, 83, 99, 106, 109, 114, 117, 121, 125, 128, 133, 137] \ No newline at end of file diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java index 426af7f7115b9..e1015edcd4931 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java @@ -37,49 +37,49 @@ class KqlBaseBaseListener implements KqlBaseListener { * *

      The default implementation does nothing.

      */ - @Override public void enterNotQuery(KqlBaseParser.NotQueryContext ctx) { } + @Override public void enterBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { } /** * {@inheritDoc} * *

      The default implementation does nothing.

      */ - @Override public void exitNotQuery(KqlBaseParser.NotQueryContext ctx) { } + @Override public void exitBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { } /** * {@inheritDoc} * *

      The default implementation does nothing.

      */ - @Override public void enterBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { } + @Override public void enterDefaultQuery(KqlBaseParser.DefaultQueryContext ctx) { } /** * {@inheritDoc} * *

      The default implementation does nothing.

      */ - @Override public void exitBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { } + @Override public void exitDefaultQuery(KqlBaseParser.DefaultQueryContext ctx) { } /** * {@inheritDoc} * *

      The default implementation does nothing.

      */ - @Override public void enterDefaultQuery(KqlBaseParser.DefaultQueryContext ctx) { } + @Override public void enterSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { } /** * {@inheritDoc} * *

      The default implementation does nothing.

      */ - @Override public void exitDefaultQuery(KqlBaseParser.DefaultQueryContext ctx) { } + @Override public void exitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { } /** * {@inheritDoc} * *

      The default implementation does nothing.

      */ - @Override public void enterSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { } + @Override public void enterNotQuery(KqlBaseParser.NotQueryContext ctx) { } /** * {@inheritDoc} * *

      The default implementation does nothing.

      */ - @Override public void exitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { } + @Override public void exitNotQuery(KqlBaseParser.NotQueryContext ctx) { } /** * {@inheritDoc} * diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java index cf1f2b3972823..3973a647c8cd8 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java @@ -33,28 +33,28 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa *

      The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

      */ - @Override public T visitNotQuery(KqlBaseParser.NotQueryContext ctx) { return visitChildren(ctx); } + @Override public T visitBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

      The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

      */ - @Override public T visitBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { return visitChildren(ctx); } + @Override public T visitDefaultQuery(KqlBaseParser.DefaultQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

      The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

      */ - @Override public T visitDefaultQuery(KqlBaseParser.DefaultQueryContext ctx) { return visitChildren(ctx); } + @Override public T visitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

      The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

      */ - @Override public T visitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { return visitChildren(ctx); } + @Override public T visitNotQuery(KqlBaseParser.NotQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java index 505569dbde58d..49f2031208642 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java @@ -25,18 +25,6 @@ interface KqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitTopLevelQuery(KqlBaseParser.TopLevelQueryContext ctx); - /** - * Enter a parse tree produced by the {@code notQuery} - * labeled alternative in {@link KqlBaseParser#query}. - * @param ctx the parse tree - */ - void enterNotQuery(KqlBaseParser.NotQueryContext ctx); - /** - * Exit a parse tree produced by the {@code notQuery} - * labeled alternative in {@link KqlBaseParser#query}. - * @param ctx the parse tree - */ - void exitNotQuery(KqlBaseParser.NotQueryContext ctx); /** * Enter a parse tree produced by the {@code booleanQuery} * labeled alternative in {@link KqlBaseParser#query}. @@ -71,6 +59,16 @@ interface KqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx); + /** + * Enter a parse tree produced by {@link KqlBaseParser#notQuery}. + * @param ctx the parse tree + */ + void enterNotQuery(KqlBaseParser.NotQueryContext ctx); + /** + * Exit a parse tree produced by {@link KqlBaseParser#notQuery}. + * @param ctx the parse tree + */ + void exitNotQuery(KqlBaseParser.NotQueryContext ctx); /** * Enter a parse tree produced by {@link KqlBaseParser#nestedQuery}. * @param ctx the parse tree diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java index 3ee44e389a371..b4b0a69a82387 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java @@ -29,13 +29,13 @@ class KqlBaseParser extends Parser { OP_MORE=8, OP_MORE_EQ=9, LEFT_PARENTHESIS=10, RIGHT_PARENTHESIS=11, LEFT_CURLY_BRACKET=12, RIGHT_CURLY_BRACKET=13, UNQUOTED_LITERAL=14, QUOTED_STRING=15, WILDCARD=16; public static final int - RULE_topLevelQuery = 0, RULE_query = 1, RULE_simpleQuery = 2, RULE_nestedQuery = 3, - RULE_matchAllQuery = 4, RULE_parenthesizedQuery = 5, RULE_rangeQuery = 6, - RULE_rangeQueryValue = 7, RULE_existsQuery = 8, RULE_fieldQuery = 9, RULE_fieldLessQuery = 10, - RULE_fieldQueryValue = 11, RULE_fieldName = 12; + RULE_topLevelQuery = 0, RULE_query = 1, RULE_simpleQuery = 2, RULE_notQuery = 3, + RULE_nestedQuery = 4, RULE_matchAllQuery = 5, RULE_parenthesizedQuery = 6, + RULE_rangeQuery = 7, RULE_rangeQueryValue = 8, RULE_existsQuery = 9, RULE_fieldQuery = 10, + RULE_fieldLessQuery = 11, RULE_fieldQueryValue = 12, RULE_fieldName = 13; private static String[] makeRuleNames() { return new String[] { - "topLevelQuery", "query", "simpleQuery", "nestedQuery", "matchAllQuery", + "topLevelQuery", "query", "simpleQuery", "notQuery", "nestedQuery", "matchAllQuery", "parenthesizedQuery", "rangeQuery", "rangeQueryValue", "existsQuery", "fieldQuery", "fieldLessQuery", "fieldQueryValue", "fieldName" }; @@ -139,17 +139,17 @@ public final TopLevelQueryContext topLevelQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(27); + setState(29); _errHandler.sync(this); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 115740L) != 0)) { { - setState(26); + setState(28); query(0); } } - setState(29); + setState(31); match(EOF); } } @@ -177,28 +177,6 @@ public void copyFrom(QueryContext ctx) { } } @SuppressWarnings("CheckReturnValue") - public static class NotQueryContext extends QueryContext { - public SimpleQueryContext subQuery; - public TerminalNode NOT() { return getToken(KqlBaseParser.NOT, 0); } - public SimpleQueryContext simpleQuery() { - return getRuleContext(SimpleQueryContext.class,0); - } - public NotQueryContext(QueryContext ctx) { copyFrom(ctx); } - @Override - public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterNotQuery(this); - } - @Override - public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitNotQuery(this); - } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitNotQuery(this); - else return visitor.visitChildren(this); - } - } - @SuppressWarnings("CheckReturnValue") public static class BooleanQueryContext extends QueryContext { public Token operator; public List query() { @@ -261,35 +239,18 @@ private QueryContext query(int _p) throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(35); - _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,1,_ctx) ) { - case 1: - { - _localctx = new NotQueryContext(_localctx); - _ctx = _localctx; - _prevctx = _localctx; + { + _localctx = new DefaultQueryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; - setState(32); - match(NOT); - setState(33); - ((NotQueryContext)_localctx).subQuery = simpleQuery(); - } - break; - case 2: - { - _localctx = new DefaultQueryContext(_localctx); - _ctx = _localctx; - _prevctx = _localctx; - setState(34); - simpleQuery(); - } - break; + setState(34); + simpleQuery(); } _ctx.stop = _input.LT(-1); - setState(42); + setState(41); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,2,_ctx); + _alt = getInterpreter().adaptivePredict(_input,1,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { if ( _parseListeners!=null ) triggerExitRuleEvent(); @@ -298,9 +259,9 @@ private QueryContext query(int _p) throws RecognitionException { { _localctx = new BooleanQueryContext(new QueryContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_query); + setState(36); + if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); setState(37); - if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(38); ((BooleanQueryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==AND || _la==OR) ) { @@ -311,14 +272,14 @@ private QueryContext query(int _p) throws RecognitionException { _errHandler.reportMatch(this); consume(); } - setState(39); - query(3); + setState(38); + query(2); } } } - setState(44); + setState(43); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,2,_ctx); + _alt = getInterpreter().adaptivePredict(_input,1,_ctx); } } } @@ -335,6 +296,9 @@ private QueryContext query(int _p) throws RecognitionException { @SuppressWarnings("CheckReturnValue") public static class SimpleQueryContext extends ParserRuleContext { + public NotQueryContext notQuery() { + return getRuleContext(NotQueryContext.class,0); + } public NestedQueryContext nestedQuery() { return getRuleContext(NestedQueryContext.class,0); } @@ -381,52 +345,59 @@ public final SimpleQueryContext simpleQuery() throws RecognitionException { try { setState(52); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,3,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,2,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(45); - nestedQuery(); + setState(44); + notQuery(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(46); - parenthesizedQuery(); + setState(45); + nestedQuery(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(47); - matchAllQuery(); + setState(46); + parenthesizedQuery(); } break; case 4: enterOuterAlt(_localctx, 4); { - setState(48); - existsQuery(); + setState(47); + matchAllQuery(); } break; case 5: enterOuterAlt(_localctx, 5); { - setState(49); - rangeQuery(); + setState(48); + existsQuery(); } break; case 6: enterOuterAlt(_localctx, 6); { - setState(50); - fieldQuery(); + setState(49); + rangeQuery(); } break; case 7: enterOuterAlt(_localctx, 7); { + setState(50); + fieldQuery(); + } + break; + case 8: + enterOuterAlt(_localctx, 8); + { setState(51); fieldLessQuery(); } @@ -444,6 +415,55 @@ public final SimpleQueryContext simpleQuery() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") + public static class NotQueryContext extends ParserRuleContext { + public SimpleQueryContext subQuery; + public TerminalNode NOT() { return getToken(KqlBaseParser.NOT, 0); } + public SimpleQueryContext simpleQuery() { + return getRuleContext(SimpleQueryContext.class,0); + } + public NotQueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_notQuery; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterNotQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitNotQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitNotQuery(this); + else return visitor.visitChildren(this); + } + } + + public final NotQueryContext notQuery() throws RecognitionException { + NotQueryContext _localctx = new NotQueryContext(_ctx, getState()); + enterRule(_localctx, 6, RULE_notQuery); + try { + enterOuterAlt(_localctx, 1); + { + setState(54); + match(NOT); + setState(55); + ((NotQueryContext)_localctx).subQuery = simpleQuery(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + @SuppressWarnings("CheckReturnValue") public static class NestedQueryContext extends ParserRuleContext { public FieldNameContext fieldName() { @@ -476,19 +496,19 @@ public T accept(ParseTreeVisitor visitor) { public final NestedQueryContext nestedQuery() throws RecognitionException { NestedQueryContext _localctx = new NestedQueryContext(_ctx, getState()); - enterRule(_localctx, 6, RULE_nestedQuery); + enterRule(_localctx, 8, RULE_nestedQuery); try { enterOuterAlt(_localctx, 1); { - setState(54); + setState(57); fieldName(); - setState(55); + setState(58); match(COLON); - setState(56); + setState(59); match(LEFT_CURLY_BRACKET); - setState(57); + setState(60); query(0); - setState(58); + setState(61); match(RIGHT_CURLY_BRACKET); } } @@ -531,23 +551,23 @@ public T accept(ParseTreeVisitor visitor) { public final MatchAllQueryContext matchAllQuery() throws RecognitionException { MatchAllQueryContext _localctx = new MatchAllQueryContext(_ctx, getState()); - enterRule(_localctx, 8, RULE_matchAllQuery); + enterRule(_localctx, 10, RULE_matchAllQuery); try { enterOuterAlt(_localctx, 1); { - setState(62); + setState(65); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,3,_ctx) ) { case 1: { - setState(60); + setState(63); match(WILDCARD); - setState(61); + setState(64); match(COLON); } break; } - setState(64); + setState(67); match(WILDCARD); } } @@ -590,15 +610,15 @@ public T accept(ParseTreeVisitor visitor) { public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionException { ParenthesizedQueryContext _localctx = new ParenthesizedQueryContext(_ctx, getState()); - enterRule(_localctx, 10, RULE_parenthesizedQuery); + enterRule(_localctx, 12, RULE_parenthesizedQuery); try { enterOuterAlt(_localctx, 1); { - setState(66); + setState(69); match(LEFT_PARENTHESIS); - setState(67); + setState(70); query(0); - setState(68); + setState(71); match(RIGHT_PARENTHESIS); } } @@ -647,14 +667,14 @@ public T accept(ParseTreeVisitor visitor) { public final RangeQueryContext rangeQuery() throws RecognitionException { RangeQueryContext _localctx = new RangeQueryContext(_ctx, getState()); - enterRule(_localctx, 12, RULE_rangeQuery); + enterRule(_localctx, 14, RULE_rangeQuery); int _la; try { enterOuterAlt(_localctx, 1); { - setState(70); + setState(73); fieldName(); - setState(71); + setState(74); ((RangeQueryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 960L) != 0)) ) { @@ -665,7 +685,7 @@ public final RangeQueryContext rangeQuery() throws RecognitionException { _errHandler.reportMatch(this); consume(); } - setState(72); + setState(75); rangeQueryValue(); } } @@ -712,18 +732,18 @@ public T accept(ParseTreeVisitor visitor) { public final RangeQueryValueContext rangeQueryValue() throws RecognitionException { RangeQueryValueContext _localctx = new RangeQueryValueContext(_ctx, getState()); - enterRule(_localctx, 14, RULE_rangeQueryValue); + enterRule(_localctx, 16, RULE_rangeQueryValue); int _la; try { int _alt; - setState(80); + setState(83); _errHandler.sync(this); switch (_input.LA(1)) { case UNQUOTED_LITERAL: case WILDCARD: enterOuterAlt(_localctx, 1); { - setState(75); + setState(78); _errHandler.sync(this); _alt = 1; do { @@ -731,7 +751,7 @@ public final RangeQueryValueContext rangeQueryValue() throws RecognitionExceptio case 1: { { - setState(74); + setState(77); _la = _input.LA(1); if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { _errHandler.recoverInline(this); @@ -747,16 +767,16 @@ public final RangeQueryValueContext rangeQueryValue() throws RecognitionExceptio default: throw new NoViableAltException(this); } - setState(77); + setState(80); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,5,_ctx); + _alt = getInterpreter().adaptivePredict(_input,4,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; case QUOTED_STRING: enterOuterAlt(_localctx, 2); { - setState(79); + setState(82); match(QUOTED_STRING); } break; @@ -803,15 +823,15 @@ public T accept(ParseTreeVisitor visitor) { public final ExistsQueryContext existsQuery() throws RecognitionException { ExistsQueryContext _localctx = new ExistsQueryContext(_ctx, getState()); - enterRule(_localctx, 16, RULE_existsQuery); + enterRule(_localctx, 18, RULE_existsQuery); try { enterOuterAlt(_localctx, 1); { - setState(82); + setState(85); fieldName(); - setState(83); + setState(86); match(COLON); - setState(84); + setState(87); match(WILDCARD); } } @@ -858,34 +878,34 @@ public T accept(ParseTreeVisitor visitor) { public final FieldQueryContext fieldQuery() throws RecognitionException { FieldQueryContext _localctx = new FieldQueryContext(_ctx, getState()); - enterRule(_localctx, 18, RULE_fieldQuery); + enterRule(_localctx, 20, RULE_fieldQuery); try { - setState(96); + setState(99); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,7,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(86); + setState(89); fieldName(); - setState(87); + setState(90); match(COLON); - setState(88); + setState(91); fieldQueryValue(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(90); + setState(93); fieldName(); - setState(91); + setState(94); match(COLON); - setState(92); + setState(95); match(LEFT_PARENTHESIS); - setState(93); + setState(96); fieldQueryValue(); - setState(94); + setState(97); match(RIGHT_PARENTHESIS); } break; @@ -930,9 +950,9 @@ public T accept(ParseTreeVisitor visitor) { public final FieldLessQueryContext fieldLessQuery() throws RecognitionException { FieldLessQueryContext _localctx = new FieldLessQueryContext(_ctx, getState()); - enterRule(_localctx, 20, RULE_fieldLessQuery); + enterRule(_localctx, 22, RULE_fieldLessQuery); try { - setState(103); + setState(106); _errHandler.sync(this); switch (_input.LA(1)) { case AND: @@ -943,18 +963,18 @@ public final FieldLessQueryContext fieldLessQuery() throws RecognitionException case WILDCARD: enterOuterAlt(_localctx, 1); { - setState(98); + setState(101); fieldQueryValue(); } break; case LEFT_PARENTHESIS: enterOuterAlt(_localctx, 2); { - setState(99); + setState(102); match(LEFT_PARENTHESIS); - setState(100); + setState(103); fieldQueryValue(); - setState(101); + setState(104); match(RIGHT_PARENTHESIS); } break; @@ -975,8 +995,18 @@ public final FieldLessQueryContext fieldLessQuery() throws RecognitionException @SuppressWarnings("CheckReturnValue") public static class FieldQueryValueContext extends ParserRuleContext { - public TerminalNode AND() { return getToken(KqlBaseParser.AND, 0); } - public TerminalNode OR() { return getToken(KqlBaseParser.OR, 0); } + public List AND() { return getTokens(KqlBaseParser.AND); } + public TerminalNode AND(int i) { + return getToken(KqlBaseParser.AND, i); + } + public List OR() { return getTokens(KqlBaseParser.OR); } + public TerminalNode OR(int i) { + return getToken(KqlBaseParser.OR, i); + } + public List NOT() { return getTokens(KqlBaseParser.NOT); } + public TerminalNode NOT(int i) { + return getToken(KqlBaseParser.NOT, i); + } public List UNQUOTED_LITERAL() { return getTokens(KqlBaseParser.UNQUOTED_LITERAL); } public TerminalNode UNQUOTED_LITERAL(int i) { return getToken(KqlBaseParser.UNQUOTED_LITERAL, i); @@ -985,7 +1015,6 @@ public TerminalNode UNQUOTED_LITERAL(int i) { public TerminalNode WILDCARD(int i) { return getToken(KqlBaseParser.WILDCARD, i); } - public TerminalNode NOT() { return getToken(KqlBaseParser.NOT, 0); } public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } public FieldQueryValueContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -1008,24 +1037,24 @@ public T accept(ParseTreeVisitor visitor) { public final FieldQueryValueContext fieldQueryValue() throws RecognitionException { FieldQueryValueContext _localctx = new FieldQueryValueContext(_ctx, getState()); - enterRule(_localctx, 22, RULE_fieldQueryValue); + enterRule(_localctx, 24, RULE_fieldQueryValue); int _la; try { int _alt; - setState(123); + setState(128); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(106); + setState(109); _errHandler.sync(this); _la = _input.LA(1); - if (_la==AND || _la==OR) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) { { - setState(105); + setState(108); _la = _input.LA(1); - if ( !(_la==AND || _la==OR) ) { + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { _errHandler.recoverInline(this); } else { @@ -1036,7 +1065,7 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio } } - setState(109); + setState(112); _errHandler.sync(this); _alt = 1; do { @@ -1044,7 +1073,7 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio case 1: { { - setState(108); + setState(111); _la = _input.LA(1); if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { _errHandler.recoverInline(this); @@ -1060,51 +1089,51 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio default: throw new NoViableAltException(this); } - setState(111); + setState(114); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,10,_ctx); + _alt = getInterpreter().adaptivePredict(_input,9,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + setState(117); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,10,_ctx) ) { + case 1: + { + setState(116); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + break; + } } break; case 2: enterOuterAlt(_localctx, 2); { - setState(114); - _errHandler.sync(this); - _alt = 1; - do { - switch (_alt) { - case 1: - { - { - setState(113); - _la = _input.LA(1); - if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { - _errHandler.recoverInline(this); - } - else { - if ( _input.LA(1)==Token.EOF ) matchedEOF = true; - _errHandler.reportMatch(this); - consume(); - } - } - } - break; - default: - throw new NoViableAltException(this); - } - setState(116); - _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,11,_ctx); - } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); setState(119); + _la = _input.LA(1); + if ( !(_la==AND || _la==OR) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + setState(121); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { case 1: { - setState(118); + setState(120); _la = _input.LA(1); - if ( !(_la==AND || _la==OR) ) { + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { _errHandler.recoverInline(this); } else { @@ -1120,22 +1149,32 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio case 3: enterOuterAlt(_localctx, 3); { - setState(121); - _la = _input.LA(1); - if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { - _errHandler.recoverInline(this); - } - else { - if ( _input.LA(1)==Token.EOF ) matchedEOF = true; - _errHandler.reportMatch(this); - consume(); + setState(123); + match(NOT); + setState(125); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { + case 1: + { + setState(124); + _la = _input.LA(1); + if ( !(_la==AND || _la==OR) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + break; } } break; case 4: enterOuterAlt(_localctx, 4); { - setState(122); + setState(127); match(QUOTED_STRING); } break; @@ -1182,26 +1221,26 @@ public T accept(ParseTreeVisitor visitor) { public final FieldNameContext fieldName() throws RecognitionException { FieldNameContext _localctx = new FieldNameContext(_ctx, getState()); - enterRule(_localctx, 24, RULE_fieldName); + enterRule(_localctx, 26, RULE_fieldName); int _la; try { - setState(132); + setState(137); _errHandler.sync(this); switch (_input.LA(1)) { case UNQUOTED_LITERAL: enterOuterAlt(_localctx, 1); { - setState(126); + setState(131); _errHandler.sync(this); _la = _input.LA(1); do { { { - setState(125); + setState(130); ((FieldNameContext)_localctx).value = match(UNQUOTED_LITERAL); } } - setState(128); + setState(133); _errHandler.sync(this); _la = _input.LA(1); } while ( _la==UNQUOTED_LITERAL ); @@ -1210,14 +1249,14 @@ public final FieldNameContext fieldName() throws RecognitionException { case QUOTED_STRING: enterOuterAlt(_localctx, 2); { - setState(130); + setState(135); ((FieldNameContext)_localctx).value = match(QUOTED_STRING); } break; case WILDCARD: enterOuterAlt(_localctx, 3); { - setState(131); + setState(136); ((FieldNameContext)_localctx).value = match(WILDCARD); } break; @@ -1246,92 +1285,94 @@ public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { private boolean query_sempred(QueryContext _localctx, int predIndex) { switch (predIndex) { case 0: - return precpred(_ctx, 3); + return precpred(_ctx, 2); } return true; } public static final String _serializedATN = - "\u0004\u0001\u0010\u0087\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ + "\u0004\u0001\u0010\u008c\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ "\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004"+ "\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007"+ "\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b"+ - "\u0002\f\u0007\f\u0001\u0000\u0003\u0000\u001c\b\u0000\u0001\u0000\u0001"+ - "\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0003\u0001$\b"+ - "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001)\b\u0001\n\u0001"+ - "\f\u0001,\t\u0001\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001"+ - "\u0002\u0001\u0002\u0001\u0002\u0003\u00025\b\u0002\u0001\u0003\u0001"+ - "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004\u0001"+ - "\u0004\u0003\u0004?\b\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001"+ - "\u0005\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006\u0001"+ - "\u0006\u0001\u0007\u0004\u0007L\b\u0007\u000b\u0007\f\u0007M\u0001\u0007"+ - "\u0003\u0007Q\b\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0001\t\u0001\t"+ - "\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0003"+ - "\ta\b\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003\nh\b\n\u0001\u000b"+ - "\u0003\u000bk\b\u000b\u0001\u000b\u0004\u000bn\b\u000b\u000b\u000b\f\u000b"+ - "o\u0001\u000b\u0004\u000bs\b\u000b\u000b\u000b\f\u000bt\u0001\u000b\u0003"+ - "\u000bx\b\u000b\u0001\u000b\u0001\u000b\u0003\u000b|\b\u000b\u0001\f\u0004"+ - "\f\u007f\b\f\u000b\f\f\f\u0080\u0001\f\u0001\f\u0003\f\u0085\b\f\u0001"+ - "\f\u0000\u0001\u0002\r\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010\u0012"+ - "\u0014\u0016\u0018\u0000\u0004\u0001\u0000\u0002\u0003\u0001\u0000\u0006"+ - "\t\u0002\u0000\u000e\u000e\u0010\u0010\u0001\u0000\u0002\u0004\u0091\u0000"+ - "\u001b\u0001\u0000\u0000\u0000\u0002#\u0001\u0000\u0000\u0000\u00044\u0001"+ - "\u0000\u0000\u0000\u00066\u0001\u0000\u0000\u0000\b>\u0001\u0000\u0000"+ - "\u0000\nB\u0001\u0000\u0000\u0000\fF\u0001\u0000\u0000\u0000\u000eP\u0001"+ - "\u0000\u0000\u0000\u0010R\u0001\u0000\u0000\u0000\u0012`\u0001\u0000\u0000"+ - "\u0000\u0014g\u0001\u0000\u0000\u0000\u0016{\u0001\u0000\u0000\u0000\u0018"+ - "\u0084\u0001\u0000\u0000\u0000\u001a\u001c\u0003\u0002\u0001\u0000\u001b"+ - "\u001a\u0001\u0000\u0000\u0000\u001b\u001c\u0001\u0000\u0000\u0000\u001c"+ - "\u001d\u0001\u0000\u0000\u0000\u001d\u001e\u0005\u0000\u0000\u0001\u001e"+ - "\u0001\u0001\u0000\u0000\u0000\u001f \u0006\u0001\uffff\uffff\u0000 !"+ - "\u0005\u0004\u0000\u0000!$\u0003\u0004\u0002\u0000\"$\u0003\u0004\u0002"+ - "\u0000#\u001f\u0001\u0000\u0000\u0000#\"\u0001\u0000\u0000\u0000$*\u0001"+ - "\u0000\u0000\u0000%&\n\u0003\u0000\u0000&\'\u0007\u0000\u0000\u0000\'"+ - ")\u0003\u0002\u0001\u0003(%\u0001\u0000\u0000\u0000),\u0001\u0000\u0000"+ - "\u0000*(\u0001\u0000\u0000\u0000*+\u0001\u0000\u0000\u0000+\u0003\u0001"+ - "\u0000\u0000\u0000,*\u0001\u0000\u0000\u0000-5\u0003\u0006\u0003\u0000"+ - ".5\u0003\n\u0005\u0000/5\u0003\b\u0004\u000005\u0003\u0010\b\u000015\u0003"+ - "\f\u0006\u000025\u0003\u0012\t\u000035\u0003\u0014\n\u00004-\u0001\u0000"+ - "\u0000\u00004.\u0001\u0000\u0000\u00004/\u0001\u0000\u0000\u000040\u0001"+ - "\u0000\u0000\u000041\u0001\u0000\u0000\u000042\u0001\u0000\u0000\u0000"+ - "43\u0001\u0000\u0000\u00005\u0005\u0001\u0000\u0000\u000067\u0003\u0018"+ - "\f\u000078\u0005\u0005\u0000\u000089\u0005\f\u0000\u00009:\u0003\u0002"+ - "\u0001\u0000:;\u0005\r\u0000\u0000;\u0007\u0001\u0000\u0000\u0000<=\u0005"+ - "\u0010\u0000\u0000=?\u0005\u0005\u0000\u0000><\u0001\u0000\u0000\u0000"+ - ">?\u0001\u0000\u0000\u0000?@\u0001\u0000\u0000\u0000@A\u0005\u0010\u0000"+ - "\u0000A\t\u0001\u0000\u0000\u0000BC\u0005\n\u0000\u0000CD\u0003\u0002"+ - "\u0001\u0000DE\u0005\u000b\u0000\u0000E\u000b\u0001\u0000\u0000\u0000"+ - "FG\u0003\u0018\f\u0000GH\u0007\u0001\u0000\u0000HI\u0003\u000e\u0007\u0000"+ - "I\r\u0001\u0000\u0000\u0000JL\u0007\u0002\u0000\u0000KJ\u0001\u0000\u0000"+ - "\u0000LM\u0001\u0000\u0000\u0000MK\u0001\u0000\u0000\u0000MN\u0001\u0000"+ - "\u0000\u0000NQ\u0001\u0000\u0000\u0000OQ\u0005\u000f\u0000\u0000PK\u0001"+ - "\u0000\u0000\u0000PO\u0001\u0000\u0000\u0000Q\u000f\u0001\u0000\u0000"+ - "\u0000RS\u0003\u0018\f\u0000ST\u0005\u0005\u0000\u0000TU\u0005\u0010\u0000"+ - "\u0000U\u0011\u0001\u0000\u0000\u0000VW\u0003\u0018\f\u0000WX\u0005\u0005"+ - "\u0000\u0000XY\u0003\u0016\u000b\u0000Ya\u0001\u0000\u0000\u0000Z[\u0003"+ - "\u0018\f\u0000[\\\u0005\u0005\u0000\u0000\\]\u0005\n\u0000\u0000]^\u0003"+ - "\u0016\u000b\u0000^_\u0005\u000b\u0000\u0000_a\u0001\u0000\u0000\u0000"+ - "`V\u0001\u0000\u0000\u0000`Z\u0001\u0000\u0000\u0000a\u0013\u0001\u0000"+ - "\u0000\u0000bh\u0003\u0016\u000b\u0000cd\u0005\n\u0000\u0000de\u0003\u0016"+ - "\u000b\u0000ef\u0005\u000b\u0000\u0000fh\u0001\u0000\u0000\u0000gb\u0001"+ - "\u0000\u0000\u0000gc\u0001\u0000\u0000\u0000h\u0015\u0001\u0000\u0000"+ - "\u0000ik\u0007\u0000\u0000\u0000ji\u0001\u0000\u0000\u0000jk\u0001\u0000"+ - "\u0000\u0000km\u0001\u0000\u0000\u0000ln\u0007\u0002\u0000\u0000ml\u0001"+ - "\u0000\u0000\u0000no\u0001\u0000\u0000\u0000om\u0001\u0000\u0000\u0000"+ - "op\u0001\u0000\u0000\u0000p|\u0001\u0000\u0000\u0000qs\u0007\u0002\u0000"+ - "\u0000rq\u0001\u0000\u0000\u0000st\u0001\u0000\u0000\u0000tr\u0001\u0000"+ - "\u0000\u0000tu\u0001\u0000\u0000\u0000uw\u0001\u0000\u0000\u0000vx\u0007"+ - "\u0000\u0000\u0000wv\u0001\u0000\u0000\u0000wx\u0001\u0000\u0000\u0000"+ - "x|\u0001\u0000\u0000\u0000y|\u0007\u0003\u0000\u0000z|\u0005\u000f\u0000"+ - "\u0000{j\u0001\u0000\u0000\u0000{r\u0001\u0000\u0000\u0000{y\u0001\u0000"+ - "\u0000\u0000{z\u0001\u0000\u0000\u0000|\u0017\u0001\u0000\u0000\u0000"+ - "}\u007f\u0005\u000e\u0000\u0000~}\u0001\u0000\u0000\u0000\u007f\u0080"+ - "\u0001\u0000\u0000\u0000\u0080~\u0001\u0000\u0000\u0000\u0080\u0081\u0001"+ - "\u0000\u0000\u0000\u0081\u0085\u0001\u0000\u0000\u0000\u0082\u0085\u0005"+ - "\u000f\u0000\u0000\u0083\u0085\u0005\u0010\u0000\u0000\u0084~\u0001\u0000"+ - "\u0000\u0000\u0084\u0082\u0001\u0000\u0000\u0000\u0084\u0083\u0001\u0000"+ - "\u0000\u0000\u0085\u0019\u0001\u0000\u0000\u0000\u0010\u001b#*4>MP`gj"+ - "otw{\u0080\u0084"; + "\u0002\f\u0007\f\u0002\r\u0007\r\u0001\u0000\u0003\u0000\u001e\b\u0000"+ + "\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ + "\u0001\u0001\u0001\u0001\u0005\u0001(\b\u0001\n\u0001\f\u0001+\t\u0001"+ + "\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002"+ + "\u0001\u0002\u0001\u0002\u0003\u00025\b\u0002\u0001\u0003\u0001\u0003"+ + "\u0001\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004"+ + "\u0001\u0004\u0001\u0005\u0001\u0005\u0003\u0005B\b\u0005\u0001\u0005"+ + "\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0007"+ + "\u0001\u0007\u0001\u0007\u0001\u0007\u0001\b\u0004\bO\b\b\u000b\b\f\b"+ + "P\u0001\b\u0003\bT\b\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n\u0001\n"+ + "\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003"+ + "\nd\b\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0003"+ + "\u000bk\b\u000b\u0001\f\u0003\fn\b\f\u0001\f\u0004\fq\b\f\u000b\f\f\f"+ + "r\u0001\f\u0003\fv\b\f\u0001\f\u0001\f\u0003\fz\b\f\u0001\f\u0001\f\u0003"+ + "\f~\b\f\u0001\f\u0003\f\u0081\b\f\u0001\r\u0004\r\u0084\b\r\u000b\r\f"+ + "\r\u0085\u0001\r\u0001\r\u0003\r\u008a\b\r\u0001\r\u0000\u0001\u0002\u000e"+ + "\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010\u0012\u0014\u0016\u0018\u001a"+ + "\u0000\u0004\u0001\u0000\u0002\u0003\u0001\u0000\u0006\t\u0002\u0000\u000e"+ + "\u000e\u0010\u0010\u0001\u0000\u0002\u0004\u0096\u0000\u001d\u0001\u0000"+ + "\u0000\u0000\u0002!\u0001\u0000\u0000\u0000\u00044\u0001\u0000\u0000\u0000"+ + "\u00066\u0001\u0000\u0000\u0000\b9\u0001\u0000\u0000\u0000\nA\u0001\u0000"+ + "\u0000\u0000\fE\u0001\u0000\u0000\u0000\u000eI\u0001\u0000\u0000\u0000"+ + "\u0010S\u0001\u0000\u0000\u0000\u0012U\u0001\u0000\u0000\u0000\u0014c"+ + "\u0001\u0000\u0000\u0000\u0016j\u0001\u0000\u0000\u0000\u0018\u0080\u0001"+ + "\u0000\u0000\u0000\u001a\u0089\u0001\u0000\u0000\u0000\u001c\u001e\u0003"+ + "\u0002\u0001\u0000\u001d\u001c\u0001\u0000\u0000\u0000\u001d\u001e\u0001"+ + "\u0000\u0000\u0000\u001e\u001f\u0001\u0000\u0000\u0000\u001f \u0005\u0000"+ + "\u0000\u0001 \u0001\u0001\u0000\u0000\u0000!\"\u0006\u0001\uffff\uffff"+ + "\u0000\"#\u0003\u0004\u0002\u0000#)\u0001\u0000\u0000\u0000$%\n\u0002"+ + "\u0000\u0000%&\u0007\u0000\u0000\u0000&(\u0003\u0002\u0001\u0002\'$\u0001"+ + "\u0000\u0000\u0000(+\u0001\u0000\u0000\u0000)\'\u0001\u0000\u0000\u0000"+ + ")*\u0001\u0000\u0000\u0000*\u0003\u0001\u0000\u0000\u0000+)\u0001\u0000"+ + "\u0000\u0000,5\u0003\u0006\u0003\u0000-5\u0003\b\u0004\u0000.5\u0003\f"+ + "\u0006\u0000/5\u0003\n\u0005\u000005\u0003\u0012\t\u000015\u0003\u000e"+ + "\u0007\u000025\u0003\u0014\n\u000035\u0003\u0016\u000b\u00004,\u0001\u0000"+ + "\u0000\u00004-\u0001\u0000\u0000\u00004.\u0001\u0000\u0000\u00004/\u0001"+ + "\u0000\u0000\u000040\u0001\u0000\u0000\u000041\u0001\u0000\u0000\u0000"+ + "42\u0001\u0000\u0000\u000043\u0001\u0000\u0000\u00005\u0005\u0001\u0000"+ + "\u0000\u000067\u0005\u0004\u0000\u000078\u0003\u0004\u0002\u00008\u0007"+ + "\u0001\u0000\u0000\u00009:\u0003\u001a\r\u0000:;\u0005\u0005\u0000\u0000"+ + ";<\u0005\f\u0000\u0000<=\u0003\u0002\u0001\u0000=>\u0005\r\u0000\u0000"+ + ">\t\u0001\u0000\u0000\u0000?@\u0005\u0010\u0000\u0000@B\u0005\u0005\u0000"+ + "\u0000A?\u0001\u0000\u0000\u0000AB\u0001\u0000\u0000\u0000BC\u0001\u0000"+ + "\u0000\u0000CD\u0005\u0010\u0000\u0000D\u000b\u0001\u0000\u0000\u0000"+ + "EF\u0005\n\u0000\u0000FG\u0003\u0002\u0001\u0000GH\u0005\u000b\u0000\u0000"+ + "H\r\u0001\u0000\u0000\u0000IJ\u0003\u001a\r\u0000JK\u0007\u0001\u0000"+ + "\u0000KL\u0003\u0010\b\u0000L\u000f\u0001\u0000\u0000\u0000MO\u0007\u0002"+ + "\u0000\u0000NM\u0001\u0000\u0000\u0000OP\u0001\u0000\u0000\u0000PN\u0001"+ + "\u0000\u0000\u0000PQ\u0001\u0000\u0000\u0000QT\u0001\u0000\u0000\u0000"+ + "RT\u0005\u000f\u0000\u0000SN\u0001\u0000\u0000\u0000SR\u0001\u0000\u0000"+ + "\u0000T\u0011\u0001\u0000\u0000\u0000UV\u0003\u001a\r\u0000VW\u0005\u0005"+ + "\u0000\u0000WX\u0005\u0010\u0000\u0000X\u0013\u0001\u0000\u0000\u0000"+ + "YZ\u0003\u001a\r\u0000Z[\u0005\u0005\u0000\u0000[\\\u0003\u0018\f\u0000"+ + "\\d\u0001\u0000\u0000\u0000]^\u0003\u001a\r\u0000^_\u0005\u0005\u0000"+ + "\u0000_`\u0005\n\u0000\u0000`a\u0003\u0018\f\u0000ab\u0005\u000b\u0000"+ + "\u0000bd\u0001\u0000\u0000\u0000cY\u0001\u0000\u0000\u0000c]\u0001\u0000"+ + "\u0000\u0000d\u0015\u0001\u0000\u0000\u0000ek\u0003\u0018\f\u0000fg\u0005"+ + "\n\u0000\u0000gh\u0003\u0018\f\u0000hi\u0005\u000b\u0000\u0000ik\u0001"+ + "\u0000\u0000\u0000je\u0001\u0000\u0000\u0000jf\u0001\u0000\u0000\u0000"+ + "k\u0017\u0001\u0000\u0000\u0000ln\u0007\u0003\u0000\u0000ml\u0001\u0000"+ + "\u0000\u0000mn\u0001\u0000\u0000\u0000np\u0001\u0000\u0000\u0000oq\u0007"+ + "\u0002\u0000\u0000po\u0001\u0000\u0000\u0000qr\u0001\u0000\u0000\u0000"+ + "rp\u0001\u0000\u0000\u0000rs\u0001\u0000\u0000\u0000su\u0001\u0000\u0000"+ + "\u0000tv\u0007\u0003\u0000\u0000ut\u0001\u0000\u0000\u0000uv\u0001\u0000"+ + "\u0000\u0000v\u0081\u0001\u0000\u0000\u0000wy\u0007\u0000\u0000\u0000"+ + "xz\u0007\u0003\u0000\u0000yx\u0001\u0000\u0000\u0000yz\u0001\u0000\u0000"+ + "\u0000z\u0081\u0001\u0000\u0000\u0000{}\u0005\u0004\u0000\u0000|~\u0007"+ + "\u0000\u0000\u0000}|\u0001\u0000\u0000\u0000}~\u0001\u0000\u0000\u0000"+ + "~\u0081\u0001\u0000\u0000\u0000\u007f\u0081\u0005\u000f\u0000\u0000\u0080"+ + "m\u0001\u0000\u0000\u0000\u0080w\u0001\u0000\u0000\u0000\u0080{\u0001"+ + "\u0000\u0000\u0000\u0080\u007f\u0001\u0000\u0000\u0000\u0081\u0019\u0001"+ + "\u0000\u0000\u0000\u0082\u0084\u0005\u000e\u0000\u0000\u0083\u0082\u0001"+ + "\u0000\u0000\u0000\u0084\u0085\u0001\u0000\u0000\u0000\u0085\u0083\u0001"+ + "\u0000\u0000\u0000\u0085\u0086\u0001\u0000\u0000\u0000\u0086\u008a\u0001"+ + "\u0000\u0000\u0000\u0087\u008a\u0005\u000f\u0000\u0000\u0088\u008a\u0005"+ + "\u0010\u0000\u0000\u0089\u0083\u0001\u0000\u0000\u0000\u0089\u0087\u0001"+ + "\u0000\u0000\u0000\u0089\u0088\u0001\u0000\u0000\u0000\u008a\u001b\u0001"+ + "\u0000\u0000\u0000\u0010\u001d)4APScjmruy}\u0080\u0085\u0089"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java index 67253e4364190..18ef8f389195b 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java @@ -24,13 +24,6 @@ interface KqlBaseVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitTopLevelQuery(KqlBaseParser.TopLevelQueryContext ctx); - /** - * Visit a parse tree produced by the {@code notQuery} - * labeled alternative in {@link KqlBaseParser#query}. - * @param ctx the parse tree - * @return the visitor result - */ - T visitNotQuery(KqlBaseParser.NotQueryContext ctx); /** * Visit a parse tree produced by the {@code booleanQuery} * labeled alternative in {@link KqlBaseParser#query}. @@ -51,6 +44,12 @@ interface KqlBaseVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx); + /** + * Visit a parse tree produced by {@link KqlBaseParser#notQuery}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNotQuery(KqlBaseParser.NotQueryContext ctx); /** * Visit a parse tree produced by {@link KqlBaseParser#nestedQuery}. * @param ctx the parse tree diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParser.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParser.java index 41bda7524c653..1064f901cacb8 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParser.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParser.java @@ -30,12 +30,17 @@ public QueryBuilder parseKqlQuery(String kqlQuery, SearchExecutionContext search log.debug("Parsing KQL query: {}", kqlQuery); } - return invokeParser(kqlQuery, searchExecutionContext, KqlBaseParser::topLevelQuery, KqlAstBuilder::toQueryBuilder); + return invokeParser( + kqlQuery, + new KqlParserExecutionContext(searchExecutionContext), + KqlBaseParser::topLevelQuery, + KqlAstBuilder::toQueryBuilder + ); } private T invokeParser( String kqlQuery, - SearchExecutionContext searchExecutionContext, + KqlParserExecutionContext kqlParserExecutionContext, Function parseFunction, BiFunction visitor ) { @@ -58,7 +63,7 @@ private T invokeParser( log.trace("Parse tree: {}", tree.toStringTree()); } - return visitor.apply(new KqlAstBuilder(searchExecutionContext), tree); + return visitor.apply(new KqlAstBuilder(kqlParserExecutionContext), tree); } private static final BaseErrorListener ERROR_LISTENER = new BaseErrorListener() { diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParserExecutionContext.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParserExecutionContext.java new file mode 100644 index 0000000000000..d05c70c6b933f --- /dev/null +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParserExecutionContext.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.mapper.AbstractScriptFieldType; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.SearchExecutionContext; + +import java.time.ZoneId; +import java.util.List; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static org.elasticsearch.core.Tuple.tuple; + +class KqlParserExecutionContext extends SearchExecutionContext { + + private static final List IGNORED_METADATA_FIELDS = List.of( + "_seq_no", + "_index_mode", + "_routing", + "_ignored", + "_nested_path", + "_field_names" + ); + + private static Predicate> searchableFieldFilter = (fieldDef) -> fieldDef.v2().isSearchable(); + + private static Predicate> ignoredFieldFilter = (fieldDef) -> IGNORED_METADATA_FIELDS.contains( + fieldDef.v1() + ); + + KqlParserExecutionContext(SearchExecutionContext source) { + super(source); + } + + public Iterable> resolveFields(KqlBaseParser.FieldNameContext fieldNameContext) { + // TODO: use index settings default field. + String fieldNamePattern = fieldNameContext != null ? ParserUtils.extractText(fieldNameContext) : "*"; + + if (fieldNameContext != null && fieldNameContext.value != null && fieldNameContext.value.getType() == KqlBaseParser.QUOTED_STRING) { + return isFieldMapped(fieldNamePattern) ? List.of(tuple(fieldNamePattern, getFieldType(fieldNamePattern))) : List.of(); + } + + return getMatchingFieldNames(fieldNamePattern).stream() + .map(fieldName -> tuple(fieldName, getFieldType(fieldName))) + .filter(searchableFieldFilter.and(Predicate.not(ignoredFieldFilter))) + .collect(Collectors.toList()); + } + + public boolean isCaseSensitive() { + // TODO: implementation + return false; + } + + public ZoneId timeZone() { + return null; + } + + public static boolean isRuntimeField(MappedFieldType fieldType) { + return fieldType instanceof AbstractScriptFieldType; + } + + public static boolean isDateField(MappedFieldType fieldType) { + return fieldType.typeName().equals(DateFieldMapper.CONTENT_TYPE); + } + + public static boolean isKeywordField(MappedFieldType fieldType) { + return fieldType.typeName().equals(KeywordFieldMapper.CONTENT_TYPE); + } +} diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java new file mode 100644 index 0000000000000..88c63e9a2585b --- /dev/null +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.core.Predicates; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.index.query.MatchPhraseQueryBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.MultiMatchQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryStringQueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.WildcardQueryBuilder; +import org.elasticsearch.test.AbstractBuilderTestCase; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URL; +import java.net.URLConnection; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; + +public abstract class AbstractKqlParserTestCase extends AbstractBuilderTestCase { + + protected static final String SUPPORTED_QUERY_FILE_PATH = "/supported-queries"; + protected static final String UNSUPPORTED_QUERY_FILE_PATH = "/unsupported-queries"; + protected static final Predicate BOOLEAN_QUERY_FILTER = (q) -> q.matches("(?i)[^{]*[^\\\\]*(NOT|AND|OR)[^}]*"); + + protected static String wrapWithRandomWhitespaces(String input) { + return String.join("", randomWhitespaces(), input, randomWhitespaces()); + } + + protected static String randomWhitespaces() { + return randomWhitespaces(randomInt(20)); + } + + protected static String randomWhitespaces(int length) { + return Stream.generate(() -> randomFrom(" ", "\t", "\n", "\r", "\u3000")).limit(length).collect(Collectors.joining()); + } + + protected static List readQueries(String source) throws IOException { + return readQueries(source, Predicates.always()); + } + + protected static List readQueries(String source, Predicate filter) throws IOException { + URL url = KqlParserTests.class.getResource(source); + Objects.requireNonNull(source, "Cannot find resource " + url); + + List queries = new ArrayList<>(); + + try (BufferedReader reader = new BufferedReader(new InputStreamReader(readFromJarUrl(url), StandardCharsets.UTF_8))) { + String line; + + while ((line = reader.readLine()) != null) { + String query = line.trim(); + // ignore comments + if (query.isEmpty() == false && query.startsWith("//") == false && filter.test(query)) { + queries.add(query); + } + } + } + return queries; + } + + @SuppressForbidden(reason = "test reads from jar") + private static InputStream readFromJarUrl(URL source) throws IOException { + URLConnection con = source.openConnection(); + // do not to cache files (to avoid keeping file handles around) + con.setUseCaches(false); + return con.getInputStream(); + } + + protected List mappedLeafFields() { + return Stream.concat( + Arrays.stream(MAPPED_LEAF_FIELD_NAMES), + List.of(DATE_FIELD_NAME, INT_FIELD_NAME).stream().map(subfieldName -> OBJECT_FIELD_NAME + "." + subfieldName) + ).toList(); + } + + protected List searchableFields() { + return Stream.concat( + mappedLeafFields().stream().filter(fieldName -> fieldName.equals(BINARY_FIELD_NAME) == false), + Stream.of("_id", "_index") + ).toList(); + } + + protected List searchableFields(String fieldNamePattern) { + return searchableFields().stream().filter(fieldName -> Regex.simpleMatch(fieldNamePattern, fieldName)).toList(); + } + + protected QueryBuilder parseKqlQuery(String kqlQuery) { + KqlParser parser = new KqlParser(); + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); + + return parser.parseKqlQuery(kqlQuery, searchExecutionContext); + } + + protected static void assertMultiMatchQuery(QueryBuilder query, String expectedValue, MultiMatchQueryBuilder.Type expectedType) { + MultiMatchQueryBuilder multiMatchQuery = asInstanceOf(MultiMatchQueryBuilder.class, query); + assertThat(multiMatchQuery.fields(), anEmptyMap()); + assertThat(multiMatchQuery.lenient(), equalTo(true)); + assertThat(multiMatchQuery.type(), equalTo(expectedType)); + assertThat(multiMatchQuery.value(), equalTo(expectedValue)); + } + + protected static void assertQueryStringBuilder(QueryBuilder query, String expectedFieldName, String expectedValue) { + QueryStringQueryBuilder queryStringQuery = asInstanceOf(QueryStringQueryBuilder.class, query); + assertThat(queryStringQuery.queryString(), equalTo(expectedValue)); + assertThat(queryStringQuery.fields().keySet(), contains(expectedFieldName)); + } + + protected static void assertQueryStringBuilder(QueryBuilder query, String expectedValue) { + QueryStringQueryBuilder queryStringQuery = asInstanceOf(QueryStringQueryBuilder.class, query); + assertThat(queryStringQuery.queryString(), equalTo(expectedValue)); + assertThat(queryStringQuery.fields(), anEmptyMap()); + } + + protected static void assertTermQueryBuilder(QueryBuilder queryBuilder, String expectedFieldName, String expectedValue) { + TermQueryBuilder termQuery = asInstanceOf(TermQueryBuilder.class, queryBuilder); + assertThat(termQuery.fieldName(), equalTo(expectedFieldName)); + assertThat(termQuery.value(), equalTo(expectedValue)); + } + + protected static void assertMatchQueryBuilder(QueryBuilder queryBuilder, String expectedFieldName, String expectedValue) { + MatchQueryBuilder matchQuery = asInstanceOf(MatchQueryBuilder.class, queryBuilder); + assertThat(matchQuery.fieldName(), equalTo(expectedFieldName)); + assertThat(matchQuery.value(), equalTo(expectedValue)); + } + + protected static void assertMatchPhraseBuilder(QueryBuilder queryBuilder, String expectedFieldName, String expectedValue) { + MatchPhraseQueryBuilder matchQuery = asInstanceOf(MatchPhraseQueryBuilder.class, queryBuilder); + assertThat(matchQuery.fieldName(), equalTo(expectedFieldName)); + assertThat(matchQuery.value(), equalTo(expectedValue)); + } + + protected static void assertWildcardQueryBuilder(QueryBuilder queryBuilder, String expectedFieldName, String expectedValue) { + WildcardQueryBuilder matchQuery = asInstanceOf(WildcardQueryBuilder.class, queryBuilder); + assertThat(matchQuery.fieldName(), equalTo(expectedFieldName)); + assertThat(matchQuery.value(), equalTo(expectedValue)); + } + + protected static void assertRangeQueryBuilder( + QueryBuilder queryBuilder, + String expectedFieldName, + Consumer codeBlock + ) { + RangeQueryBuilder rangeQuery = asInstanceOf(RangeQueryBuilder.class, queryBuilder); + assertThat(rangeQuery.fieldName(), equalTo(expectedFieldName)); + codeBlock.accept(rangeQuery); + } +} diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserBooleanQueryTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserBooleanQueryTests.java new file mode 100644 index 0000000000000..b87f84f458e9d --- /dev/null +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserBooleanQueryTests.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.elasticsearch.core.Strings; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; + +import java.io.IOException; +import java.util.List; +import java.util.function.Predicate; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; + +public class KqlParserBooleanQueryTests extends AbstractKqlParserTestCase { + + public void testParseNotQuery() throws IOException { + for (String baseQuery : readQueries(SUPPORTED_QUERY_FILE_PATH)) { + if (BOOLEAN_QUERY_FILTER.test(baseQuery)) { + baseQuery = wrapWithRandomWhitespaces("(") + baseQuery + wrapWithRandomWhitespaces(")"); + } + + String notQuery = wrapWithRandomWhitespaces("NOT ") + baseQuery; + + BoolQueryBuilder parsedQuery = asInstanceOf(BoolQueryBuilder.class, parseKqlQuery(notQuery)); + assertThat(parsedQuery.filter(), empty()); + assertThat(parsedQuery.should(), empty()); + assertThat(parsedQuery.must(), empty()); + assertThat(parsedQuery.mustNot(), hasSize(1)); + assertThat(parsedQuery.mustNot(), hasItem(equalTo((parseKqlQuery(baseQuery))))); + + assertThat( + parseKqlQuery("NOT" + wrapWithRandomWhitespaces("(") + baseQuery + wrapWithRandomWhitespaces(")")), + equalTo(parsedQuery) + ); + } + } + + public void testParseOrQuery() throws IOException { + List supportedQueries = readQueries(SUPPORTED_QUERY_FILE_PATH); + + for (int runs = 0; runs < 100; runs++) { + String queryA = randomFrom(supportedQueries); + String queryB = randomFrom(supportedQueries); + String orQuery = queryA + wrapWithRandomWhitespaces(randomFrom(" or ", " OR ", " Or ", " oR ")) + queryB; + + BoolQueryBuilder parsedQuery = asInstanceOf(BoolQueryBuilder.class, parseKqlQuery(orQuery)); + + if (Stream.of(queryA, queryB).noneMatch(BOOLEAN_QUERY_FILTER)) { + // If one of the subquery is a boolean query, it is impossible to test the content of the generated query because + // operator precedence rules are applied. There are specific tests for it in testOperatorPrecedence. + assertThat(parsedQuery.filter(), empty()); + assertThat(parsedQuery.must(), empty()); + assertThat(parsedQuery.mustNot(), empty()); + assertThat(parsedQuery.should(), hasSize(2)); + assertThat(parsedQuery.minimumShouldMatch(), equalTo("1")); + assertThat( + parsedQuery.should(), + allOf(hasItem(equalTo((parseKqlQuery(queryA)))), hasItem(equalTo((parseKqlQuery(queryB))))) + ); + } + } + + for (int runs = 0; runs < 100; runs++) { + String queryA = randomFrom(supportedQueries); + String queryB = randomFrom(supportedQueries); + String queryC = randomFrom(supportedQueries); + String orQuery = Strings.format("%s OR %s OR %s", queryA, queryB, queryC); + + BoolQueryBuilder parsedQuery = asInstanceOf(BoolQueryBuilder.class, parseKqlQuery(orQuery)); + + if (Stream.of(queryA, queryB, queryC).noneMatch(BOOLEAN_QUERY_FILTER)) { + // If one of the subquery is a boolean query, it is impossible to test the content of the generated query because + // operator precedence rules are applied. There are specific tests for it in testOperatorPrecedence. + assertThat(parsedQuery.should(), hasSize(3)); + assertThat( + parsedQuery.should(), + allOf( + hasItem(equalTo((parseKqlQuery(queryA)))), + hasItem(equalTo((parseKqlQuery(queryB)))), + hasItem(equalTo((parseKqlQuery(queryC)))) + ) + ); + } + } + } + + public void testParseAndQuery() throws IOException { + List supportedQueries = readQueries(SUPPORTED_QUERY_FILE_PATH); + + for (int runs = 0; runs < 100; runs++) { + String queryA = randomFrom(supportedQueries); + String queryB = randomFrom(supportedQueries); + String andQuery = queryA + wrapWithRandomWhitespaces(randomFrom(" and ", " AND ", " And ", " anD ")) + queryB; + + BoolQueryBuilder parsedQuery = asInstanceOf(BoolQueryBuilder.class, parseKqlQuery(andQuery)); + + if (Stream.of(queryA, queryB).noneMatch(BOOLEAN_QUERY_FILTER)) { + // If one of the subquery is a boolean query, it is impossible to test the content of the generated query because + // operator precedence rules are applied. There are specific tests for it in testOperatorPrecedence. + assertThat(parsedQuery.filter(), empty()); + assertThat(parsedQuery.should(), empty()); + assertThat(parsedQuery.mustNot(), empty()); + assertThat(parsedQuery.must(), hasSize(2)); + assertThat(parsedQuery.must(), allOf(hasItem(equalTo((parseKqlQuery(queryA)))), hasItem(equalTo((parseKqlQuery(queryB)))))); + } + } + + for (int runs = 0; runs < 100; runs++) { + String queryA = randomFrom(supportedQueries); + String queryB = randomFrom(supportedQueries); + String queryC = randomFrom(supportedQueries); + String andQuery = Strings.format("%s AND %s AND %s", queryA, queryB, queryC); + + if (Stream.of(queryA, queryB, queryC).noneMatch(BOOLEAN_QUERY_FILTER)) { + // If one of the subquery is a boolean query, it is impossible to test the content of the generated query because + // operator precedence rules are applied. There are specific tests for it in testOperatorPrecedence. + BoolQueryBuilder parsedQuery = asInstanceOf(BoolQueryBuilder.class, parseKqlQuery(andQuery)); + assertThat(parsedQuery.must(), hasSize(3)); + assertThat( + parsedQuery.must(), + allOf( + hasItem(equalTo((parseKqlQuery(queryA)))), + hasItem(equalTo((parseKqlQuery(queryB)))), + hasItem(equalTo((parseKqlQuery(queryC)))) + ) + ); + } + } + } + + public void testOperatorPrecedence() throws IOException { + List supportedQueries = readQueries(SUPPORTED_QUERY_FILE_PATH, Predicate.not(BOOLEAN_QUERY_FILTER)); + + for (int runs = 0; runs < 100; runs++) { + String queryA = randomFrom(supportedQueries); + String queryB = randomFrom(supportedQueries); + String queryC = randomFrom(supportedQueries); + + // AND OR is equivalent to AND ( OR ) + { + QueryBuilder parsedQuery = parseKqlQuery(Strings.format("%s AND %s OR %s", queryA, queryB, queryC)); + assertThat( + parsedQuery, + equalTo( + QueryBuilders.boolQuery() + .must(parseKqlQuery(queryA)) + .must( + QueryBuilders.boolQuery().minimumShouldMatch(1).should(parseKqlQuery(queryB)).should(parseKqlQuery(queryC)) + ) + ) + ); + assertThat(parsedQuery, equalTo(parseKqlQuery(Strings.format("%s AND (%s OR %s)", queryA, queryB, queryC)))); + } + + // OR AND is equivalent to OR ( AND ) + { + QueryBuilder parsedQuery = parseKqlQuery(Strings.format("%s OR %s AND %s", queryA, queryB, queryC)); + assertThat( + parsedQuery, + equalTo( + QueryBuilders.boolQuery() + .minimumShouldMatch(1) + .should(parseKqlQuery(queryA)) + .should(QueryBuilders.boolQuery().must(parseKqlQuery(queryB)).must(parseKqlQuery(queryC))) + ) + ); + assertThat(parsedQuery, equalTo(parseKqlQuery(Strings.format("%s OR (%s AND %s)", queryA, queryB, queryC)))); + } + + // AND is equivalent to (NOT ) AND + { + QueryBuilder parsedQuery = parseKqlQuery(Strings.format("NOT %s AND %s", queryA, queryB)); + assertThat( + parsedQuery, + equalTo( + QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().mustNot(parseKqlQuery(queryA))).must(parseKqlQuery(queryB)) + ) + ); + assertThat(parsedQuery, equalTo(parseKqlQuery(Strings.format("(NOT %s) AND %s", queryA, queryB)))); + } + + // OR is equivalent to (NOT ) OR + { + QueryBuilder parsedQuery = parseKqlQuery(Strings.format("NOT %s OR %s", queryA, queryB)); + assertThat( + parsedQuery, + equalTo( + QueryBuilders.boolQuery() + .minimumShouldMatch(1) + .should(QueryBuilders.boolQuery().mustNot(parseKqlQuery(queryA))) + .should(parseKqlQuery(queryB)) + ) + ); + assertThat(parsedQuery, equalTo(parseKqlQuery(Strings.format("(NOT %s) OR %s", queryA, queryB)))); + } + } + } +} diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java new file mode 100644 index 0000000000000..45dd3312bbc03 --- /dev/null +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.ExistsQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.isA; + +public class KqlParserExistsQueryTests extends AbstractKqlParserTestCase { + + public void testParseExistsQueryWithNoMatchingFields() { + // Using an unquoted literal + assertThat(parseKqlQuery(kqlExistsQuery("not_a_valid_field")), isA(MatchNoneQueryBuilder.class)); + + // Using an a quoted string + assertThat(parseKqlQuery(kqlExistsQuery("\"not_a_valid_field\"")), isA(MatchNoneQueryBuilder.class)); + + // Not expanding wildcard with quoted string + assertThat(parseKqlQuery(kqlExistsQuery("\"mapped_*\"")), isA(MatchNoneQueryBuilder.class)); + + // Object fields are not supported by the exists query. Returning a MatchNoneQueryBuilder in this case. + assertThat(parseKqlQuery(kqlExistsQuery(OBJECT_FIELD_NAME)), isA(MatchNoneQueryBuilder.class)); + } + + public void testParseExistsQueryWithASingleField() { + for (String fieldName : searchableFields()) { + ExistsQueryBuilder parsedQuery = asInstanceOf(ExistsQueryBuilder.class, parseKqlQuery(kqlExistsQuery(fieldName))); + assertThat(parsedQuery.fieldName(), equalTo(fieldName)); + + // Using quotes to wrap the field name does not change the result. + assertThat(parseKqlQuery(kqlExistsQuery("\"" + fieldName + "\"")), equalTo(parsedQuery)); + } + } + + public void testParseExistsQueryUsingWildcardFieldName() { + String fieldNamePattern = "mapped_*"; + BoolQueryBuilder parsedQuery = asInstanceOf(BoolQueryBuilder.class, parseKqlQuery(kqlExistsQuery(fieldNamePattern))); + assertThat(parsedQuery.minimumShouldMatch(), equalTo("1")); + assertThat(parsedQuery.must(), empty()); + assertThat(parsedQuery.mustNot(), empty()); + assertThat(parsedQuery.filter(), empty()); + + assertThat( + parsedQuery.should(), + containsInAnyOrder(searchableFields(fieldNamePattern).stream().map(QueryBuilders::existsQuery).toArray()) + ); + } + + private static String kqlExistsQuery(String field) { + return wrapWithRandomWhitespaces(field) + wrapWithRandomWhitespaces(":") + wrapWithRandomWhitespaces("*"); + } +} diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserFieldQueryTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserFieldQueryTests.java new file mode 100644 index 0000000000000..95814ee265745 --- /dev/null +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserFieldQueryTests.java @@ -0,0 +1,312 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.elasticsearch.core.Strings; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; + +import java.util.List; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.isA; + +public class KqlParserFieldQueryTests extends AbstractKqlParserTestCase { + + public void testParseFieldQueryWithNoMatchingFields() { + // Using an unquoted literal + assertThat(parseKqlQuery(kqlFieldQuery("not_a_field", randomIdentifier())), isA(MatchNoneQueryBuilder.class)); + assertThat(parseKqlQuery(kqlFieldQuery("not_a_field", randomInt())), isA(MatchNoneQueryBuilder.class)); + assertThat(parseKqlQuery(kqlFieldQuery("not_a_field", quoteString(randomIdentifier()))), isA(MatchNoneQueryBuilder.class)); + + // Using a quoted string as field name + assertThat(parseKqlQuery(kqlFieldQuery(quoteString("not_a_field"), randomIdentifier())), isA(MatchNoneQueryBuilder.class)); + assertThat(parseKqlQuery(kqlFieldQuery(quoteString("not_a_field"), randomInt())), isA(MatchNoneQueryBuilder.class)); + assertThat( + parseKqlQuery(kqlFieldQuery(quoteString("not_a_field"), quoteString(randomIdentifier()))), + isA(MatchNoneQueryBuilder.class) + ); + + // Not expanding wildcard with quoted string + assertThat(parseKqlQuery(kqlFieldQuery(quoteString("mapped_*"), randomIdentifier())), isA(MatchNoneQueryBuilder.class)); + assertThat(parseKqlQuery(kqlFieldQuery(quoteString("mapped_*"), randomInt())), isA(MatchNoneQueryBuilder.class)); + assertThat( + parseKqlQuery(kqlFieldQuery(quoteString("mapped_*"), quoteString(randomIdentifier()))), + isA(MatchNoneQueryBuilder.class) + ); + } + + public void testParseUnquotedLiteralKeywordFieldQuery() { + // Single word + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo")), KEYWORD_FIELD_NAME, "foo"); + + // Multiple words + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo bar")), KEYWORD_FIELD_NAME, "foo bar"); + + // Escaped keywords + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo \\and bar")), KEYWORD_FIELD_NAME, "foo and bar"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo \\or bar")), KEYWORD_FIELD_NAME, "foo or bar"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "\\not foo bar")), KEYWORD_FIELD_NAME, "not foo bar"); + + // Escaped characters + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo \\* bar")), KEYWORD_FIELD_NAME, "foo * bar"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo\\(bar\\)")), KEYWORD_FIELD_NAME, "foo(bar)"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo\\{bar\\}")), KEYWORD_FIELD_NAME, "foo{bar}"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo\\:bar")), KEYWORD_FIELD_NAME, "foo:bar"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo\\bar")), KEYWORD_FIELD_NAME, "foo>bar"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo \\\\ bar")), KEYWORD_FIELD_NAME, "foo \\ bar"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo\\\"bar\\\"")), KEYWORD_FIELD_NAME, "foo\"bar\""); + + // Wrapping terms into parentheses + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "(foo baz)")), KEYWORD_FIELD_NAME, "foo baz"); + + // Trailing operators (AND, NOT, OR) are terms of the match query + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "bar AND")), KEYWORD_FIELD_NAME, "bar AND"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "bar OR")), KEYWORD_FIELD_NAME, "bar OR"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "bar NOT")), KEYWORD_FIELD_NAME, "bar NOT"); + + // Leading operators (AND, OR) are terms of the match query + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "AND bar")), KEYWORD_FIELD_NAME, "AND bar"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "OR bar")), KEYWORD_FIELD_NAME, "OR bar"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "NOT bar")), KEYWORD_FIELD_NAME, "NOT bar"); + + // Lonely operators (AND, NOT, OR) + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "AND AND")), KEYWORD_FIELD_NAME, "AND AND"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "AND OR")), KEYWORD_FIELD_NAME, "AND OR"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "AND NOT")), KEYWORD_FIELD_NAME, "AND NOT"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "OR")), KEYWORD_FIELD_NAME, "OR"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "OR AND")), KEYWORD_FIELD_NAME, "OR AND"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "OR OR")), KEYWORD_FIELD_NAME, "OR OR"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "OR NOT")), KEYWORD_FIELD_NAME, "OR NOT"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "NOT")), KEYWORD_FIELD_NAME, "NOT"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "NOT AND")), KEYWORD_FIELD_NAME, "NOT AND"); + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "NOT OR")), KEYWORD_FIELD_NAME, "NOT OR"); + + // Check we can use quoted field name as well + assertThat( + parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "foo")), + equalTo(parseKqlQuery(kqlFieldQuery(quoteString(KEYWORD_FIELD_NAME), "foo"))) + ); + } + + public void testParseDateFieldQuery() { + assertRangeQueryBuilder(parseKqlQuery(kqlFieldQuery(DATE_FIELD_NAME, "2010-06-03")), DATE_FIELD_NAME, (rangeQuery) -> { + assertThat(rangeQuery.from(), equalTo("2010-06-03")); + assertThat(rangeQuery.includeLower(), equalTo(true)); + assertThat(rangeQuery.to(), equalTo("2010-06-03")); + assertThat(rangeQuery.includeUpper(), equalTo(true)); + + // Check we can use quoted field name as well + assertThat(parseKqlQuery(kqlFieldQuery(quoteString(DATE_FIELD_NAME), "2010-06-03")), equalTo(rangeQuery)); + + // Check we can use quoted value as well + assertThat(parseKqlQuery(kqlFieldQuery(DATE_FIELD_NAME, quoteString("2010-06-03"))), equalTo(rangeQuery)); + }); + } + + public void testParseUnquotedLiteralMatchFieldsQuery() { + for (String fieldName : List.of(TEXT_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME)) { + // Single word + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo")), fieldName, "foo"); + + // Numbers are converted to string + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, 1)), fieldName, "1"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, 1.5)), fieldName, "1.5"); + + // Multiple words + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo bar")), fieldName, "foo bar"); + + // Escaped keywords + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo \\and bar")), fieldName, "foo and bar"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo \\or bar")), fieldName, "foo or bar"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "\\not foo bar")), fieldName, "not foo bar"); + + // Escaped characters + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo \\* bar")), fieldName, "foo * bar"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo\\(bar\\)")), fieldName, "foo(bar)"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo\\{bar\\}")), fieldName, "foo{bar}"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo\\:bar")), fieldName, "foo:bar"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo\\bar")), fieldName, "foo>bar"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo \\\\ bar")), fieldName, "foo \\ bar"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo\\\"bar\\\"")), fieldName, "foo\"bar\""); + + // Wrapping terms into parentheses + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "(foo baz)")), fieldName, "foo baz"); + + // Trailing operators (AND, NOT, OR) are terms of the match query + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "bar AND")), fieldName, "bar AND"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "bar OR")), fieldName, "bar OR"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "bar NOT")), fieldName, "bar NOT"); + + // Leading operators (AND, OR) are terms of the match query + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "AND bar")), fieldName, "AND bar"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "OR bar")), fieldName, "OR bar"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "NOT bar")), fieldName, "NOT bar"); + + // Lonely operators (AND, NOT, OR) + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "AND")), fieldName, "AND"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "AND AND")), fieldName, "AND AND"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "AND OR")), fieldName, "AND OR"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "AND NOT")), fieldName, "AND NOT"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "OR")), fieldName, "OR"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "OR AND")), fieldName, "OR AND"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "OR OR")), fieldName, "OR OR"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "OR NOT")), fieldName, "OR NOT"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "NOT")), fieldName, "NOT"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "NOT AND")), fieldName, "NOT AND"); + assertMatchQueryBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "NOT OR")), fieldName, "NOT OR"); + + // Check we can use quoted field name as well + assertThat( + parseKqlQuery(kqlFieldQuery(fieldName, "foo")), + equalTo(parseKqlQuery(kqlFieldQuery(quoteString(fieldName), "foo"))) + ); + } + } + + public void testParseQuotedStringKeywordFieldQuery() { + // Single word + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, quoteString("foo"))), KEYWORD_FIELD_NAME, "foo"); + + // Multiple words + assertTermQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, quoteString("foo bar"))), KEYWORD_FIELD_NAME, "foo bar"); + + // Containing unescaped KQL reserved keyword + assertTermQueryBuilder( + parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, quoteString("not foo and bar or baz"))), + KEYWORD_FIELD_NAME, + "not foo and bar or baz" + ); + + // Containing unescaped KQL reserved characters + assertTermQueryBuilder( + parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, quoteString("foo*: {(})"))), + KEYWORD_FIELD_NAME, + "foo*: {(})" + ); + } + + public void testParseQuotedStringMatchFieldsQuery() { + for (String fieldName : List.of(TEXT_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME)) { + + // Single word + assertMatchPhraseBuilder(parseKqlQuery(kqlFieldQuery(fieldName, quoteString("foo"))), fieldName, "foo"); + + // Multiple words + assertMatchPhraseBuilder(parseKqlQuery(kqlFieldQuery(fieldName, quoteString("foo bar"))), fieldName, "foo bar"); + + // Containing unescaped KQL reserved keyword + assertMatchPhraseBuilder( + parseKqlQuery(kqlFieldQuery(fieldName, quoteString("not foo and bar or baz"))), + fieldName, + "not foo and bar or baz" + ); + + // Containing unescaped KQL reserved characters + assertMatchPhraseBuilder(parseKqlQuery(kqlFieldQuery(fieldName, quoteString("foo*: {(})"))), fieldName, "foo*: {(})"); + } + } + + public void testParseWildcardMatchFieldQuery() { + for (String fieldName : List.of(TEXT_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME)) { + // Single word + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo*")), fieldName, "foo*"); + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "*foo")), fieldName, "*foo"); + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "fo*o")), fieldName, "fo*o"); + + // Multiple words + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "fo* bar")), fieldName, "fo* bar"); + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo * bar")), fieldName, "foo * bar"); + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "* foo bar")), fieldName, "* foo bar"); + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo bar *")), fieldName, "foo bar *"); + + // Check Lucene query string special chars are escaped + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo*[bar]")), fieldName, "foo*\\[bar\\]"); + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "+foo* -bar")), fieldName, "\\+foo* \\-bar"); + + // Trailing operators AND, NOT, OR are terms of the match query + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo* AND")), fieldName, "foo* AND"); + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo* OR")), fieldName, "foo* OR"); + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "foo* NOT")), fieldName, "foo* NOT"); + + // Leading operators AND, NOT, OR are terms of the match query + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "AND foo*")), fieldName, "AND foo*"); + assertQueryStringBuilder(parseKqlQuery(kqlFieldQuery(fieldName, "OR foo*")), fieldName, "OR foo*"); + } + } + + public void testParseWildcardKeywordFieldQuery() { + // Single word + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo*")), KEYWORD_FIELD_NAME, "fo*"); + + // Multiple words + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo* bar")), KEYWORD_FIELD_NAME, "fo* bar"); + + // Escaped keywords + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo* \\and bar")), KEYWORD_FIELD_NAME, "fo* and bar"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo* \\or bar")), KEYWORD_FIELD_NAME, "fo* or bar"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "\\not fo* bar")), KEYWORD_FIELD_NAME, "not fo* bar"); + + // Escaped characters + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo* \\* bar")), KEYWORD_FIELD_NAME, "fo* * bar"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo*\\(bar\\)")), KEYWORD_FIELD_NAME, "fo*(bar)"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo*\\{bar\\}")), KEYWORD_FIELD_NAME, "fo*{bar}"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo*\\:bar")), KEYWORD_FIELD_NAME, "fo*:bar"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo*\\bar")), KEYWORD_FIELD_NAME, "fo*>bar"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo* \\\\ bar")), KEYWORD_FIELD_NAME, "fo* \\ bar"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo*\\\"bar\\\"")), KEYWORD_FIELD_NAME, "fo*\"bar\""); + + // Wrapping terms into parentheses + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "(fo* baz)")), KEYWORD_FIELD_NAME, "fo* baz"); + + // Trailing operators (AND, NOT, OR) are terms of the match query + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo* AND")), KEYWORD_FIELD_NAME, "fo* AND"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo* OR")), KEYWORD_FIELD_NAME, "fo* OR"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "fo* NOT")), KEYWORD_FIELD_NAME, "fo* NOT"); + + // Leading operators (AND, OR) are terms of the match query + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "AND fo*")), KEYWORD_FIELD_NAME, "AND fo*"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "OR fo*")), KEYWORD_FIELD_NAME, "OR fo*"); + assertWildcardQueryBuilder(parseKqlQuery(kqlFieldQuery(KEYWORD_FIELD_NAME, "NOT fo*")), KEYWORD_FIELD_NAME, "NOT fo*"); + } + + public void testFieldWildcardFieldQueries() { + List queries = List.of("foo", "foo bar", quoteString("foo"), "foo*"); + for (String query : queries) { + for (String fieldNamePattern : List.of("mapped_*", "*")) { + List searchableFields = searchableFields(fieldNamePattern); + String kqlQuery = kqlFieldQuery(fieldNamePattern, query); + BoolQueryBuilder parsedQuery = asInstanceOf(BoolQueryBuilder.class, parseKqlQuery(kqlQuery)); + assertThat(parsedQuery.mustNot(), empty()); + assertThat(parsedQuery.must(), empty()); + assertThat(parsedQuery.filter(), empty()); + assertThat(parsedQuery.minimumShouldMatch(), equalTo("1")); + assertThat(parsedQuery.should(), hasSize(searchableFields.size())); + + assertThat( + parsedQuery.should(), + containsInAnyOrder(searchableFields.stream().map(fieldName -> parseKqlQuery(kqlFieldQuery(fieldName, query))).toArray()) + ); + } + } + } + + private static String kqlFieldQuery(String field, Object value) { + return wrapWithRandomWhitespaces(field) + wrapWithRandomWhitespaces(":") + wrapWithRandomWhitespaces(value.toString()); + } + + private static String quoteString(String input) { + return Strings.format("\"%s\"", input); + } +} diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserFieldlessQueryTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserFieldlessQueryTests.java new file mode 100644 index 0000000000000..c1f080fdc1eb4 --- /dev/null +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserFieldlessQueryTests.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.elasticsearch.index.query.MultiMatchQueryBuilder; + +public class KqlParserFieldlessQueryTests extends AbstractKqlParserTestCase { + + public void testParseUnquotedLiteralQuery() { + // Single word + assertMultiMatchQuery(parseKqlQuery("foo"), "foo", MultiMatchQueryBuilder.Type.BEST_FIELDS); + // Multiple words + assertMultiMatchQuery(parseKqlQuery("foo bar baz"), "foo bar baz", MultiMatchQueryBuilder.Type.BEST_FIELDS); + + // Escaped keywords + assertMultiMatchQuery(parseKqlQuery("foo \\and bar"), "foo and bar", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("foo \\or bar"), "foo or bar", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("\\not foo bar"), "not foo bar", MultiMatchQueryBuilder.Type.BEST_FIELDS); + + // With an escaped characters + assertMultiMatchQuery(parseKqlQuery("foo \\* bar"), "foo * bar", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("foo\\(bar\\)"), "foo(bar)", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("foo\\{bar\\}"), "foo{bar}", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("foo\\:bar"), "foo:bar", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("foo\\bar"), "foo>bar", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("foo \\\\ bar"), "foo \\ bar", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("foo\\\"bar\\\""), "foo\"bar\"", MultiMatchQueryBuilder.Type.BEST_FIELDS); + + // Wrapping terms into parentheses + assertMultiMatchQuery(parseKqlQuery("(foo baz)"), "foo baz", MultiMatchQueryBuilder.Type.BEST_FIELDS); + + // Trailing operators (AND, NOT, OR) are terms of the match query + assertMultiMatchQuery(parseKqlQuery("foo AND"), "foo AND", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("foo OR"), "foo OR", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("foo NOT"), "foo NOT", MultiMatchQueryBuilder.Type.BEST_FIELDS); + + // Leading operators (AND, OR) are terms of the match query + assertMultiMatchQuery(parseKqlQuery("AND foo"), "AND foo", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("OR foo"), "OR foo", MultiMatchQueryBuilder.Type.BEST_FIELDS); + + // Lonely operators (AND, NOT, OR) + assertMultiMatchQuery(parseKqlQuery("AND"), "AND", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("AND AND"), "AND AND", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("AND OR"), "AND OR", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("AND NOT"), "AND NOT", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("OR"), "OR", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("OR AND"), "OR AND", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("OR OR"), "OR OR", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("OR NOT"), "OR NOT", MultiMatchQueryBuilder.Type.BEST_FIELDS); + assertMultiMatchQuery(parseKqlQuery("NOT"), "NOT", MultiMatchQueryBuilder.Type.BEST_FIELDS); + } + + public void testParseWildcardQuery() { + // Single word + assertQueryStringBuilder(parseKqlQuery("foo*"), "foo*"); + assertQueryStringBuilder(parseKqlQuery("*foo"), "*foo"); + assertQueryStringBuilder(parseKqlQuery("fo*o"), "fo*o"); + + // Multiple words + assertQueryStringBuilder(parseKqlQuery("fo* bar"), "fo* bar"); + assertQueryStringBuilder(parseKqlQuery("foo * bar"), "foo * bar"); + assertQueryStringBuilder(parseKqlQuery("* foo bar"), "* foo bar"); + assertQueryStringBuilder(parseKqlQuery("foo bar *"), "foo bar *"); + + // Check Lucene query string special chars are escaped + assertQueryStringBuilder(parseKqlQuery("foo*[bar]"), "foo*\\[bar\\]"); + assertQueryStringBuilder(parseKqlQuery("+foo* -bar"), "\\+foo* \\-bar"); + + // Trailing operators AND, NOT, OR are terms of the match query + assertQueryStringBuilder(parseKqlQuery("foo* AND"), "foo* AND"); + assertQueryStringBuilder(parseKqlQuery("foo* OR"), "foo* OR"); + assertQueryStringBuilder(parseKqlQuery("foo* NOT"), "foo* NOT"); + + // Leading operators AND, NOT, OR are terms of the match query + assertQueryStringBuilder(parseKqlQuery("AND foo*"), "AND foo*"); + assertQueryStringBuilder(parseKqlQuery("OR foo*"), "OR foo*"); + } + + public void testParseQuotedStringQuery() { + // Single word + assertMultiMatchQuery(parseKqlQuery("\"foo\""), "foo", MultiMatchQueryBuilder.Type.PHRASE); + // Multiple words + assertMultiMatchQuery(parseKqlQuery("\"foo bar\""), "foo bar", MultiMatchQueryBuilder.Type.PHRASE); + // Containing unescaped KQL reserved keyword + assertMultiMatchQuery(parseKqlQuery("\"not foo and bar or baz\""), "not foo and bar or baz", MultiMatchQueryBuilder.Type.PHRASE); + // Containing unescaped KQL reserved characters + assertMultiMatchQuery(parseKqlQuery("\"foo*: {(})\""), "foo*: {(})", MultiMatchQueryBuilder.Type.PHRASE); + } +} diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserRangeQueryTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserRangeQueryTests.java new file mode 100644 index 0000000000000..8a076d611b5f8 --- /dev/null +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserRangeQueryTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.elasticsearch.core.Strings; + +import java.util.List; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class KqlParserRangeQueryTests extends AbstractKqlParserTestCase { + + public void testParseLtQuery() { + for (String fieldName : List.of(DATE_FIELD_NAME, TEXT_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, KEYWORD_FIELD_NAME)) { + for (String query : queryValueSamples()) { + assertRangeQueryBuilder(parseKqlQuery(Strings.format("%s < %s", fieldName, query)), fieldName, (rangeQuery) -> { + assertThat(rangeQuery.from(), nullValue()); + assertThat(rangeQuery.to(), equalTo(query)); + assertThat(rangeQuery.includeUpper(), equalTo(false)); + + // For range query adding quote does not change the generated of the query. + assertThat(parseKqlQuery(Strings.format("%s < %s", fieldName, quoteString(query))), equalTo(rangeQuery)); + }); + } + } + } + + public void testParseLteQuery() { + for (String fieldName : List.of(DATE_FIELD_NAME, TEXT_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, KEYWORD_FIELD_NAME)) { + for (String query : queryValueSamples()) { + assertRangeQueryBuilder(parseKqlQuery(Strings.format("%s <= %s", fieldName, query)), fieldName, (rangeQuery) -> { + assertThat(rangeQuery.from(), nullValue()); + assertThat(rangeQuery.to(), equalTo(query)); + assertThat(rangeQuery.includeUpper(), equalTo(true)); + // For range query adding quote does not change the generated of the query. + assertThat(parseKqlQuery(Strings.format("%s <= %s", fieldName, quoteString(query))), equalTo(rangeQuery)); + }); + } + } + } + + public void testParseGtQuery() { + for (String fieldName : List.of(DATE_FIELD_NAME, TEXT_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, KEYWORD_FIELD_NAME)) { + for (String query : queryValueSamples()) { + assertRangeQueryBuilder(parseKqlQuery(Strings.format("%s > %s", fieldName, query)), fieldName, (rangeQuery) -> { + assertThat(rangeQuery.to(), nullValue()); + assertThat(rangeQuery.from(), equalTo(query)); + assertThat(rangeQuery.includeLower(), equalTo(false)); + // For range query adding quote does not change the generated of the query. + assertThat(parseKqlQuery(Strings.format("%s > %s", fieldName, quoteString(query))), equalTo(rangeQuery)); + }); + } + } + } + + public void testParseGteQuery() { + for (String fieldName : List.of(DATE_FIELD_NAME, TEXT_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, KEYWORD_FIELD_NAME)) { + for (String query : queryValueSamples()) { + assertRangeQueryBuilder(parseKqlQuery(Strings.format("%s >= %s", fieldName, query)), fieldName, (rangeQuery) -> { + assertThat(rangeQuery.to(), nullValue()); + assertThat(rangeQuery.from(), equalTo(query)); + assertThat(rangeQuery.includeLower(), equalTo(true)); + // For range query adding quote does not change the generated of the query. + assertThat(parseKqlQuery(Strings.format("%s >= %s", fieldName, quoteString(query))), equalTo(rangeQuery)); + }); + } + } + } + + private static List queryValueSamples() { + return Stream.of(randomIdentifier(), randomTimeValue(), randomInt(), randomDouble()).map(Object::toString).toList(); + } + + private static String quoteString(String input) { + return Strings.format("\"%s\"", input); + } +} diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserTests.java index 58b162409412d..b9055ae166aa7 100644 --- a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserTests.java +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserTests.java @@ -7,109 +7,81 @@ package org.elasticsearch.xpack.kql.parser; -import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.test.AbstractBuilderTestCase; +import org.elasticsearch.index.query.QueryBuilder; -import java.io.BufferedReader; import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.net.URL; -import java.net.URLConnection; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isA; -public class KqlParserTests extends AbstractBuilderTestCase { +public class KqlParserTests extends AbstractKqlParserTestCase { public void testEmptyQueryParsing() { - KqlParser parser = new KqlParser(); - SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - assertThat(parser.parseKqlQuery("", searchExecutionContext), isA(MatchAllQueryBuilder.class)); + { + // In Kql, an empty query is a match_all query. + assertThat(parseKqlQuery(""), isA(MatchAllQueryBuilder.class)); + } + + for (int runs = 0; runs < 100; runs++) { + // Also testing that a query that is composed only of whitespace chars returns a match_all query. + String kqlQuery = randomWhitespaces(); + assertThat(parseKqlQuery(kqlQuery), isA(MatchAllQueryBuilder.class)); + } + } + + public void testMatchAllQuery() { + assertThat(parseKqlQuery("*"), isA(MatchAllQueryBuilder.class)); + assertThat(parseKqlQuery(wrapWithRandomWhitespaces("*")), isA(MatchAllQueryBuilder.class)); + assertThat(parseKqlQuery("*:*"), isA(MatchAllQueryBuilder.class)); + assertThat(parseKqlQuery(String.join(wrapWithRandomWhitespaces(":"), "*", "*")), isA(MatchAllQueryBuilder.class)); } - public void testSupportedQueries() throws Exception { - KqlParser parser = new KqlParser(); - SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); + public void testParenthesizedQuery() throws IOException { + for (String baseQuuery : readQueries(SUPPORTED_QUERY_FILE_PATH)) { + // For each supported query, wrap it into parentheses and check query remains the same. + // Adding random whitespaces as well and test they are ignored. + String parenthesizedQuery = wrapWithRandomWhitespaces("(") + baseQuuery + wrapWithRandomWhitespaces(")"); + assertThat(parseKqlQuery(parenthesizedQuery), equalTo(parseKqlQuery(baseQuuery))); + } + } - for (String query : readQueries("/supported-queries")) { + public void testSupportedQueries() throws IOException { + for (String query : readQueries(SUPPORTED_QUERY_FILE_PATH)) { try { - parser.parseKqlQuery(query, searchExecutionContext); + QueryBuilder parsedQuery = parseKqlQuery(query); + + // leading and trailing whitespaces does not change the query builing result: + assertThat(parseKqlQuery(wrapWithRandomWhitespaces(query)), equalTo(parsedQuery)); } catch (Throwable e) { throw new AssertionError("Unexpected error during query parsing [ " + query + "]", e); } } } - public void testUnsupportedQueries() throws Exception { - KqlParser parser = new KqlParser(); - SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - - for (String query : readQueries("/unsupported-queries")) { + public void testUnsupportedQueries() throws IOException { + for (String query : readQueries(UNSUPPORTED_QUERY_FILE_PATH)) { assertThrows( "Was expecting a KqlParsingException exception to be thrown while parsing query [" + query + "]", KqlParsingException.class, - () -> parser.parseKqlQuery(query, searchExecutionContext) + () -> parseKqlQuery(query) ); } } public void testSyntaxErrorsHandling() { - KqlParser parser = new KqlParser(); - SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - { - KqlParsingException e = assertThrows( - KqlParsingException.class, - () -> parser.parseKqlQuery("foo: \"bar", searchExecutionContext) - ); + KqlParsingException e = assertThrows(KqlParsingException.class, () -> parseKqlQuery("foo: \"bar")); assertThat(e.getLineNumber(), equalTo(1)); assertThat(e.getColumnNumber(), equalTo(6)); assertThat(e.getMessage(), equalTo("line 1:6: token recognition error at: '\"bar'")); } { - KqlParsingException e = assertThrows( - KqlParsingException.class, - () -> parser.parseKqlQuery("foo: (bar baz AND qux", searchExecutionContext) - ); + KqlParsingException e = assertThrows(KqlParsingException.class, () -> parseKqlQuery("foo: (bar baz AND qux")); assertThat(e.getLineNumber(), equalTo(1)); assertThat(e.getColumnNumber(), equalTo(15)); assertThat(e.getMessage(), equalTo("line 1:15: missing ')' at 'AND'")); } } - - private static List readQueries(String source) throws Exception { - URL url = KqlParserTests.class.getResource(source); - Objects.requireNonNull(source, "Cannot find resource " + url); - - List queries = new ArrayList<>(); - - try (BufferedReader reader = new BufferedReader(new InputStreamReader(readFromJarUrl(url), StandardCharsets.UTF_8))) { - String line; - - while ((line = reader.readLine()) != null) { - String query = line.trim(); - // ignore comments - if (query.isEmpty() == false && query.startsWith("//") == false) { - queries.add(line.trim()); - } - } - } - return queries; - } - - @SuppressForbidden(reason = "test reads from jar") - private static InputStream readFromJarUrl(URL source) throws IOException { - URLConnection con = source.openConnection(); - // do not to cache files (to avoid keeping file handles around) - con.setUseCaches(false); - return con.getInputStream(); - } } diff --git a/x-pack/plugin/kql/src/test/resources/supported-queries b/x-pack/plugin/kql/src/test/resources/supported-queries index d9378cf9041c2..4911c9e3ebecd 100644 --- a/x-pack/plugin/kql/src/test/resources/supported-queries +++ b/x-pack/plugin/kql/src/test/resources/supported-queries @@ -73,6 +73,7 @@ foo:OR foo:NOT foo AND foo OR +foo NOT AND foo OR foo NOT @@ -102,9 +103,9 @@ nested_field: { sub_nested_field : { foo_field:foo } AND foo_field:foo bar } // Queries with escape sequences foo_field : (foo\(bar\)) foo_field : foo\:bar -foo_field : (foo \and bar) -foo_field : (foo \or bar) -foo_field : foo \not bar +foo_field : (foo \\and bar) +foo_field : (foo \\or bar) +foo_field : foo \\not bar foo_field : foo \{bar\} foo_field : foo \(bar\) foo_field : foo \\ bar diff --git a/x-pack/plugin/kql/src/test/resources/unsupported-queries b/x-pack/plugin/kql/src/test/resources/unsupported-queries index 97a26f16db141..64901891c6786 100644 --- a/x-pack/plugin/kql/src/test/resources/unsupported-queries +++ b/x-pack/plugin/kql/src/test/resources/unsupported-queries @@ -5,6 +5,9 @@ foo_field < foo_field > foo_field >= foo_field <= +>= foo +: "foo" +: foo // Parentheses mismatch foo_field: (foo bar From 22d1ce763b96d94996760dc83f03d3c454a191ad Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:21:35 +1100 Subject: [PATCH 269/324] Mute org.elasticsearch.xpack.ccr.action.ShardFollowTaskReplicationTests testRetryBulkShardOperations #116080 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 49465cc4fc09f..b65e871071563 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -293,6 +293,9 @@ tests: - class: org.elasticsearch.indices.state.CloseIndexIT method: testConcurrentClose issue: https://github.com/elastic/elasticsearch/issues/116073 +- class: org.elasticsearch.xpack.ccr.action.ShardFollowTaskReplicationTests + method: testRetryBulkShardOperations + issue: https://github.com/elastic/elasticsearch/issues/116080 # Examples: # From 7de15a0cb872fb4ec9da3ff2b5ff6f9caed49ea2 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:57:36 +1100 Subject: [PATCH 270/324] Mute org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT test {yaml=logsdb/10_settings/logsdb with default ignore dynamic beyond limit and default sorting} #116062 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index b65e871071563..ff83cac1e19c7 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -296,6 +296,9 @@ tests: - class: org.elasticsearch.xpack.ccr.action.ShardFollowTaskReplicationTests method: testRetryBulkShardOperations issue: https://github.com/elastic/elasticsearch/issues/116080 +- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT + method: test {yaml=logsdb/10_settings/logsdb with default ignore dynamic beyond limit and default sorting} + issue: https://github.com/elastic/elasticsearch/issues/116062 # Examples: # From 0aa25a9023ce17d8ea5ec27bda8dbeb6b291f286 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Fri, 1 Nov 2024 13:07:03 +0000 Subject: [PATCH 271/324] Fix and unmute VectorSystemPropertyTests (#115927) --- libs/native/build.gradle | 5 +++++ .../nativeaccess/VectorSystemPropertyTests.java | 6 +++++- muted-tests.yml | 3 --- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/libs/native/build.gradle b/libs/native/build.gradle index eff8f82434461..50263017c2565 100644 --- a/libs/native/build.gradle +++ b/libs/native/build.gradle @@ -21,6 +21,11 @@ dependencies { } } +tasks.named("test").configure { + // this is necessary so that VectorSystemPropertyTests can invoke java + systemProperty "tests.system_call_filter", "false" +} + tasks.withType(CheckForbiddenApisTask).configureEach { replaceSignatureFiles 'jdk-signatures' } diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java index 4cdf04fe5a294..334069b4b554c 100644 --- a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.nativeaccess; +import org.apache.lucene.util.Constants; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase.WithoutSecurityManager; @@ -34,6 +35,8 @@ public class VectorSystemPropertyTests extends ESTestCase { @BeforeClass public static void setup() throws Exception { + assumeTrue("native scorers are not on Windows", Constants.WINDOWS == false); + var classBytes = InMemoryJavaCompiler.compile("p.Test", TEST_SOURCE); Map jarEntries = new HashMap<>(); jarEntries.put("p/Test.class", classBytes); @@ -47,7 +50,8 @@ public void testSystemPropertyDisabled() throws Exception { var process = new ProcessBuilder( getJavaExecutable(), "-D" + ENABLE_JDK_VECTOR_LIBRARY + "=false", - "-Xms4m", + "-Xms16m", + "-Xmx16m", "-cp", jarPath + File.pathSeparator + System.getProperty("java.class.path"), "-Des.nativelibs.path=" + System.getProperty("es.nativelibs.path"), diff --git a/muted-tests.yml b/muted-tests.yml index ff83cac1e19c7..41dd32d882637 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -17,9 +17,6 @@ tests: - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" -- class: org.elasticsearch.nativeaccess.VectorSystemPropertyTests - method: testSystemPropertyDisabled - issue: https://github.com/elastic/elasticsearch/issues/110949 - class: org.elasticsearch.packaging.test.DockerTests method: test021InstallPlugin issue: https://github.com/elastic/elasticsearch/issues/110343 From a6cd2652a602dc9c79f3873ab04c9c0879d4eacb Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Fri, 1 Nov 2024 09:17:26 -0400 Subject: [PATCH 272/324] [ML] Fix streaming test regex (#115589) Replace regex with string parsing. Fix #114788 --- muted-tests.yml | 3 --- ...rverSentEventsRestActionListenerTests.java | 23 ++++++++++--------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 41dd32d882637..06ce42a1601c2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -197,9 +197,6 @@ tests: - class: org.elasticsearch.xpack.security.operator.OperatorPrivilegesIT method: testEveryActionIsEitherOperatorOnlyOrNonOperator issue: https://github.com/elastic/elasticsearch/issues/102992 -- class: org.elasticsearch.xpack.inference.rest.ServerSentEventsRestActionListenerTests - method: testNoStream - issue: https://github.com/elastic/elasticsearch/issues/114788 - class: org.elasticsearch.xpack.remotecluster.RemoteClusterSecurityWithApmTracingRestIT method: testTracingCrossCluster issue: https://github.com/elastic/elasticsearch/issues/112731 diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListenerTests.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListenerTests.java index f3cefa04c2911..ab3f466f3c11f 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListenerTests.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListenerTests.java @@ -64,7 +64,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; import java.util.function.Supplier; -import java.util.regex.Pattern; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -414,17 +413,19 @@ public void testErrorMidStream() { assertThat(collector.stringsVerified.getLast(), equalTo(expectedExceptionAsServerSentEvent)); } - public void testNoStream() throws IOException { - var pattern = Pattern.compile("^\uFEFFevent: message\ndata: \\{\"result\":\".*\"}\n\n\uFEFFevent: message\ndata: \\[DONE]\n\n$"); + public void testNoStream() { + var collector = new RandomStringCollector(); + var expectedTestCount = randomIntBetween(2, 30); var request = new Request(RestRequest.Method.POST.name(), NO_STREAM_ROUTE); - var response = getRestClient().performRequest(request); - assertThat(response.getStatusLine().getStatusCode(), is(HttpStatus.SC_OK)); - var responseString = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); - - assertThat( - "Expected " + responseString + " to match pattern " + pattern.pattern(), - pattern.matcher(responseString).matches(), - is(true) + request.setOptions( + RequestOptions.DEFAULT.toBuilder() + .setHttpAsyncResponseConsumerFactory(() -> new AsyncResponseConsumer(collector)) + .addParameter(REQUEST_COUNT, String.valueOf(expectedTestCount)) + .build() ); + var response = callAsync(request); + assertThat(response.getStatusLine().getStatusCode(), is(HttpStatus.SC_OK)); + assertThat(collector.stringsVerified.size(), equalTo(2)); // single payload count + done byte + assertThat(collector.stringsVerified.peekLast(), equalTo("[DONE]")); } } From 9c3b973ca66453afd760a88f5273ecd945974fdd Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Fri, 1 Nov 2024 15:22:05 +0200 Subject: [PATCH 273/324] ReplicationOperation exceptions should not escape (#116074) We introduce ActionListener.run() in order to ensure the RefCountingListener introduced by PR #115341 , is the single point that is failed upon exceptions, and no exception escapes through the ReplicationOperation.execute() method. Fixes #116071 Fixes #116081 Fixes #116073 --- .../replication/ReplicationOperation.java | 101 +++++++++--------- 1 file changed, 52 insertions(+), 49 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 43167e206bfb5..257073f6ceb11 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -115,28 +115,25 @@ public void execute() throws Exception { ); resultListener.onResponse(primaryResult); }, resultListener::onFailure))) { - final String activeShardCountFailure = checkActiveShardCount(); - final ShardRouting primaryRouting = primary.routingEntry(); - final ShardId primaryId = primaryRouting.shardId(); - if (activeShardCountFailure != null) { - pendingActionsListener.acquire() - .onFailure( - new UnavailableShardsException( - primaryId, - "{} Timeout: [{}], request: [{}]", - activeShardCountFailure, - request.timeout(), - request - ) + ActionListener.run(pendingActionsListener.acquire(), (primaryCoordinationListener) -> { // triggered when we finish coordination + final String activeShardCountFailure = checkActiveShardCount(); + final ShardRouting primaryRouting = primary.routingEntry(); + final ShardId primaryId = primaryRouting.shardId(); + if (activeShardCountFailure != null) { + throw new UnavailableShardsException( + primaryId, + "{} Timeout: [{}], request: [{}]", + activeShardCountFailure, + request.timeout(), + request ); - return; - } + } - totalShards.incrementAndGet(); - var primaryCoordinationPendingActionListener = pendingActionsListener.acquire(); // triggered when we finish all coordination - primary.perform(request, primaryCoordinationPendingActionListener.delegateFailureAndWrap((l, primaryResult) -> { - handlePrimaryResult(primaryResult, l, pendingActionsListener); - })); + totalShards.incrementAndGet(); + primary.perform(request, primaryCoordinationListener.delegateFailureAndWrap((l, primaryResult) -> { + handlePrimaryResult(primaryResult, l, pendingActionsListener); + })); + }); } } @@ -153,24 +150,25 @@ private void handlePrimaryResult( } final ReplicationGroup replicationGroup = primary.getReplicationGroup(); - var primaryOperationPendingActionListener = pendingActionsListener.acquire(); - replicasProxy.onPrimaryOperationComplete( - replicaRequest, - replicationGroup.getRoutingTable(), - ActionListener.wrap(ignored -> primaryOperationPendingActionListener.onResponse(null), exception -> { - totalShards.incrementAndGet(); - shardReplicaFailures.add( - new ReplicationResponse.ShardInfo.Failure( - primary.routingEntry().shardId(), - null, - exception, - ExceptionsHelper.status(exception), - false - ) - ); - primaryOperationPendingActionListener.onResponse(null); - }) - ); + ActionListener.run(pendingActionsListener.acquire(), primaryOperationPendingActionListener -> { + replicasProxy.onPrimaryOperationComplete( + replicaRequest, + replicationGroup.getRoutingTable(), + ActionListener.wrap(ignored -> primaryOperationPendingActionListener.onResponse(null), exception -> { + totalShards.incrementAndGet(); + shardReplicaFailures.add( + new ReplicationResponse.ShardInfo.Failure( + primary.routingEntry().shardId(), + null, + exception, + ExceptionsHelper.status(exception), + false + ) + ); + primaryOperationPendingActionListener.onResponse(null); + }) + ); + }); // we have to get the replication group after successfully indexing into the primary in order to honour recovery semantics. // we have to make sure that every operation indexed into the primary after recovery start will also be replicated @@ -234,13 +232,14 @@ private void markUnavailableShardsAsStale( ) { // if inSyncAllocationIds contains allocation ids of shards that don't exist in RoutingTable, mark copies as stale for (String allocationId : replicationGroup.getUnavailableInSyncShards()) { - var staleCopyPendingActionListener = pendingActionsListener.acquire(); - replicasProxy.markShardCopyAsStaleIfNeeded( - replicaRequest.shardId(), - allocationId, - primaryTerm, - staleCopyPendingActionListener.delegateResponse((l, e) -> onNoLongerPrimary(e, l)) - ); + ActionListener.run(pendingActionsListener.acquire(), (staleCopyPendingActionListener) -> { + replicasProxy.markShardCopyAsStaleIfNeeded( + replicaRequest.shardId(), + allocationId, + primaryTerm, + staleCopyPendingActionListener.delegateResponse((l, e) -> onNoLongerPrimary(e, l)) + ); + }); } } @@ -285,13 +284,17 @@ private void performOnReplica( logger.trace("[{}] sending op [{}] to replica {} for request [{}]", shard.shardId(), opType, shard, replicaRequest); } totalShards.incrementAndGet(); - var replicationPendingActionListener = pendingActionsListener.acquire(); - ActionListener.run(replicationPendingActionListener, (listener) -> { + ActionListener.run(pendingActionsListener.acquire(), (replicationPendingActionListener) -> { final ActionListener replicationListener = new ActionListener<>() { @Override public void onResponse(ReplicaResponse response) { successfulShards.incrementAndGet(); - updateCheckPoints(shard, response::localCheckpoint, response::globalCheckpoint, () -> listener.onResponse(null)); + updateCheckPoints( + shard, + response::localCheckpoint, + response::globalCheckpoint, + () -> replicationPendingActionListener.onResponse(null) + ); } @Override @@ -325,7 +328,7 @@ public void onFailure(Exception replicaException) { primaryTerm, message, replicaException, - listener.delegateResponse((l, e) -> onNoLongerPrimary(e, l)) + replicationPendingActionListener.delegateResponse((l, e) -> onNoLongerPrimary(e, l)) ); } From 38acee99cc80e85c9adc580e543c89a651a6e25a Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 1 Nov 2024 14:26:28 +0100 Subject: [PATCH 274/324] Fix leak in DfsQueryPhase and introduce search disconnect stress test (#116060) Fixing an obvious leak and finally adding a stress test for search disconnects. --- docs/changelog/116060.yaml | 6 + .../basic/SearchWithRandomDisconnectsIT.java | 103 ++++++++++++++++++ .../action/search/DfsQueryPhase.java | 2 +- .../discovery/AbstractDisruptionTestCase.java | 4 +- 4 files changed, 112 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/116060.yaml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomDisconnectsIT.java diff --git a/docs/changelog/116060.yaml b/docs/changelog/116060.yaml new file mode 100644 index 0000000000000..b067677ed41d9 --- /dev/null +++ b/docs/changelog/116060.yaml @@ -0,0 +1,6 @@ +pr: 116060 +summary: Fix leak in `DfsQueryPhase` and introduce search disconnect stress test +area: Search +type: bug +issues: + - 115056 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomDisconnectsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomDisconnectsIT.java new file mode 100644 index 0000000000000..d2c7e10f8aa62 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomDisconnectsIT.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ +package org.elasticsearch.search.basic; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.AbstractDisruptionTestCase; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.test.disruption.NetworkDisruption; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +public class SearchWithRandomDisconnectsIT extends AbstractDisruptionTestCase { + + public void testSearchWithRandomDisconnects() throws InterruptedException, ExecutionException { + // make sure we have a couple data nodes + int minDataNodes = randomIntBetween(3, 7); + internalCluster().ensureAtLeastNumDataNodes(minDataNodes); + final int indexCount = randomIntBetween(minDataNodes, 10 * minDataNodes); + final String[] indexNames = IntStream.range(0, indexCount).mapToObj(i -> "test-" + i).toArray(String[]::new); + final Settings indexSettings = indexSettings(1, 0).put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), false) + .build(); + for (String indexName : indexNames) { + createIndex(indexName, indexSettings); + } + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + for (String indexName : indexNames) { + for (int i = 0; i < randomIntBetween(1, 10); i++) { + bulkRequestBuilder = bulkRequestBuilder.add(prepareIndex(indexName).setCreate(false).setSource("foo", "bar-" + i)); + } + } + assertFalse(bulkRequestBuilder.get().hasFailures()); + final AtomicBoolean done = new AtomicBoolean(); + final int concurrentSearches = randomIntBetween(2, 5); + final List> futures = new ArrayList<>(concurrentSearches); + for (int i = 0; i < concurrentSearches; i++) { + final PlainActionFuture finishFuture = new PlainActionFuture<>(); + futures.add(finishFuture); + prepareRandomSearch().execute(new ActionListener<>() { + @Override + public void onResponse(SearchResponse searchResponse) { + runMoreSearches(); + } + + @Override + public void onFailure(Exception e) { + runMoreSearches(); + } + + private void runMoreSearches() { + if (done.get() == false) { + prepareRandomSearch().execute(this); + } else { + finishFuture.onResponse(null); + } + } + }); + } + for (int i = 0, n = randomIntBetween(50, 100); i < n; i++) { + NetworkDisruption networkDisruption = new NetworkDisruption( + isolateNode(internalCluster().getRandomNodeName()), + NetworkDisruption.DISCONNECT + ); + setDisruptionScheme(networkDisruption); + networkDisruption.startDisrupting(); + networkDisruption.stopDisrupting(); + internalCluster().clearDisruptionScheme(); + ensureFullyConnectedCluster(); + } + done.set(true); + for (PlainActionFuture future : futures) { + future.get(); + } + ensureGreen(DISRUPTION_HEALING_OVERHEAD, indexNames); + assertAcked(indicesAdmin().prepareDelete(indexNames)); + } + + private static SearchRequestBuilder prepareRandomSearch() { + return prepareSearch("*").setQuery(new MatchAllQueryBuilder()) + .setSize(9999) + .setFetchSource(true) + .setAllowPartialSearchResults(randomBoolean()); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 93c8d66447e34..cec841d1b9b4f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -96,7 +96,7 @@ public void run() { connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); } catch (Exception e) { shardFailure(e, querySearchRequest, shardIndex, shardTarget, counter); - return; + continue; } searchTransportService.sendExecuteQuery( connection, diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index 62ffc069b155b..1c3f237f852e5 100644 --- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -48,7 +48,7 @@ public abstract class AbstractDisruptionTestCase extends ESIntegTestCase { - static final TimeValue DISRUPTION_HEALING_OVERHEAD = TimeValue.timeValueSeconds(40); // we use 30s as timeout in many places. + public static final TimeValue DISRUPTION_HEALING_OVERHEAD = TimeValue.timeValueSeconds(40); // we use 30s as timeout in many places. @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { @@ -220,7 +220,7 @@ NetworkDisruption addRandomDisruptionType(TwoPartitions partitions) { return partition; } - TwoPartitions isolateNode(String isolatedNode) { + protected TwoPartitions isolateNode(String isolatedNode) { Set side1 = new HashSet<>(); Set side2 = new HashSet<>(Arrays.asList(internalCluster().getNodeNames())); side1.add(isolatedNode); From dbf55f54d602f27bc9810658f04991f1a4ff22e6 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 00:27:04 +1100 Subject: [PATCH 275/324] Mute org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests #116087 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 06ce42a1601c2..c75e1a9e40aa2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -293,6 +293,8 @@ tests: - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT method: test {yaml=logsdb/10_settings/logsdb with default ignore dynamic beyond limit and default sorting} issue: https://github.com/elastic/elasticsearch/issues/116062 +- class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests + issue: https://github.com/elastic/elasticsearch/issues/116087 # Examples: # From 30beb1168b5fd4a2afb60019c420b3e6ae8f79d3 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 1 Nov 2024 09:33:10 -0400 Subject: [PATCH 276/324] ESQL: Fix DEBUG log of filter (#116086) This fixes the `DEBUG` level logging of `WHERE`. Previously it was using a `ThrowingDriverContext` to render the description - but this throwing context will, well, throw if you touch it. This removes the `ThrowingDriverContext` entirely because it's never the right thing to use - instead we just use the `toString` of the factories. That works fine because we made sure that the factories had a nice `toString`. Closes #116055 --- docs/changelog/116086.yaml | 6 ++ .../compute/operator/FilterOperator.java | 2 +- .../operator/ThrowingDriverContext.java | 84 ------------------- 3 files changed, 7 insertions(+), 85 deletions(-) create mode 100644 docs/changelog/116086.yaml delete mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java diff --git a/docs/changelog/116086.yaml b/docs/changelog/116086.yaml new file mode 100644 index 0000000000000..73ad77d637a46 --- /dev/null +++ b/docs/changelog/116086.yaml @@ -0,0 +1,6 @@ +pr: 116086 +summary: "ESQL: Fix DEBUG log of filter" +area: ES|QL +type: bug +issues: + - 116055 diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java index 81d788611125b..5b8d485c4da3a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java @@ -28,7 +28,7 @@ public Operator get(DriverContext driverContext) { @Override public String describe() { - return "FilterOperator[evaluator=" + evaluatorSupplier.get(new ThrowingDriverContext()) + "]"; + return "FilterOperator[evaluator=" + evaluatorSupplier + "]"; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java deleted file mode 100644 index 05bda58b34a6b..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.operator; - -import org.elasticsearch.common.breaker.NoopCircuitBreaker; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.ByteArray; -import org.elasticsearch.common.util.DoubleArray; -import org.elasticsearch.common.util.FloatArray; -import org.elasticsearch.common.util.IntArray; -import org.elasticsearch.common.util.LongArray; -import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.core.Releasable; - -/** - * A driver context that doesn't support any interaction. Consider it as a place holder where we need a dummy driver context. - */ -final class ThrowingDriverContext extends DriverContext { - ThrowingDriverContext() { - super(new ThrowingBigArrays(), BlockFactory.getInstance(new NoopCircuitBreaker("throwing-context"), new ThrowingBigArrays())); - } - - @Override - public BigArrays bigArrays() { - throw unsupported(); - } - - @Override - public BlockFactory blockFactory() { - throw unsupported(); - } - - @Override - public boolean addReleasable(Releasable releasable) { - throw unsupported(); - } - - @Override - public void addAsyncAction() { - throw unsupported(); - } - - static UnsupportedOperationException unsupported() { - assert false : "ThrowingDriverContext doesn't support any interaction"; - throw new UnsupportedOperationException("ThrowingDriverContext doesn't support any interaction"); - } - - static class ThrowingBigArrays extends BigArrays { - - ThrowingBigArrays() { - super(null, null, "fake"); - } - - @Override - public ByteArray newByteArray(long size, boolean clearOnResize) { - throw unsupported(); - } - - @Override - public IntArray newIntArray(long size, boolean clearOnResize) { - throw unsupported(); - } - - @Override - public LongArray newLongArray(long size, boolean clearOnResize) { - throw unsupported(); - } - - @Override - public FloatArray newFloatArray(long size, boolean clearOnResize) { - throw unsupported(); - } - - @Override - public DoubleArray newDoubleArray(long size, boolean clearOnResize) { - throw unsupported(); - } - } -} From 62d075d1ac3e89b5e56a9e01f8d7650f56dcfcf5 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Fri, 1 Nov 2024 13:46:46 +0000 Subject: [PATCH 277/324] Include test features in feature name generation (#116078) --- .../features/HistoricalFeaturesMetadataExtractor.java | 5 ++++- .../features/HistoricalFeaturesMetadataExtractorTests.java | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java b/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java index f25dbf256039d..3ffa27126fac8 100644 --- a/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java +++ b/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java @@ -30,6 +30,7 @@ import java.util.Map; import java.util.ServiceLoader; import java.util.Set; +import java.util.stream.Stream; public class HistoricalFeaturesMetadataExtractor { private final ClassLoader classLoader; @@ -93,7 +94,9 @@ void extractHistoricalFeatureMetadata(CheckedBiConsumer featureSpecLoader = ServiceLoader.load(FeatureSpecification.class, classLoader); for (FeatureSpecification featureSpecification : featureSpecLoader) { historicalFeatures.putAll(featureSpecification.getHistoricalFeatures()); - featureSpecification.getFeatures().stream().map(NodeFeature::id).forEach(featureNames::add); + Stream.concat(featureSpecification.getFeatures().stream(), featureSpecification.getTestFeatures().stream()) + .map(NodeFeature::id) + .forEach(featureNames::add); } metadataConsumer.accept(historicalFeatures, featureNames); } diff --git a/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractorTests.java b/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractorTests.java index b01472bcf463c..e230982073699 100644 --- a/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractorTests.java +++ b/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractorTests.java @@ -31,6 +31,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.not; @@ -49,6 +50,7 @@ public void testExtractHistoricalMetadata() throws IOException { }); assertThat(nodeFeatureVersionMap, not(anEmptyMap())); assertThat(featureNamesSet, not(empty())); + assertThat(featureNamesSet, hasItem("test_features_enabled")); Path outputFile = temporaryFolder.newFile().toPath(); extractor.generateMetadataFile(outputFile); From ee5229466544fbe4340710ee462bddd2e1e2279c Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 1 Nov 2024 10:05:21 -0400 Subject: [PATCH 278/324] Unmute #111529 (#115884) Unmutes the SearchServiceTests closes: https://github.com/elastic/elasticsearch/issues/111529 --- muted-tests.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index c75e1a9e40aa2..dc05432f7de1f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -40,8 +40,6 @@ tests: - class: org.elasticsearch.xpack.restart.FullClusterRestartIT method: testDataStreams {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111448 -- class: org.elasticsearch.search.SearchServiceTests - issue: https://github.com/elastic/elasticsearch/issues/111529 - class: org.elasticsearch.upgrades.FullClusterRestartIT method: testSnapshotRestore {cluster=OLD} issue: https://github.com/elastic/elasticsearch/issues/111777 @@ -248,6 +246,12 @@ tests: - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testProcessFileChanges issue: https://github.com/elastic/elasticsearch/issues/115280 +- class: org.elasticsearch.search.SearchServiceTests + method: testWaitOnRefreshTimeout + issue: https://github.com/elastic/elasticsearch/issues/115935 +- class: org.elasticsearch.search.SearchServiceTests + method: testParseSourceValidation + issue: https://github.com/elastic/elasticsearch/issues/115936 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_crud/Test delete given model referenced by pipeline} issue: https://github.com/elastic/elasticsearch/issues/115970 From 281416d043f5332e4e384c414ae207e041d7e397 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 01:13:09 +1100 Subject: [PATCH 279/324] Mute org.elasticsearch.compute.operator.FilterOperatorTests testSimpleDescription #116094 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index dc05432f7de1f..7ca8be7bd8ae5 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -299,6 +299,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/116062 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 +- class: org.elasticsearch.compute.operator.FilterOperatorTests + method: testSimpleDescription + issue: https://github.com/elastic/elasticsearch/issues/116094 # Examples: # From a222e16a0e9b7f5078148e2bfcfdb277fb688791 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 1 Nov 2024 15:45:46 +0100 Subject: [PATCH 280/324] Delay creation of the next SearchPhase in executeNextPhase (#116061) Delaying the creation of the next phase to only when we actually need it makes this a lot easier to reason about and should set up further simplications. Eager creation of the next phase forced a lot of needlessly complicated safety logic around resources on us. Since we never "close" the `nextPhase` on failure all its resources need to be tracked in via `context.addReleasable`. This isn't as much of an issue with some recent refactorings leaving very little resource creation in the constructors but still, delaying things saves memory and makes reasoning about failure cases far easier. --- .../action/search/AbstractSearchAsyncAction.java | 6 ++++-- .../org/elasticsearch/action/search/DfsQueryPhase.java | 2 +- .../elasticsearch/action/search/ExpandSearchPhase.java | 2 +- .../org/elasticsearch/action/search/FetchSearchPhase.java | 8 +++++--- .../org/elasticsearch/action/search/RankFeaturePhase.java | 2 +- .../elasticsearch/action/search/SearchPhaseContext.java | 3 ++- .../action/search/MockSearchPhaseContext.java | 4 +++- 7 files changed, 17 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index cf25c5730d341..317d117174e94 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -55,6 +55,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.Consumer; +import java.util.function.Supplier; import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; @@ -343,7 +344,7 @@ protected abstract void executePhaseOnShard( ); @Override - public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase) { + public final void executeNextPhase(SearchPhase currentPhase, Supplier nextPhaseSupplier) { /* This is the main search phase transition where we move to the next phase. If all shards * failed or if there was a failure and partial results are not allowed, then we immediately * fail. Otherwise we continue to the next phase. @@ -387,6 +388,7 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha } return; } + var nextPhase = nextPhaseSupplier.get(); if (logger.isTraceEnabled()) { final String resultsFrom = results.getSuccessfulResults() .map(r -> r.getSearchShardTarget().toString()) @@ -697,7 +699,7 @@ private void raisePhaseFailure(SearchPhaseExecutionException exception) { * @see #onShardResult(SearchPhaseResult, SearchShardIterator) */ final void onPhaseDone() { // as a tribute to @kimchy aka. finishHim() - executeNextPhase(this, getNextPhase(results, this)); + executeNextPhase(this, () -> getNextPhase(results, this)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index cec841d1b9b4f..0b587e72141ff 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -78,7 +78,7 @@ public void run() { final CountedCollector counter = new CountedCollector<>( queryResult, searchResults.size(), - () -> context.executeNextPhase(this, nextPhaseFactory.apply(queryResult)), + () -> context.executeNextPhase(this, () -> nextPhaseFactory.apply(queryResult)), context ); diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 5457ca60d0da4..968d9dac958fa 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -164,6 +164,6 @@ private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilde } private void onPhaseDone() { - context.executeNextPhase(this, nextPhase.get()); + context.executeNextPhase(this, nextPhase); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 29aba0eee1f55..d7b847d835b83 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -269,9 +269,11 @@ private void moveToNextPhase( AtomicArray fetchResultsArr, SearchPhaseController.ReducedQueryPhase reducedQueryPhase ) { - var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); - context.addReleasable(resp::decRef); - context.executeNextPhase(this, nextPhaseFactory.apply(resp, searchPhaseShardResults)); + context.executeNextPhase(this, () -> { + var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); + context.addReleasable(resp::decRef); + return nextPhaseFactory.apply(resp, searchPhaseShardResults); + }); } private boolean shouldExplainRankScores(SearchRequest request) { diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java index e37d6d1729f9f..05213eb94b750 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java @@ -236,6 +236,6 @@ private float maxScore(ScoreDoc[] scoreDocs) { } void moveToNextPhase(SearchPhaseResults phaseResults, SearchPhaseController.ReducedQueryPhase reducedQueryPhase) { - context.executeNextPhase(this, new FetchSearchPhase(phaseResults, aggregatedDfs, context, reducedQueryPhase)); + context.executeNextPhase(this, () -> new FetchSearchPhase(phaseResults, aggregatedDfs, context, reducedQueryPhase)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java index 871be0a349a7f..d048887b69c97 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java @@ -19,6 +19,7 @@ import org.elasticsearch.transport.Transport; import java.util.concurrent.Executor; +import java.util.function.Supplier; /** * This class provide contextual state and access to resources across multiple search phases. @@ -120,7 +121,7 @@ default void sendReleaseSearchContext( * of the next phase. If there are no successful operations in the context when this method is executed the search is aborted and * a response is returned to the user indicating that all shards have failed. */ - void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase); + void executeNextPhase(SearchPhase currentPhase, Supplier nextPhaseSupplier); /** * Registers a {@link Releasable} that will be closed when the search request finishes or fails. diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 5d4d60f6805b1..5395e4569901a 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -30,6 +30,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; /** * SearchPhaseContext for tests @@ -132,7 +133,8 @@ public SearchTransportService getSearchTransport() { } @Override - public void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase) { + public void executeNextPhase(SearchPhase currentPhase, Supplier nextPhaseSupplier) { + var nextPhase = nextPhaseSupplier.get(); try { nextPhase.run(); } catch (Exception e) { From d5931941aecffc866419963506f5475806d5733b Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 02:04:51 +1100 Subject: [PATCH 281/324] Mute org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilderIT testPinnedPromotions #116097 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 7ca8be7bd8ae5..684e1d07b0ec3 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -302,6 +302,9 @@ tests: - class: org.elasticsearch.compute.operator.FilterOperatorTests method: testSimpleDescription issue: https://github.com/elastic/elasticsearch/issues/116094 +- class: org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilderIT + method: testPinnedPromotions + issue: https://github.com/elastic/elasticsearch/issues/116097 # Examples: # From d93d333141fbea72ec082592a526ce59bcbe92f3 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Fri, 1 Nov 2024 16:18:12 +0100 Subject: [PATCH 282/324] Remove checking of sync commit ids (#114246) A Lucene commit doesn't contain sync ids `SegmentInfos` anymore, so we can't rely on them during recovery. The fields was marked as deprecated in #102343. --- docs/reference/cat/shards.asciidoc | 9 +- rest-api-spec/build.gradle | 1 + .../test/cat.shards/10_basic.yml | 1 - .../gateway/RecoveryFromGatewayIT.java | 10 -- .../allocation/NodeAllocationResult.java | 17 +-- .../gateway/ReplicaShardAllocator.java | 14 +-- .../elasticsearch/index/engine/Engine.java | 3 - .../org/elasticsearch/index/store/Store.java | 8 -- .../recovery/RecoverySourceHandler.java | 103 ++++-------------- .../TransportNodesListShardStoreMetadata.java | 15 +-- .../rest/action/cat/RestShardsAction.java | 10 +- .../allocation/NodeAllocationResultTests.java | 3 - .../gateway/ReplicaShardAllocatorTests.java | 31 ------ .../index/engine/InternalEngineTests.java | 19 +--- .../elasticsearch/index/store/StoreTests.java | 7 -- .../recovery/RecoverySourceHandlerTests.java | 45 -------- .../action/cat/RestShardsActionTests.java | 5 +- x-pack/plugin/stack/qa/rest/build.gradle | 1 + 18 files changed, 41 insertions(+), 261 deletions(-) diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index 16f52a11f026a..87dcb01838bfd 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -9,13 +9,13 @@ ==== cat APIs are only intended for human consumption using the command line or {kib} console. -They are _not_ intended for use by applications. For application +They are _not_ intended for use by applications. For application consumption, use the <>. ==== The `shards` command is the detailed view of all nodes' shard <>. -It will tell you if the shard is a primary or replica, the number of docs, the -bytes it takes on disk, the node where it's located, and if the shard is +It will tell you if the shard is a primary or replica, the number of docs, the +bytes it takes on disk, the node where it's located, and if the shard is currently <>. For <>, the API returns information about the stream's backing indices. @@ -258,9 +258,6 @@ Time spent in suggest, such as `0`. `suggest.total`, `suto`, `suggestTotal`:: Number of suggest operations, such as `0`. -`sync_id`:: -Sync ID of the shard. - `unassigned.at`, `ua`:: Time at which the shard became unassigned in {wikipedia}/List_of_UTC_time_offsets[Coordinated Universal Time (UTC)]. diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index b9064ab1d79ad..c520fcd4a7f81 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -65,4 +65,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("indices.create/20_synthetic_source/nested object with unmapped fields", "temporary until backported") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") + task.skipTest("cat.shards/10_basic/Help", "sync_id is removed in 9.0") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml index 511ff63d2095d..03d8b2068d23e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -19,7 +19,6 @@ ip .+ \n id .+ \n node .+ \n - sync_id .+ \n unassigned.reason .+ \n unassigned.at .+ \n unassigned.for .+ \n diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 3561ce2e8cf00..65f290fa5a783 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -14,8 +14,6 @@ import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.action.admin.indices.stats.IndexStats; -import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.ElectionSchedulerFactory; @@ -30,7 +28,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MergePolicyConfig; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; @@ -577,13 +574,6 @@ public Settings onNodeStopped(String nodeName) throws Exception { } } - public void assertSyncIdsNotNull() { - IndexStats indexStats = indicesAdmin().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - } - public void testStartedShardFoundIfStateNotYetProcessed() throws Exception { // nodes may need to report the shards they processed the initial recovered cluster state from the master final String nodeName = internalCluster().startNode(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java index cb33ec4ecebed..8f4ac1a906bbc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java @@ -210,19 +210,8 @@ public String getAllocationId() { return allocationId; } - /** - * Returns {@code true} if the shard copy has a matching sync id with the primary shard. - * Returns {@code false} if the shard copy does not have a matching sync id with the primary - * shard, or this explanation pertains to the allocation of a primary shard, in which case - * matching sync ids are irrelevant. - */ - public boolean hasMatchingSyncId() { - return matchingBytes == Long.MAX_VALUE; - } - /** * Gets the number of matching bytes the shard copy has with the primary shard. - * Returns {@code Long.MAX_VALUE} if {@link #hasMatchingSyncId()} returns {@code true}. * Returns -1 if not applicable (this value only applies to assigning replica shards). */ public long getMatchingBytes() { @@ -263,11 +252,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("allocation_id", allocationId); } if (matchingBytes >= 0) { - if (hasMatchingSyncId()) { - builder.field("matching_sync_id", true); - } else { - builder.humanReadableField("matching_size_in_bytes", "matching_size", ByteSizeValue.ofBytes(matchingBytes)); - } + builder.humanReadableField("matching_size_in_bytes", "matching_size", ByteSizeValue.ofBytes(matchingBytes)); } if (storeException != null) { builder.startObject("store_exception"); diff --git a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index a16f432ad1696..0907fc3009945 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -439,14 +439,6 @@ private static long computeMatchingBytes( return sizeMatched; } - private static boolean hasMatchingSyncId( - TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, - TransportNodesListShardStoreMetadata.StoreFilesMetadata replicaStore - ) { - String primarySyncId = primaryStore.syncId(); - return primarySyncId != null && primarySyncId.equals(replicaStore.syncId()); - } - private static MatchingNode computeMatchingNode( DiscoveryNode primaryNode, TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, @@ -455,8 +447,7 @@ private static MatchingNode computeMatchingNode( ) { final long retainingSeqNoForPrimary = primaryStore.getPeerRecoveryRetentionLeaseRetainingSeqNo(primaryNode); final long retainingSeqNoForReplica = primaryStore.getPeerRecoveryRetentionLeaseRetainingSeqNo(replicaNode); - final boolean isNoopRecovery = (retainingSeqNoForReplica >= retainingSeqNoForPrimary && retainingSeqNoForPrimary >= 0) - || hasMatchingSyncId(primaryStore, replicaStore); + final boolean isNoopRecovery = (retainingSeqNoForReplica >= retainingSeqNoForPrimary && retainingSeqNoForPrimary >= 0); final long matchingBytes = computeMatchingBytes(primaryStore, replicaStore); return new MatchingNode(matchingBytes, retainingSeqNoForReplica, isNoopRecovery); } @@ -470,9 +461,6 @@ private static boolean canPerformOperationBasedRecovery( if (targetNodeStore == null || targetNodeStore.storeFilesMetadata().isEmpty()) { return false; } - if (hasMatchingSyncId(primaryStore, targetNodeStore.storeFilesMetadata())) { - return true; - } return primaryStore.getPeerRecoveryRetentionLeaseRetainingSeqNo(targetNode) >= 0; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 7d23a64b14b86..edafa1ca922fb 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -58,7 +58,6 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.DocumentParser; @@ -117,8 +116,6 @@ public abstract class Engine implements Closeable { - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_INDEXING) // TODO: Remove sync_id in 9.0 - public static final String SYNC_COMMIT_ID = "sync_id"; public static final String HISTORY_UUID_KEY = "history_uuid"; public static final String FORCE_MERGE_UUID_KEY = "force_merge_uuid"; public static final String MIN_RETAINED_SEQNO = "min_retained_seq_no"; diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index c3d21b23d6a49..0df2ed648f0bb 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -1150,14 +1150,6 @@ private int numSegmentFiles() { // only for asserts return count; } - /** - * Returns the sync id of the commit point that this MetadataSnapshot represents. - * - * @return sync id if exists, else null - */ - public String getSyncId() { - return commitUserData.get(Engine.SYNC_COMMIT_ID); - } } /** diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 30fea41330038..3603b984fb148 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -534,56 +534,30 @@ void phase1(IndexCommit snapshot, long startingSeqNo, IntSupplier translogOps, A ); } } - // When sync ids were used we could use them to check if two shard copies were equivalent, - // if that's the case we can skip sending files from the source shard to the target shard. // If the shard uses the current replication mechanism, we have to compute the recovery plan, // and it is still possible to skip the sending files from the source shard to the target shard // using a different mechanism to determine it. - // TODO: is this still relevant today? - if (hasSameLegacySyncId(recoverySourceMetadata, request.metadataSnapshot()) == false) { - cancellableThreads.checkForCancel(); - SubscribableListener - // compute the plan - .newForked( - l -> recoveryPlannerService.computeRecoveryPlan( - shard.shardId(), - shardStateIdentifier, - recoverySourceMetadata, - request.metadataSnapshot(), - startingSeqNo, - translogOps.getAsInt(), - getRequest().targetNode().getMaxIndexVersion(), - canUseSnapshots(), - request.isPrimaryRelocation(), - l - ) + cancellableThreads.checkForCancel(); + SubscribableListener + // compute the plan + .newForked( + l -> recoveryPlannerService.computeRecoveryPlan( + shard.shardId(), + shardStateIdentifier, + recoverySourceMetadata, + request.metadataSnapshot(), + startingSeqNo, + translogOps.getAsInt(), + getRequest().targetNode().getMaxIndexVersion(), + canUseSnapshots(), + request.isPrimaryRelocation(), + l ) - // perform the file recovery - .andThen((l, plan) -> recoverFilesFromSourceAndSnapshot(plan, store, stopWatch, l)) - // and respond - .addListener(listener); - } else { - logger.trace("skipping [phase1] since source and target have identical sync id [{}]", recoverySourceMetadata.getSyncId()); - SubscribableListener - // but we must still create a retention lease - .newForked(leaseListener -> createRetentionLease(startingSeqNo, leaseListener)) - // and then compute the result of sending no files - .andThenApply(ignored -> { - final TimeValue took = stopWatch.totalTime(); - logger.trace("recovery [phase1]: took [{}]", took); - return new SendFileResult( - Collections.emptyList(), - Collections.emptyList(), - 0L, - Collections.emptyList(), - Collections.emptyList(), - 0L, - took - ); - }) - // and finally respond - .addListener(listener); - } + ) + // perform the file recovery + .andThen((l, plan) -> recoverFilesFromSourceAndSnapshot(plan, store, stopWatch, l)) + // and respond + .addListener(listener); } catch (Exception e) { throw new RecoverFilesRecoveryException(request.shardId(), 0, ByteSizeValue.ZERO, e); } @@ -1030,43 +1004,6 @@ private ActionListener wrapLeaseSyncListener(ActionListener return new ThreadedActionListener<>(shard.getThreadPool().generic(), listener).map(ignored -> null); } - boolean hasSameLegacySyncId(Store.MetadataSnapshot source, Store.MetadataSnapshot target) { - if (source.getSyncId() == null || source.getSyncId().equals(target.getSyncId()) == false) { - return false; - } - if (source.numDocs() != target.numDocs()) { - throw new IllegalStateException( - "try to recover " - + request.shardId() - + " from primary shard with sync id but number " - + "of docs differ: " - + source.numDocs() - + " (" - + request.sourceNode().getName() - + ", primary) vs " - + target.numDocs() - + "(" - + request.targetNode().getName() - + ")" - ); - } - SequenceNumbers.CommitInfo sourceSeqNos = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(source.commitUserData().entrySet()); - SequenceNumbers.CommitInfo targetSeqNos = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(target.commitUserData().entrySet()); - if (sourceSeqNos.localCheckpoint() != targetSeqNos.localCheckpoint() || targetSeqNos.maxSeqNo() != sourceSeqNos.maxSeqNo()) { - final String message = "try to recover " - + request.shardId() - + " with sync id but " - + "seq_no stats are mismatched: [" - + source.commitUserData() - + "] vs [" - + target.commitUserData() - + "]"; - assert false : message; - throw new IllegalStateException(message); - } - return true; - } - void prepareTargetForTranslog(int totalTranslogOps, ActionListener listener) { StopWatch stopWatch = new StopWatch().start(); final ActionListener wrappedListener = ActionListener.wrap(nullVal -> { diff --git a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetadata.java index 345a4b41139cb..c633046e56ab5 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -259,22 +259,9 @@ public long getPeerRecoveryRetentionLeaseRetainingSeqNo(DiscoveryNode node) { .orElse(-1L); } - /** - * @return commit sync id if exists, else null - */ - public String syncId() { - return metadataSnapshot.getSyncId(); - } - @Override public String toString() { - return "StoreFilesMetadata{" - + ", metadataSnapshot{size=" - + metadataSnapshot.size() - + ", syncId=" - + metadataSnapshot.getSyncId() - + "}" - + '}'; + return "StoreFilesMetadata{" + ", metadataSnapshot{size=" + metadataSnapshot.size() + "}" + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 3bee9c5f3b04c..8b5e21de2d741 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -23,11 +23,11 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.ListenableFuture; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.bulk.stats.BulkStats; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.engine.CommitStats; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.flush.FlushStats; @@ -123,7 +123,9 @@ protected Table getTableWithHeader(final RestRequest request) { .addCell("id", "default:false;desc:unique id of node where it lives") .addCell("node", "default:true;alias:n;desc:name of node where it lives"); - table.addCell("sync_id", "alias:sync_id;default:false;desc:sync id"); + if (request.getRestApiVersion() == RestApiVersion.V_8) { + table.addCell("sync_id", "alias:sync_id;default:false;desc:sync id"); + } table.addCell("unassigned.reason", "alias:ur;default:false;desc:reason shard became unassigned"); table.addCell("unassigned.at", "alias:ua;default:false;desc:time shard became unassigned (UTC)"); @@ -320,7 +322,9 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(null); } - table.addCell(commitStats == null ? null : commitStats.getUserData().get(Engine.SYNC_COMMIT_ID)); + if (request.getRestApiVersion() == RestApiVersion.V_8) { + table.addCell(null); + } if (shard.unassignedInfo() != null) { table.addCell(shard.unassignedInfo().reason()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResultTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResultTests.java index 1fcc6a29f37f6..0d676c7e6618b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResultTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResultTests.java @@ -48,7 +48,6 @@ public void testShardStore() throws IOException { assertEquals(matchingBytes, explanation.getShardStoreInfo().getMatchingBytes()); assertNull(explanation.getShardStoreInfo().getAllocationId()); assertFalse(explanation.getShardStoreInfo().isInSync()); - assertFalse(explanation.getShardStoreInfo().hasMatchingSyncId()); String allocId = randomAlphaOfLength(5); boolean inSync = randomBoolean(); @@ -60,7 +59,6 @@ public void testShardStore() throws IOException { assertNodeExplanationEquals(explanation, readExplanation); assertEquals(inSync, explanation.getShardStoreInfo().isInSync()); assertEquals(-1, explanation.getShardStoreInfo().getMatchingBytes()); - assertFalse(explanation.getShardStoreInfo().hasMatchingSyncId()); assertEquals(allocId, explanation.getShardStoreInfo().getAllocationId()); } @@ -72,7 +70,6 @@ private void assertNodeExplanationEquals(NodeAllocationResult expl1, NodeAllocat assertEquals(expl1.getShardStoreInfo().isInSync(), expl2.getShardStoreInfo().isInSync()); assertEquals(expl1.getShardStoreInfo().getAllocationId(), expl2.getShardStoreInfo().getAllocationId()); assertEquals(expl1.getShardStoreInfo().getMatchingBytes(), expl2.getShardStoreInfo().getMatchingBytes()); - assertEquals(expl1.getShardStoreInfo().hasMatchingSyncId(), expl2.getShardStoreInfo().hasMatchingSyncId()); } else { assertNull(expl2.getShardStoreInfo()); } diff --git a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index b90aee98384a6..b84edd1f3eb29 100644 --- a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLease; import org.elasticsearch.index.shard.ShardId; @@ -149,22 +148,6 @@ public void testSimpleFullMatchAllocation() { ); } - /** - * Verifies that when there is a sync id match but no files match, we allocate it to matching node. - */ - public void testSyncIdMatch() { - RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); - DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; - testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) - .addData(nodeToMatch, "MATCH", new StoreFileMetadata("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - allocateAllUnassigned(allocation); - assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertThat( - shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(), - equalTo(nodeToMatch.getId()) - ); - } - /** * Verifies that when there is no sync id match but files match, we allocate it to matching node. */ @@ -439,17 +422,6 @@ public void testDelayedAllocation() { ); } - public void testCancelRecoveryBetterSyncId() { - RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) - .addData(node2, "NO_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) - .addData(node3, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.processExistingRecoveries(allocation, shardRouting -> true); - assertThat(allocation.routingNodesChanged(), equalTo(true)); - assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(1)); - assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); - } - public void testNotCancellingRecoveryIfSyncedOnExistingRecovery() { final UnassignedInfo unassignedInfo; if (randomBoolean()) { @@ -688,9 +660,6 @@ TestAllocator addData( filesAsMap.put(file.name(), file); } Map commitData = new HashMap<>(); - if (syncId != null) { - commitData.put(Engine.SYNC_COMMIT_ID, syncId); - } data.put( node, new TransportNodesListShardStoreMetadata.StoreFilesMetadata( diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index bba1fa338559f..9d8c5649f0dce 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1282,13 +1282,11 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { null, globalCheckpoint::get ); - final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null); engine.index(indexForDoc(doc)); globalCheckpoint.set(0L); engine.flush(); - syncFlush(indexWriterHolder.get(), engine, syncId); - assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); + syncFlush(indexWriterHolder.get(), engine); EngineConfig config = engine.config(); if (randomBoolean()) { engine.close(); @@ -1306,7 +1304,6 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { } engine = new InternalEngine(config); recoverFromTranslog(engine, translogHandler, Long.MAX_VALUE); - assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); } public void testSyncedFlushVanishesOnReplay() throws IOException { @@ -1327,31 +1324,21 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { null, globalCheckpoint::get ); - final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null); globalCheckpoint.set(engine.getProcessedLocalCheckpoint()); engine.index(indexForDoc(doc)); engine.flush(); - syncFlush(indexWriterHolder.get(), engine, syncId); - assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); + syncFlush(indexWriterHolder.get(), engine); doc = testParsedDocument("2", null, testDocumentWithTextField(), new BytesArray("{}"), null); engine.index(indexForDoc(doc)); EngineConfig config = engine.config(); engine.close(); engine = new InternalEngine(config); recoverFromTranslog(engine, translogHandler, Long.MAX_VALUE); - assertNull( - "Sync ID must be gone since we have a document to replay", - engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID) - ); } - void syncFlush(IndexWriter writer, InternalEngine engine, String syncId) throws IOException { + void syncFlush(IndexWriter writer, InternalEngine engine) throws IOException { try (var ignored = engine.acquireEnsureOpenRef()) { - Map userData = new HashMap<>(); - writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue())); - userData.put(Engine.SYNC_COMMIT_ID, syncId); - writer.setLiveCommitData(userData.entrySet()); writer.commit(); } } diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index 36ece00ccc0ca..db52d52c72794 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -52,7 +52,6 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.FilterIndexOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.ShardLock; @@ -996,17 +995,12 @@ public void testUserDataRead() throws IOException { Document doc = new Document(); doc.add(new TextField("id", "1", Field.Store.NO)); writer.addDocument(doc); - Map commitData = Maps.newMapWithExpectedSize(2); - String syncId = "a sync id"; - commitData.put(Engine.SYNC_COMMIT_ID, syncId); - writer.setLiveCommitData(commitData.entrySet()); writer.commit(); writer.close(); Store.MetadataSnapshot metadata; metadata = store.getMetadata(randomBoolean() ? null : deletionPolicy.snapshot()); assertFalse(metadata.fileMetadataMap().isEmpty()); // do not check for correct files, we have enough tests for that above - assertThat(metadata.commitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId)); TestUtil.checkIndex(store.directory()); assertDeleteContent(store, store.directory()); IOUtils.close(store); @@ -1041,7 +1035,6 @@ public void testStreamStoreFilesMetadata() throws Exception { for (StoreFileMetadata inFile : inStoreFileMetadata) { assertThat(inFile.name(), equalTo(outFiles.next().name())); } - assertThat(outStoreFileMetadata.syncId(), equalTo(inStoreFileMetadata.syncId())); assertThat(outStoreFileMetadata.peerRecoveryRetentionLeases(), equalTo(peerRecoveryRetentionLeases)); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 00fddfcecd432..d039c265c98ae 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -1079,50 +1079,6 @@ void createRetentionLease(long startingSeqNo, ActionListener lis store.close(); } - public void testVerifySeqNoStatsWhenRecoverWithSyncId() throws Exception { - IndexShard shard = mock(IndexShard.class); - when(shard.state()).thenReturn(IndexShardState.STARTED); - RecoverySourceHandler handler = new RecoverySourceHandler( - shard, - new TestRecoveryTargetHandler(), - threadPool, - getStartRecoveryRequest(), - between(1, 16), - between(1, 4), - between(1, 4), - between(1, 4), - false, - recoveryPlannerService - ); - - String syncId = UUIDs.randomBase64UUID(); - int numDocs = between(0, 1000); - long localCheckpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); - long maxSeqNo = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); - assertTrue( - handler.hasSameLegacySyncId( - newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs), - newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs) - ) - ); - - AssertionError error = expectThrows(AssertionError.class, () -> { - long localCheckpointOnTarget = randomValueOtherThan( - localCheckpoint, - () -> randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE) - ); - long maxSeqNoOnTarget = randomValueOtherThan( - maxSeqNo, - () -> randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE) - ); - handler.hasSameLegacySyncId( - newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs), - newMetadataSnapshot(syncId, Long.toString(localCheckpointOnTarget), Long.toString(maxSeqNoOnTarget), numDocs) - ); - }); - assertThat(error.getMessage(), containsString("try to recover [index][1] with sync id but seq_no stats are mismatched:")); - } - public void testRecoveryPlannerServiceIsUsed() throws Exception { try (Store store = newStore(createTempDir("source"), false)) { IndexShard shard = mock(IndexShard.class); @@ -1784,7 +1740,6 @@ private ShardRecoveryPlan createShardRecoveryPlanWithFallback(Store store, int s private Store.MetadataSnapshot newMetadataSnapshot(String syncId, String localCheckpoint, String maxSeqNo, int numDocs) { Map userData = new HashMap<>(); - userData.put(Engine.SYNC_COMMIT_ID, syncId); if (localCheckpoint != null) { userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpoint); } diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestShardsActionTests.java index 0c0aa09287679..29857ef4a519f 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestShardsActionTests.java @@ -100,6 +100,7 @@ public void testBuildTable() { assertThat(headers.get(7).value, equalTo("ip")); assertThat(headers.get(8).value, equalTo("id")); assertThat(headers.get(9).value, equalTo("node")); + assertThat(headers.get(10).value, equalTo("unassigned.reason")); final List> rows = table.getRows(); assertThat(rows.size(), equalTo(numShards)); @@ -114,8 +115,8 @@ public void testBuildTable() { assertThat(row.get(3).value, equalTo(shardRouting.state())); assertThat(row.get(7).value, equalTo(localNode.getHostAddress())); assertThat(row.get(8).value, equalTo(localNode.getId())); - assertThat(row.get(70).value, equalTo(shardStats.getDataPath())); - assertThat(row.get(71).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(69).value, equalTo(shardStats.getDataPath())); + assertThat(row.get(70).value, equalTo(shardStats.getStatePath())); } } } diff --git a/x-pack/plugin/stack/qa/rest/build.gradle b/x-pack/plugin/stack/qa/rest/build.gradle index ff2d67eda82f6..86f1ef7055769 100644 --- a/x-pack/plugin/stack/qa/rest/build.gradle +++ b/x-pack/plugin/stack/qa/rest/build.gradle @@ -20,4 +20,5 @@ testClusters.configureEach { tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("stack/10_basic/Test kibana reporting index auto creation", "warning does not exist for compatibility") + task.skipTest("cat.shards/10_basic/Help", "sync_id is removed in 9.0") }) From 6d4e11d6bc47a51d80ce27213cb4d23d3d49a829 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 1 Nov 2024 08:25:40 -0700 Subject: [PATCH 283/324] Add logsdb telemetry (#115994) This PR adds telemetry for logsdb. However, this change only tracks the count of indices using logsdb and those that use synthetic source. Additional stats, such as shard, indexing, and search stats, will be added in a follow-up, as they require reaching out to data nodes. --- docs/changelog/115994.yaml | 5 ++ docs/reference/rest-api/info.asciidoc | 4 + docs/reference/rest-api/usage.asciidoc | 6 ++ .../org/elasticsearch/TransportVersions.java | 1 + .../xpack/core/XPackClientPlugin.java | 4 +- .../xpack/core/XPackFeatures.java | 4 +- .../elasticsearch/xpack/core/XPackField.java | 1 + .../core/action/XPackInfoFeatureAction.java | 4 +- .../core/action/XPackUsageFeatureAction.java | 4 +- .../application/LogsDBFeatureSetUsage.java | 74 +++++++++++++++++++ .../logsdb/LogsDBInfoTransportAction.java | 41 ++++++++++ .../xpack/logsdb/LogsDBPlugin.java | 23 +++++- .../logsdb/LogsDBUsageTransportAction.java | 73 ++++++++++++++++++ .../LogsdbIndexModeSettingsProvider.java | 9 +-- .../rest-api-spec/test/logsdb/10_usage.yml | 37 ++++++++++ 15 files changed, 277 insertions(+), 13 deletions(-) create mode 100644 docs/changelog/115994.yaml create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/LogsDBFeatureSetUsage.java create mode 100644 x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBInfoTransportAction.java create mode 100644 x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_usage.yml diff --git a/docs/changelog/115994.yaml b/docs/changelog/115994.yaml new file mode 100644 index 0000000000000..ac090018c8a12 --- /dev/null +++ b/docs/changelog/115994.yaml @@ -0,0 +1,5 @@ +pr: 115994 +summary: Add logsdb telemetry +area: Logs +type: enhancement +issues: [] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index 28b6df215a18d..fda5b07d28205 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -172,6 +172,10 @@ Example response: "universal_profiling": { "available": true, "enabled": true + }, + "logsdb": { + "available": true, + "enabled": false } }, "tagline" : "You know, for X" diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc index 27cc1723265c9..b57d2aee9d190 100644 --- a/docs/reference/rest-api/usage.asciidoc +++ b/docs/reference/rest-api/usage.asciidoc @@ -518,6 +518,12 @@ GET /_xpack/usage "universal_profiling" : { "available" : true, "enabled" : true + }, + "logsdb": { + "available": true, + "enabled": false, + "indices_count": 0, + "indices_with_synthetic_source": 0 } } ------------------------------------------------------------ diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index ea3e649de9ef8..2acf80e426c82 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -186,6 +186,7 @@ static TransportVersion def(int id) { public static final TransportVersion CPU_STAT_STRING_PARSING = def(8_781_00_0); public static final TransportVersion QUERY_RULES_RETRIEVER = def(8_782_00_0); public static final TransportVersion ESQL_CCS_EXEC_INFO_WITH_FAILURES = def(8_783_00_0); + public static final TransportVersion LOGSDB_TELEMETRY = def(8_784_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 9004239478bdf..e2435c3396fa8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.core.aggregatemetric.AggregateMetricFeatureSetUsage; import org.elasticsearch.xpack.core.analytics.AnalyticsFeatureSetUsage; import org.elasticsearch.xpack.core.application.EnterpriseSearchFeatureSetUsage; +import org.elasticsearch.xpack.core.application.LogsDBFeatureSetUsage; import org.elasticsearch.xpack.core.application.ProfilingUsage; import org.elasticsearch.xpack.core.archive.ArchiveFeatureSetUsage; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; @@ -305,7 +306,8 @@ public List getNamedWriteables() { PersistentTaskParams.class, SecurityMigrationTaskParams.TASK_NAME, SecurityMigrationTaskParams::new - ) + ), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.LOGSDB, LogsDBFeatureSetUsage::new) ).filter(Objects::nonNull).toList(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java index 13404772e79a9..a7cf878511d78 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java @@ -20,12 +20,14 @@ * Provides the XPack features that this version of the code supports */ public class XPackFeatures implements FeatureSpecification { + public static final NodeFeature LOGSDB_TELEMETRY = new NodeFeature("logsdb_telemetry"); @Override public Set getFeatures() { return Set.of( NodesDataTiersUsageTransportAction.LOCALLY_PRECALCULATED_STATS_FEATURE, // Added in 8.12 - License.INDEPENDENT_TRIAL_VERSION_FEATURE // 8.14.0 + License.INDEPENDENT_TRIAL_VERSION_FEATURE, // 8.14.0 + LOGSDB_TELEMETRY ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 388868188b675..a48f5530a416f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -90,6 +90,7 @@ public final class XPackField { public static final String ENTERPRISE_GEOIP_DOWNLOADER = "enterprise_geoip_downloader"; /** Name for Universal Profiling. */ public static final String UNIVERSAL_PROFILING = "universal_profiling"; + public static final String LOGSDB = "logsdb"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java index 38b0d1a693e64..82d5db059217a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java @@ -48,6 +48,7 @@ public class XPackInfoFeatureAction { public static final ActionType ARCHIVE = xpackInfoFeatureAction(XPackField.ARCHIVE); public static final ActionType ENTERPRISE_SEARCH = xpackInfoFeatureAction(XPackField.ENTERPRISE_SEARCH); public static final ActionType UNIVERSAL_PROFILING = xpackInfoFeatureAction(XPackField.UNIVERSAL_PROFILING); + public static final ActionType LOGSDB = xpackInfoFeatureAction(XPackField.LOGSDB); public static final List> ALL = List.of( SECURITY, @@ -75,7 +76,8 @@ public class XPackInfoFeatureAction { AGGREGATE_METRIC, ARCHIVE, ENTERPRISE_SEARCH, - UNIVERSAL_PROFILING + UNIVERSAL_PROFILING, + LOGSDB ); public static ActionType xpackInfoFeatureAction(String suffix) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java index b8ca43e46ee29..13f7804848790 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java @@ -58,6 +58,7 @@ private XPackUsageFeatureAction() {/* no instances */} public static final ActionType REMOTE_CLUSTERS = xpackUsageFeatureAction(XPackField.REMOTE_CLUSTERS); public static final ActionType ENTERPRISE_SEARCH = xpackUsageFeatureAction(XPackField.ENTERPRISE_SEARCH); public static final ActionType UNIVERSAL_PROFILING = xpackUsageFeatureAction(XPackField.UNIVERSAL_PROFILING); + public static final ActionType LOGSDB = xpackUsageFeatureAction(XPackField.LOGSDB); static final List> ALL = List.of( AGGREGATE_METRIC, @@ -88,7 +89,8 @@ private XPackUsageFeatureAction() {/* no instances */} HEALTH, REMOTE_CLUSTERS, ENTERPRISE_SEARCH, - UNIVERSAL_PROFILING + UNIVERSAL_PROFILING, + LOGSDB ); public static ActionType xpackUsageFeatureAction(String suffix) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/LogsDBFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/LogsDBFeatureSetUsage.java new file mode 100644 index 0000000000000..a3473bf6224a1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/LogsDBFeatureSetUsage.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.application; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureUsage; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; +import java.util.Objects; + +public final class LogsDBFeatureSetUsage extends XPackFeatureUsage { + private final int indicesCount; + private final int indicesWithSyntheticSource; + + public LogsDBFeatureSetUsage(StreamInput input) throws IOException { + super(input); + indicesCount = input.readVInt(); + indicesWithSyntheticSource = input.readVInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(indicesCount); + out.writeVInt(indicesWithSyntheticSource); + } + + public LogsDBFeatureSetUsage(boolean available, boolean enabled, int indicesCount, int indicesWithSyntheticSource) { + super(XPackField.LOGSDB, available, enabled); + this.indicesCount = indicesCount; + this.indicesWithSyntheticSource = indicesWithSyntheticSource; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.LOGSDB_TELEMETRY; + } + + @Override + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + super.innerXContent(builder, params); + builder.field("indices_count", indicesCount); + builder.field("indices_with_synthetic_source", indicesWithSyntheticSource); + } + + @Override + public int hashCode() { + return Objects.hash(available, enabled, indicesCount, indicesWithSyntheticSource); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + LogsDBFeatureSetUsage other = (LogsDBFeatureSetUsage) obj; + return Objects.equals(available, other.available) + && Objects.equals(enabled, other.enabled) + && Objects.equals(indicesCount, other.indicesCount) + && Objects.equals(indicesWithSyntheticSource, other.indicesWithSyntheticSource); + } +} diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBInfoTransportAction.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBInfoTransportAction.java new file mode 100644 index 0000000000000..c77adbf54f985 --- /dev/null +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBInfoTransportAction.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; + +public class LogsDBInfoTransportAction extends XPackInfoFeatureTransportAction { + + private final ClusterService clusterService; + + @Inject + public LogsDBInfoTransportAction(TransportService transportService, ClusterService clusterService, ActionFilters actionFilters) { + super(XPackInfoFeatureAction.LOGSDB.name(), transportService, actionFilters); + this.clusterService = clusterService; + } + + @Override + public String name() { + return XPackField.LOGSDB; + } + + @Override + public boolean available() { + return true; + } + + @Override + public boolean enabled() { + return LogsDBPlugin.CLUSTER_LOGSDB_ENABLED.get(clusterService.getSettings()); + } +} diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java index 089be0604146f..0eb0754985c94 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java @@ -7,23 +7,34 @@ package org.elasticsearch.xpack.logsdb; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettingProvider; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import java.util.ArrayList; import java.util.Collection; import java.util.List; -import static org.elasticsearch.xpack.logsdb.LogsdbIndexModeSettingsProvider.CLUSTER_LOGSDB_ENABLED; import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseService.FALLBACK_SETTING; -public class LogsDBPlugin extends Plugin { +public class LogsDBPlugin extends Plugin implements ActionPlugin { private final Settings settings; private final SyntheticSourceLicenseService licenseService; + public static final Setting CLUSTER_LOGSDB_ENABLED = Setting.boolSetting( + "cluster.logsdb.enabled", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); private final LogsdbIndexModeSettingsProvider logsdbIndexModeSettingsProvider; @@ -61,4 +72,12 @@ public Collection getAdditionalIndexSettingProviders(Index public List> getSettings() { return List.of(FALLBACK_SETTING, CLUSTER_LOGSDB_ENABLED); } + + @Override + public List> getActions() { + List> actions = new ArrayList<>(); + actions.add(new ActionPlugin.ActionHandler<>(XPackUsageFeatureAction.LOGSDB, LogsDBUsageTransportAction.class)); + actions.add(new ActionPlugin.ActionHandler<>(XPackInfoFeatureAction.LOGSDB, LogsDBInfoTransportAction.class)); + return actions; + } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java new file mode 100644 index 0000000000000..5c385d5920428 --- /dev/null +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; +import org.elasticsearch.xpack.core.application.LogsDBFeatureSetUsage; + +import static org.elasticsearch.index.mapper.SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING; + +public class LogsDBUsageTransportAction extends XPackUsageFeatureTransportAction { + private final ClusterService clusterService; + + @Inject + public LogsDBUsageTransportAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + XPackUsageFeatureAction.LOGSDB.name(), + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver + ); + this.clusterService = clusterService; + } + + @Override + protected void masterOperation( + Task task, + XPackUsageRequest request, + ClusterState state, + ActionListener listener + ) { + int numIndices = 0; + int numIndicesWithSyntheticSources = 0; + for (IndexMetadata indexMetadata : state.metadata()) { + if (indexMetadata.getIndexMode() == IndexMode.LOGSDB) { + numIndices++; + if (INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexMetadata.getSettings()) == SourceFieldMapper.Mode.SYNTHETIC) { + numIndicesWithSyntheticSources++; + } + } + } + final boolean enabled = LogsDBPlugin.CLUSTER_LOGSDB_ENABLED.get(clusterService.getSettings()); + listener.onResponse( + new XPackUsageFeatureResponse(new LogsDBFeatureSetUsage(true, enabled, numIndices, numIndicesWithSyntheticSources)) + ); + } +} diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java index 329cd3bc8a04b..481657eaf7225 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java @@ -10,7 +10,6 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettingProvider; @@ -20,13 +19,9 @@ import java.util.List; import java.util.Locale; +import static org.elasticsearch.xpack.logsdb.LogsDBPlugin.CLUSTER_LOGSDB_ENABLED; + final class LogsdbIndexModeSettingsProvider implements IndexSettingProvider { - static final Setting CLUSTER_LOGSDB_ENABLED = Setting.boolSetting( - "cluster.logsdb.enabled", - false, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); private static final String LOGS_PATTERN = "logs-*-*"; private volatile boolean isLogsdbEnabled; diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_usage.yml new file mode 100644 index 0000000000000..63b9ba71510ed --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_usage.yml @@ -0,0 +1,37 @@ +--- +logsdb usage: + - do: + indices.create: + index: test1 + body: + settings: + index: + mode: logsdb + + - do: {xpack.usage: {}} + - match: { logsdb.available: true } + - match: { logsdb.indices_count: 1 } + - match: { logsdb.indices_with_synthetic_source: 1 } + + - do: + indices.create: + index: test2 + + - do: {xpack.usage: {}} + - match: { logsdb.available: true } + - match: { logsdb.indices_count: 1 } + - match: { logsdb.indices_with_synthetic_source: 1 } + + - do: + indices.create: + index: test3 + body: + settings: + index: + mode: logsdb + mapping.source.mode: stored + + - do: {xpack.usage: {}} + - match: { logsdb.available: true } + - match: { logsdb.indices_count: 2 } + - match: { logsdb.indices_with_synthetic_source: 1 } From 535ad91bdbb9bb947a1f035eb285aabd83ab8fa3 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Fri, 1 Nov 2024 17:03:49 +0100 Subject: [PATCH 284/324] Refine ESQL limitations (full-text, TEXT fields, unassigned indexes) (#116098) * Refine ESQL limitations (full-text, TEXT fields, unassigned indexes) This PR refactors a section of the ES|QL Limitations page to: * Refactor both full-text and text-behaves-as-keyword sections to better reflect the new behaviour (the old text implies that no full-text search of any kind exists anywhere, which immediately contradicts the statements directly above it). * Update text-behaves-as-keyword to include my recent work on making all functions return KEYWORD instead of TEXT or SEMANTIC_TEXT * Add a section on multi-index querying to cover two limitations (union types and unassigned indexes). * Fix full-text-search examples --- docs/reference/esql/esql-limitations.asciidoc | 86 ++++++++++++++++--- 1 file changed, 72 insertions(+), 14 deletions(-) diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index 72c960c1b9699..1772e956bd9e2 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -112,31 +112,36 @@ Otherwise, the query will fail with a validation error. Another limitation is that any <> command containing a full-text search function cannot also use disjunctions (`OR`). -Because of <>, -queries on `text` fields are like queries on `keyword` fields: they are -case-sensitive and need to match the full string. +For example, this query is valid: -For example, after indexing a field of type `text` with the value `Elasticsearch -query language`, the following `WHERE` clause does not match because the `LIKE` -operator is case-sensitive: [source,esql] ---- -| WHERE field LIKE "elasticsearch query language" +FROM books +| WHERE MATCH(author, "Faulkner") AND MATCH(author, "Tolkien") ---- -The following `WHERE` clause does not match either, because the `LIKE` operator -tries to match the whole string: +But this query will fail due to the <> command: + [source,esql] ---- -| WHERE field LIKE "Elasticsearch" +FROM books +| STATS AVG(price) BY author +| WHERE MATCH(author, "Faulkner") ---- -As a workaround, use wildcards and regular expressions. For example: +And this query will fail due to the disjunction: + [source,esql] ---- -| WHERE field RLIKE "[Ee]lasticsearch.*" +FROM books +| WHERE MATCH(author, "Faulkner") OR author LIKE "Hemingway" ---- +Note that, because of <>, +any queries on `text` fields that do not explicitly use the full-text functions, +<> or <>, will behave as if the fields are actually `keyword` fields: +they are case-sensitive and need to match the full string. + [discrete] [[esql-limitations-text-fields]] === `text` fields behave like `keyword` fields @@ -149,15 +154,68 @@ that. If it's not possible to retrieve a `keyword` subfield, {esql} will get the string from a document's `_source`. If the `_source` cannot be retrieved, for example when using synthetic source, `null` is returned. +Once a `text` field is retrieved, if the query touches it in any way, for example passing +it into a function, the type will be converted to `keyword`. In fact, functions that operate on both +`text` and `keyword` fields will perform as if the `text` field was a `keyword` field all along. + +For example, the following query will return a column `greatest` of type `keyword` no matter +whether any or all of `field1`, `field2`, and `field3` are of type `text`: +[source,esql] +---- +| FROM index +| EVAL greatest = GREATEST(field1, field2, field3) +---- + Note that {esql}'s retrieval of `keyword` subfields may have unexpected -consequences. An {esql} query on a `text` field is case-sensitive. Furthermore, -a subfield may have been mapped with a <>, which can +consequences. Other than when explicitly using the full-text functions, <> and <>, +any {esql} query on a `text` field is case-sensitive. + +For example, after indexing a field of type `text` with the value `Elasticsearch +query language`, the following `WHERE` clause does not match because the `LIKE` +operator is case-sensitive: +[source,esql] +---- +| WHERE field LIKE "elasticsearch query language" +---- + +The following `WHERE` clause does not match either, because the `LIKE` operator +tries to match the whole string: +[source,esql] +---- +| WHERE field LIKE "Elasticsearch" +---- + +As a workaround, use wildcards and regular expressions. For example: +[source,esql] +---- +| WHERE field RLIKE "[Ee]lasticsearch.*" +---- + +Furthermore, a subfield may have been mapped with a <>, which can transform the original string. Or it may have been mapped with <>, which can truncate the string. None of these mapping operations are applied to an {esql} query, which may lead to false positives or negatives. To avoid these issues, a best practice is to be explicit about the field that you query, and query `keyword` sub-fields instead of `text` fields. +Or consider using one of the <> functions. + +[discrete] +[[esql-multi-index-limitations]] +=== Using {esql} to query multiple indices + +As discussed in more detail in <>, {esql} can execute a single query across multiple indices, +data streams, or aliases. However, there are some limitations to be aware of: + +* All underlying indexes and shards must be active. Using admin commands or UI, + it is possible to pause an index or shard, for example by disabling a frozen tier instance, + but then any {esql} query that includes that index or shard will fail, even if the query uses + <> to filter out the results from the paused index. + If you see an error of type `search_phase_execution_exception`, + with the message `Search rejected due to missing shards`, you likely have an index or shard in `UNASSIGNED` state. +* The same field must have the same type across all indexes. If the same field is mapped to different types + it is still possible to query the indexes, + but the field must be <>. [discrete] [[esql-tsdb]] From 9640d3148628291c3a79c684d48dcd106e8aedd8 Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Fri, 1 Nov 2024 18:12:04 +0200 Subject: [PATCH 285/324] Unmute tests (#116093) That have been fixed with PR #116074 Fixes #116073 Fixes #116080 --- muted-tests.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 684e1d07b0ec3..6db21391f91ca 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -288,12 +288,6 @@ tests: - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT method: test {yaml=logsdb/10_settings/logsdb with default ignore dynamic beyond limit and subobjects false} issue: https://github.com/elastic/elasticsearch/issues/116054 -- class: org.elasticsearch.indices.state.CloseIndexIT - method: testConcurrentClose - issue: https://github.com/elastic/elasticsearch/issues/116073 -- class: org.elasticsearch.xpack.ccr.action.ShardFollowTaskReplicationTests - method: testRetryBulkShardOperations - issue: https://github.com/elastic/elasticsearch/issues/116080 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT method: test {yaml=logsdb/10_settings/logsdb with default ignore dynamic beyond limit and default sorting} issue: https://github.com/elastic/elasticsearch/issues/116062 From 7ab02befbf0963217dda4d3d56dd679799fc5376 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 04:11:37 +1100 Subject: [PATCH 286/324] Mute org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT test {p0=cat.shards/10_basic/Help} #116110 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 6db21391f91ca..22f641419604c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -299,6 +299,9 @@ tests: - class: org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilderIT method: testPinnedPromotions issue: https://github.com/elastic/elasticsearch/issues/116097 +- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT + method: test {p0=cat.shards/10_basic/Help} + issue: https://github.com/elastic/elasticsearch/issues/116110 # Examples: # From 8cc76e4754bdaa76789e89fd3018f49ebce13b8e Mon Sep 17 00:00:00 2001 From: Brendan Cully Date: Fri, 1 Nov 2024 11:49:41 -0700 Subject: [PATCH 287/324] Store: support empty stores in cleanupAndVerify (#116059) Until now if `store.cleanupAndVerify` was called on a store with no commits, it would throw `IndexNotFoundException`. Based on variable naming (`metadataOrEmpty`), this appears to be unintentional, though the issue has been present since the `cleanupAndVerify` method was introduced. This change is motivated by #104473 - I would like to be able to use this method to clean up a store prior to recovery regardless of how far along a previous recovery attempt got. --- .../main/java/org/elasticsearch/index/store/Store.java | 7 ++++++- .../java/org/elasticsearch/index/store/StoreTests.java | 9 +++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 0df2ed648f0bb..887fe486b6003 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -696,7 +696,12 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr } } directory.syncMetaData(); - final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null); + Store.MetadataSnapshot metadataOrEmpty; + try { + metadataOrEmpty = getMetadata(null); + } catch (IndexNotFoundException e) { + metadataOrEmpty = MetadataSnapshot.EMPTY; + } verifyAfterCleanup(sourceMetadata, metadataOrEmpty); } finally { metadataLock.writeLock().unlock(); diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index db52d52c72794..1dbf761a5a378 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -732,6 +732,15 @@ public void testCleanupFromSnapshot() throws IOException { IOUtils.close(store); } + public void testCleanupEmptyStore() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId)); + + store.cleanupAndVerify("test", Store.MetadataSnapshot.EMPTY); + + IOUtils.close(store); + } + public void testOnCloseCallback() throws IOException { final ShardId shardId = new ShardId( new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), "_na_"), From f5c7b4874531c65b5b6cfae5d1c7720b529893bd Mon Sep 17 00:00:00 2001 From: Mike Pellegrini Date: Fri, 1 Nov 2024 16:06:01 -0400 Subject: [PATCH 288/324] Add Default ELSER 2 Capability (#115891) --- muted-tests.yml | 6 ------ x-pack/plugin/inference/build.gradle | 2 +- .../rest/RestGetInferenceModelAction.java | 15 +++++++++++++++ .../test/inference/30_semantic_text_inference.yml | 8 ++++++-- .../test/inference/40_semantic_text_query.yml | 8 ++++++-- 5 files changed, 28 insertions(+), 11 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 22f641419604c..4c42e651f05c9 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -174,12 +174,6 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testGet issue: https://github.com/elastic/elasticsearch/issues/114135 -- class: org.elasticsearch.xpack.inference.InferenceRestIT - method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} - issue: https://github.com/elastic/elasticsearch/issues/114412 -- class: org.elasticsearch.xpack.inference.InferenceRestIT - method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} - issue: https://github.com/elastic/elasticsearch/issues/114376 - class: org.elasticsearch.packaging.test.DockerTests method: test022InstallPluginsFromLocalArchive issue: https://github.com/elastic/elasticsearch/issues/111063 diff --git a/x-pack/plugin/inference/build.gradle b/x-pack/plugin/inference/build.gradle index 6791aad6619d3..15a2d0eb41368 100644 --- a/x-pack/plugin/inference/build.gradle +++ b/x-pack/plugin/inference/build.gradle @@ -12,7 +12,7 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test' restResources { restApi { - include '_common', 'bulk', 'indices', 'inference', 'index', 'get', 'update', 'reindex', 'search', 'field_caps' + include '_common', 'bulk', 'indices', 'inference', 'index', 'get', 'update', 'reindex', 'search', 'field_caps', 'capabilities' } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceModelAction.java index 34d0f2647b2db..967ad4b46dcb3 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceModelAction.java @@ -15,8 +15,12 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.inference.action.GetInferenceModelAction; +import org.elasticsearch.xpack.inference.DefaultElserFeatureFlag; +import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.xpack.inference.rest.Paths.INFERENCE_ID; @@ -26,6 +30,7 @@ @ServerlessScope(Scope.PUBLIC) public class RestGetInferenceModelAction extends BaseRestHandler { + public static final String DEFAULT_ELSER_2_CAPABILITY = "default_elser_2"; @Override public String getName() { @@ -61,4 +66,14 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient var request = new GetInferenceModelAction.Request(inferenceEntityId, taskType); return channel -> client.execute(GetInferenceModelAction.INSTANCE, request, new RestToXContentListener<>(channel)); } + + @Override + public Set supportedCapabilities() { + Set capabilities = new HashSet<>(); + if (DefaultElserFeatureFlag.isEnabled()) { + capabilities.add(DEFAULT_ELSER_2_CAPABILITY); + } + + return Collections.unmodifiableSet(capabilities); + } } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml index b4ba527a5bf45..445df1dc302b9 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml @@ -551,8 +551,12 @@ setup: --- "Calculates embeddings using the default ELSER 2 endpoint": - requires: - cluster_features: "semantic_text.default_elser_2" - reason: semantic_text default ELSER 2 inference ID introduced in 8.16.0 + reason: "default ELSER 2 inference ID is behind a feature flag" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_inference + capabilities: [default_elser_2] - do: indices.create: diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml index 10858acc0aff8..17938f3b61a41 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml @@ -843,8 +843,12 @@ setup: --- "Query a field that uses the default ELSER 2 endpoint": - requires: - cluster_features: "semantic_text.default_elser_2" - reason: semantic_text default ELSER 2 inference ID introduced in 8.16.0 + reason: "default ELSER 2 inference ID is behind a feature flag" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_inference + capabilities: [default_elser_2] - do: indices.create: From edd4ebf1af1e6f3f551ff2fd7e4f2bd8a7337453 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Fri, 1 Nov 2024 13:29:43 -0700 Subject: [PATCH 289/324] Fix new logsdb tests (#116108) --- muted-tests.yml | 6 ------ .../rest-api-spec/test/logsdb/10_settings.yml | 15 +++++++++------ 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 4c42e651f05c9..cd6835c2a57ab 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -279,12 +279,6 @@ tests: - class: org.elasticsearch.indexing.IndexActionIT method: testAutoGenerateIdNoDuplicates issue: https://github.com/elastic/elasticsearch/issues/115716 -- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT - method: test {yaml=logsdb/10_settings/logsdb with default ignore dynamic beyond limit and subobjects false} - issue: https://github.com/elastic/elasticsearch/issues/116054 -- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT - method: test {yaml=logsdb/10_settings/logsdb with default ignore dynamic beyond limit and default sorting} - issue: https://github.com/elastic/elasticsearch/issues/116062 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 - class: org.elasticsearch.compute.operator.FilterOperatorTests diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml index 20c2ef63fc850..d0f89b1b8b6cb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml @@ -704,6 +704,7 @@ logsdb with default ignore dynamic beyond limit and default sorting: body: query: match_all: {} + sort: "@timestamp" - match: { hits.total.value: 2 } - match: { hits.hits.0._source.name: "bar" } @@ -765,15 +766,16 @@ logsdb with default ignore dynamic beyond limit and non-default sorting: body: query: match_all: {} + sort: "@timestamp" - match: { hits.total.value: 2 } - - match: { hits.hits.0._source.name: "foo" } - - match: { hits.hits.0._source.value: 10 } - - match: { hits.hits.0._source.message: "the quick brown fox" } + - match: { hits.hits.0._source.name: "bar" } + - match: { hits.hits.0._source.value: 20 } + - match: { hits.hits.0._source.message: "jumps over the lazy dog" } - match: { hits.hits.0._ignored: [ "host", "message", "pid", "region", "value" ] } - - match: { hits.hits.1._source.name: "bar" } - - match: { hits.hits.1._source.value: 20 } - - match: { hits.hits.1._source.message: "jumps over the lazy dog" } + - match: { hits.hits.1._source.name: "foo" } + - match: { hits.hits.1._source.value: 10 } + - match: { hits.hits.1._source.message: "the quick brown fox" } - match: { hits.hits.1._ignored: [ "host", "message", "pid", "region", "value" ] } --- @@ -870,6 +872,7 @@ logsdb with default ignore dynamic beyond limit and subobjects false: body: query: match_all: {} + sort: "@timestamp" - match: { hits.total.value: 2 } - match: { hits.hits.0._source.name: "bar" } From ae0c7eed09d44db2c9733c2390c9901ea55c85fd Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 08:04:19 +1100 Subject: [PATCH 290/324] [test-triage] Unmuting stale muted items --- muted-tests.yml | 44 -------------------------------------------- 1 file changed, 44 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index cd6835c2a57ab..c1bcf8ae23a7d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -2,9 +2,6 @@ tests: - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/esql/esql-async-query-api/line_17} issue: https://github.com/elastic/elasticsearch/issues/109260 -- class: "org.elasticsearch.analysis.common.CommonAnalysisClientYamlTestSuiteIT" - issue: "https://github.com/elastic/elasticsearch/issues/109318" - method: "test {yaml=analysis-common/50_char_filters/pattern_replace error handling (too complex pattern)}" - class: "org.elasticsearch.xpack.ml.integration.ClassificationHousePricingIT" issue: "https://github.com/elastic/elasticsearch/issues/101598" method: "testFeatureImportanceValues" @@ -17,20 +14,9 @@ tests: - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" -- class: org.elasticsearch.packaging.test.DockerTests - method: test021InstallPlugin - issue: https://github.com/elastic/elasticsearch/issues/110343 - class: org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectAuthIT method: testAuthenticateWithImplicitFlow issue: https://github.com/elastic/elasticsearch/issues/111191 -- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT - issue: https://github.com/elastic/elasticsearch/issues/111319 -- class: org.elasticsearch.xpack.ml.integration.InferenceIngestInputConfigIT - method: testIngestWithInputFields - issue: https://github.com/elastic/elasticsearch/issues/111383 -- class: org.elasticsearch.xpack.ml.integration.InferenceIngestInputConfigIT - method: testIngestWithMultipleInputFields - issue: https://github.com/elastic/elasticsearch/issues/111384 - class: org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectAuthIT method: testAuthenticateWithCodeFlowAndClientPost issue: https://github.com/elastic/elasticsearch/issues/111396 @@ -40,18 +26,9 @@ tests: - class: org.elasticsearch.xpack.restart.FullClusterRestartIT method: testDataStreams {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111448 -- class: org.elasticsearch.upgrades.FullClusterRestartIT - method: testSnapshotRestore {cluster=OLD} - issue: https://github.com/elastic/elasticsearch/issues/111777 -- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT - method: testSnapshotRestore {cluster=OLD} - issue: https://github.com/elastic/elasticsearch/issues/111775 - class: org.elasticsearch.upgrades.FullClusterRestartIT method: testSnapshotRestore {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111798 -- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT - method: testSnapshotRestore {cluster=UPGRADED} - issue: https://github.com/elastic/elasticsearch/issues/111799 - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 @@ -69,15 +46,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112424 - class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/111497 -- class: org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests - method: testClientServiceMutualAuthentication - issue: https://github.com/elastic/elasticsearch/issues/112529 -- class: org.elasticsearch.search.basic.SearchWhileRelocatingIT - method: testSearchAndRelocateConcurrentlyRandomReplicas - issue: https://github.com/elastic/elasticsearch/issues/112515 -- class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT - method: testIndexPatternErrorMessageComparison_ESQL_SearchDSL - issue: https://github.com/elastic/elasticsearch/issues/112630 - class: org.elasticsearch.packaging.test.PackagesSecurityAutoConfigurationTests method: test20SecurityNotAutoConfiguredOnReInstallation issue: https://github.com/elastic/elasticsearch/issues/112635 @@ -111,12 +79,6 @@ tests: - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test33JavaChanged issue: https://github.com/elastic/elasticsearch/issues/113177 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {categorize.Categorize SYNC} - issue: https://github.com/elastic/elasticsearch/issues/113054 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {categorize.Categorize ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/113055 - class: org.elasticsearch.smoketest.MlWithSecurityIT method: test {yaml=ml/sparse_vector_search/Test sparse_vector search with query vector and pruning config} issue: https://github.com/elastic/elasticsearch/issues/108997 @@ -174,12 +136,6 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testGet issue: https://github.com/elastic/elasticsearch/issues/114135 -- class: org.elasticsearch.packaging.test.DockerTests - method: test022InstallPluginsFromLocalArchive - issue: https://github.com/elastic/elasticsearch/issues/111063 -- class: org.elasticsearch.xpack.inference.DefaultElserIT - method: testInferCreatesDefaultElser - issue: https://github.com/elastic/elasticsearch/issues/114503 - class: org.elasticsearch.xpack.inference.integration.ModelRegistryIT method: testGetModel issue: https://github.com/elastic/elasticsearch/issues/114657 From 69403442cfa75a085b2ca3bcb368e11070b68af7 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 08:32:59 +1100 Subject: [PATCH 291/324] Mute org.elasticsearch.xpack.restart.CoreFullClusterRestartIT testSnapshotRestore {cluster=UPGRADED} #111799 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c1bcf8ae23a7d..4b1c216225181 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -246,6 +246,9 @@ tests: - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT method: test {p0=cat.shards/10_basic/Help} issue: https://github.com/elastic/elasticsearch/issues/116110 +- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT + method: testSnapshotRestore {cluster=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/111799 # Examples: # From 09ac7d735f07276918f5e6a4b3985404607950e1 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 08:33:03 +1100 Subject: [PATCH 292/324] Mute org.elasticsearch.xpack.restart.CoreFullClusterRestartIT testSnapshotRestore {cluster=OLD} #111774 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 4b1c216225181..72c0210f818fc 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -249,6 +249,9 @@ tests: - class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT method: testSnapshotRestore {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111799 +- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT + method: testSnapshotRestore {cluster=OLD} + issue: https://github.com/elastic/elasticsearch/issues/111774 # Examples: # From 2662d5a5bd805c5b511ab629a990fb97ddf7796e Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 08:33:13 +1100 Subject: [PATCH 293/324] Mute org.elasticsearch.upgrades.FullClusterRestartIT testSnapshotRestore {cluster=OLD} #111777 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 72c0210f818fc..5498920209cd2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -252,6 +252,9 @@ tests: - class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT method: testSnapshotRestore {cluster=OLD} issue: https://github.com/elastic/elasticsearch/issues/111774 +- class: org.elasticsearch.upgrades.FullClusterRestartIT + method: testSnapshotRestore {cluster=OLD} + issue: https://github.com/elastic/elasticsearch/issues/111777 # Examples: # From 12ae9a5dac0399de96148a94856ed5ae6f07409f Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 08:38:10 +1100 Subject: [PATCH 294/324] Mute org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT testGeoPointGeoTile #115818 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 5498920209cd2..1a8dce7d7e5e2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -255,6 +255,9 @@ tests: - class: org.elasticsearch.upgrades.FullClusterRestartIT method: testSnapshotRestore {cluster=OLD} issue: https://github.com/elastic/elasticsearch/issues/111777 +- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT + method: testGeoPointGeoTile + issue: https://github.com/elastic/elasticsearch/issues/115818 # Examples: # From 853c4f2d533a3052bc8a39b73036e992dcc7e756 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 09:09:45 +1100 Subject: [PATCH 295/324] Mute org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT testLookbackWithIndicesOptions #116127 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 1a8dce7d7e5e2..47d5c3fdf8a37 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -258,6 +258,9 @@ tests: - class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT method: testGeoPointGeoTile issue: https://github.com/elastic/elasticsearch/issues/115818 +- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT + method: testLookbackWithIndicesOptions + issue: https://github.com/elastic/elasticsearch/issues/116127 # Examples: # From 2079e8c97e477381b3f5d4ef8f758080b4dba9ec Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Fri, 1 Nov 2024 20:04:50 -0400 Subject: [PATCH 296/324] Finalized refactorings from closed PR https://github.com/elastic/elasticsearch/pull/115976 (#116121) Pure refactoring PR --- .../xpack/esql/action/EsqlExecutionInfo.java | 12 +- .../esql/enrich/EnrichPolicyResolver.java | 27 +-- .../xpack/esql/session/EsqlSession.java | 10 +- ...CcsUtils.java => EsqlSessionCCSUtils.java} | 135 ++++++------ .../xpack/esql/session/IndexResolver.java | 22 +- ...sts.java => EsqlSessionCCSUtilsTests.java} | 195 ++++++++++++++++-- .../esql/session/IndexResolverTests.java | 80 ------- 7 files changed, 284 insertions(+), 197 deletions(-) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/{CcsUtils.java => EsqlSessionCCSUtils.java} (66%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/{EsqlSessionTests.java => EsqlSessionCCSUtilsTests.java} (66%) delete mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java index f2ab0355304b3..80bb2afe57122 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java @@ -229,7 +229,7 @@ public Iterator toXContentChunked(ToXContent.Params params b.field(SKIPPED_FIELD.getPreferredName(), getClusterStateCount(Cluster.Status.SKIPPED)); b.field(PARTIAL_FIELD.getPreferredName(), getClusterStateCount(Cluster.Status.PARTIAL)); b.field(FAILED_FIELD.getPreferredName(), getClusterStateCount(Cluster.Status.FAILED)); - // each clusterinfo defines its own field object name + // each Cluster object defines its own field object name b.xContentObject("details", clusterInfo.values().iterator()); }); } @@ -352,11 +352,7 @@ public Cluster( this.successfulShards = successfulShards; this.skippedShards = skippedShards; this.failedShards = failedShards; - if (failures == null) { - this.failures = List.of(); - } else { - this.failures = failures; - } + this.failures = failures == null ? Collections.emptyList() : failures; this.took = took; } @@ -373,7 +369,7 @@ public Cluster(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXEC_INFO_WITH_FAILURES)) { this.failures = Collections.unmodifiableList(in.readCollectionAsList(ShardSearchFailure::readShardSearchFailure)); } else { - this.failures = List.of(); + this.failures = Collections.emptyList(); } } @@ -475,7 +471,7 @@ public Cluster.Builder setTook(TimeValue took) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { String name = clusterAlias; - if (clusterAlias.equals("")) { + if (clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { name = LOCAL_CLUSTER_NAME_REPRESENTATION; } builder.startObject(name); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 77ef5ef597bb5..c8a7a6bcc4e98 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -289,24 +289,17 @@ public void onResponse(Transport.Connection connection) { RESOLVE_ACTION_NAME, new LookupRequest(cluster, remotePolicies), TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(lookupListener.delegateResponse((l, e) -> { - if (ExceptionsHelper.isRemoteUnavailableException(e) - && remoteClusterService.isSkipUnavailable(cluster)) { - l.onResponse(new LookupResponse(e)); - } else { - l.onFailure(e); - } - }), LookupResponse::new, threadPool.executor(ThreadPool.Names.SEARCH)) + new ActionListenerResponseHandler<>( + lookupListener.delegateResponse((l, e) -> failIfSkipUnavailableFalse(e, cluster, l)), + LookupResponse::new, + threadPool.executor(ThreadPool.Names.SEARCH) + ) ); } @Override public void onFailure(Exception e) { - if (ExceptionsHelper.isRemoteUnavailableException(e) && remoteClusterService.isSkipUnavailable(cluster)) { - lookupListener.onResponse(new LookupResponse(e)); - } else { - lookupListener.onFailure(e); - } + failIfSkipUnavailableFalse(e, cluster, lookupListener); } }); } @@ -331,6 +324,14 @@ public void onFailure(Exception e) { } } + private void failIfSkipUnavailableFalse(Exception e, String cluster, ActionListener lookupListener) { + if (ExceptionsHelper.isRemoteUnavailableException(e) && remoteClusterService.isSkipUnavailable(cluster)) { + lookupListener.onResponse(new LookupResponse(e)); + } else { + lookupListener.onFailure(e); + } + } + private static class LookupRequest extends TransportRequest { private final String clusterAlias; private final Collection policyNames; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index a4405c32ff91c..504689fdac39b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -149,7 +149,7 @@ public void execute(EsqlQueryRequest request, EsqlExecutionInfo executionInfo, P analyzedPlan( parse(request.query(), request.params()), executionInfo, - new CcsUtils.CssPartialErrorsActionListener(executionInfo, listener) { + new EsqlSessionCCSUtils.CssPartialErrorsActionListener(executionInfo, listener) { @Override public void onResponse(LogicalPlan analyzedPlan) { executeOptimizedPlan(request, executionInfo, planRunner, optimizedPlan(analyzedPlan), listener); @@ -171,7 +171,7 @@ public void executeOptimizedPlan( ) { PhysicalPlan physicalPlan = logicalPlanToPhysicalPlan(optimizedPlan, request); // TODO: this could be snuck into the underlying listener - CcsUtils.updateExecutionInfoAtEndOfPlanning(executionInfo); + EsqlSessionCCSUtils.updateExecutionInfoAtEndOfPlanning(executionInfo); // execute any potential subplans executeSubPlans(physicalPlan, planRunner, executionInfo, request, listener); } @@ -308,8 +308,8 @@ private void preAnalyze( // TODO in follow-PR (for skip_unavailble handling of missing concrete indexes) add some tests for invalid index // resolution to updateExecutionInfo if (indexResolution.isValid()) { - CcsUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); - CcsUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.getUnavailableClusters()); + EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.getUnavailableClusters()); if (executionInfo.isCrossClusterSearch() && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) == 0) { // for a CCS, if all clusters have been marked as SKIPPED, nothing to search so send a sentinel @@ -383,7 +383,7 @@ private void preAnalyzeIndices( } // if the preceding call to the enrich policy API found unavailable clusters, recreate the index expression to search // based only on available clusters (which could now be an empty list) - String indexExpressionToResolve = CcsUtils.createIndexExpressionFromAvailableClusters(executionInfo); + String indexExpressionToResolve = EsqlSessionCCSUtils.createIndexExpressionFromAvailableClusters(executionInfo); if (indexExpressionToResolve.isEmpty()) { // if this was a pure remote CCS request (no local indices) and all remotes are offline, return an empty IndexResolution listener.onResponse(IndexResolution.valid(new EsIndex(table.index(), Map.of(), Map.of()))); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/CcsUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java similarity index 66% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/CcsUtils.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java index a9314e6f65d87..80709d8f6c4f7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/CcsUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java @@ -23,14 +23,30 @@ import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -class CcsUtils { +class EsqlSessionCCSUtils { - private CcsUtils() {} + private EsqlSessionCCSUtils() {} + + // visible for testing + static Map determineUnavailableRemoteClusters(List failures) { + Map unavailableRemotes = new HashMap<>(); + for (FieldCapabilitiesFailure failure : failures) { + if (ExceptionsHelper.isRemoteUnavailableException(failure.getException())) { + for (String indexExpression : failure.getIndices()) { + if (indexExpression.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) > 0) { + unavailableRemotes.put(RemoteClusterAware.parseClusterAlias(indexExpression), failure); + } + } + } + } + return unavailableRemotes; + } /** * ActionListener that receives LogicalPlan or error from logical planning. @@ -46,70 +62,73 @@ abstract static class CssPartialErrorsActionListener implements ActionListener { + EsqlExecutionInfo.Cluster.Builder builder = new EsqlExecutionInfo.Cluster.Builder(v).setTook(executionInfo.overallTook()) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0); + if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { + // never mark local cluster as skipped + builder.setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL); } else { - exceptionForResponse = e; - } - for (String clusterAlias : executionInfo.clusterAliases()) { - executionInfo.swapCluster(clusterAlias, (k, v) -> { - EsqlExecutionInfo.Cluster.Builder builder = new EsqlExecutionInfo.Cluster.Builder(v).setTook( - executionInfo.overallTook() - ).setTotalShards(0).setSuccessfulShards(0).setSkippedShards(0).setFailedShards(0); - if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { - // never mark local cluster as skipped - builder.setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL); - } else { - builder.setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED); - // add this exception to the failures list only if there is no failure already recorded there - if (v.getFailures() == null || v.getFailures().size() == 0) { - builder.setFailures(List.of(new ShardSearchFailure(exceptionForResponse))); - } - } - return builder.build(); - }); + builder.setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED); + // add this exception to the failures list only if there is no failure already recorded there + if (v.getFailures() == null || v.getFailures().size() == 0) { + builder.setFailures(List.of(new ShardSearchFailure(exceptionForResponse))); + } } - listener.onResponse(new Result(Analyzer.NO_FIELDS, Collections.emptyList(), Collections.emptyList(), executionInfo)); - } else { - listener.onFailure(e); - } + return builder.build(); + }); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java index f76f7798dece8..210f991306bac 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java @@ -6,9 +6,7 @@ */ package org.elasticsearch.xpack.esql.session; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; @@ -20,7 +18,6 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xpack.esql.action.EsqlResolveFieldsAction; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.DateEsField; @@ -159,23 +156,8 @@ public IndexResolution mergedMappings(String indexPattern, FieldCapabilitiesResp for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { concreteIndices.put(ir.getIndexName(), ir.getIndexMode()); } - Map unavailableRemotes = determineUnavailableRemoteClusters(fieldCapsResponse.getFailures()); - return IndexResolution.valid(new EsIndex(indexPattern, rootFields, concreteIndices), unavailableRemotes); - } - - // visible for testing - static Map determineUnavailableRemoteClusters(List failures) { - Map unavailableRemotes = new HashMap<>(); - for (FieldCapabilitiesFailure failure : failures) { - if (ExceptionsHelper.isRemoteUnavailableException(failure.getException())) { - for (String indexExpression : failure.getIndices()) { - if (indexExpression.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) > 0) { - unavailableRemotes.put(RemoteClusterAware.parseClusterAlias(indexExpression), failure); - } - } - } - } - return unavailableRemotes; + EsIndex esIndex = new EsIndex(indexPattern, rootFields, concreteIndices); + return IndexResolution.valid(esIndex, EsqlSessionCCSUtils.determineUnavailableRemoteClusters(fieldCapsResponse.getFailures())); } private boolean allNested(List caps) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java similarity index 66% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java index 1f814b841f19d..e60024ecd5db4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java @@ -7,11 +7,15 @@ package org.elasticsearch.xpack.esql.session; +import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexMode; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.NoSeedNodeLeftException; +import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; @@ -20,18 +24,20 @@ import org.elasticsearch.xpack.esql.index.IndexResolution; import org.elasticsearch.xpack.esql.type.EsFieldTests; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Predicate; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -public class EsqlSessionTests extends ESTestCase { +public class EsqlSessionCCSUtilsTests extends ESTestCase { public void testCreateIndexExpressionFromAvailableClusters() { final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; @@ -45,7 +51,7 @@ public void testCreateIndexExpressionFromAvailableClusters() { executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", true)); - String indexExpr = CcsUtils.createIndexExpressionFromAvailableClusters(executionInfo); + String indexExpr = EsqlSessionCCSUtils.createIndexExpressionFromAvailableClusters(executionInfo); List list = Arrays.stream(Strings.splitStringByCommaToArray(indexExpr)).toList(); assertThat(list.size(), equalTo(5)); assertThat( @@ -69,7 +75,7 @@ public void testCreateIndexExpressionFromAvailableClusters() { ) ); - String indexExpr = CcsUtils.createIndexExpressionFromAvailableClusters(executionInfo); + String indexExpr = EsqlSessionCCSUtils.createIndexExpressionFromAvailableClusters(executionInfo); List list = Arrays.stream(Strings.splitStringByCommaToArray(indexExpr)).toList(); assertThat(list.size(), equalTo(3)); assertThat(new HashSet<>(list), equalTo(Strings.commaDelimitedListToSet("logs*,remote1:*,remote1:foo"))); @@ -93,7 +99,7 @@ public void testCreateIndexExpressionFromAvailableClusters() { ) ); - assertThat(CcsUtils.createIndexExpressionFromAvailableClusters(executionInfo), equalTo("logs*")); + assertThat(EsqlSessionCCSUtils.createIndexExpressionFromAvailableClusters(executionInfo), equalTo("logs*")); } // only remotes present and all marked as skipped, so in revised index expression should be empty string @@ -113,7 +119,7 @@ public void testCreateIndexExpressionFromAvailableClusters() { ) ); - assertThat(CcsUtils.createIndexExpressionFromAvailableClusters(executionInfo), equalTo("")); + assertThat(EsqlSessionCCSUtils.createIndexExpressionFromAvailableClusters(executionInfo), equalTo("")); } } @@ -131,7 +137,7 @@ public void testUpdateExecutionInfoWithUnavailableClusters() { var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); var unvailableClusters = Map.of(remote1Alias, failure, remote2Alias, failure); - CcsUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, unvailableClusters); + EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, unvailableClusters); assertThat(executionInfo.clusterAliases(), equalTo(Set.of(localClusterAlias, remote1Alias, remote2Alias))); assertNull(executionInfo.overallTook()); @@ -159,7 +165,7 @@ public void testUpdateExecutionInfoWithUnavailableClusters() { var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); RemoteTransportException e = expectThrows( RemoteTransportException.class, - () -> CcsUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, Map.of(remote2Alias, failure)) + () -> EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, Map.of(remote2Alias, failure)) ); assertThat(e.status().getStatus(), equalTo(500)); assertThat( @@ -176,7 +182,7 @@ public void testUpdateExecutionInfoWithUnavailableClusters() { executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); - CcsUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, Map.of()); + EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, Map.of()); assertThat(executionInfo.clusterAliases(), equalTo(Set.of(localClusterAlias, remote1Alias, remote2Alias))); assertNull(executionInfo.overallTook()); @@ -224,7 +230,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { ); IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of()); - CcsUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); @@ -262,7 +268,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { ); IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of()); - CcsUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); @@ -298,7 +304,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of(remote1Alias, failure)); - CcsUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); assertThat(localCluster.getIndexExpression(), equalTo("logs*")); @@ -336,7 +342,63 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of(remote1Alias, failure)); - CcsUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + } + } + + public void testDetermineUnavailableRemoteClusters() { + // two clusters, both "remote unavailable" type exceptions + { + List failures = new ArrayList<>(); + failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSuchRemoteClusterException("remote2"))); + failures.add( + new FieldCapabilitiesFailure( + new String[] { "remote1:foo", "remote1:bar" }, + new IllegalStateException("Unable to open any connections") + ) + ); + + Map unavailableClusters = EsqlSessionCCSUtils.determineUnavailableRemoteClusters(failures); + assertThat(unavailableClusters.keySet(), equalTo(Set.of("remote1", "remote2"))); + } + + // one cluster with "remote unavailable" with two failures + { + List failures = new ArrayList<>(); + failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSuchRemoteClusterException("remote2"))); + failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSeedNodeLeftException("no seed node"))); + + Map unavailableClusters = EsqlSessionCCSUtils.determineUnavailableRemoteClusters(failures); + assertThat(unavailableClusters.keySet(), equalTo(Set.of("remote2"))); + } + + // two clusters, one "remote unavailable" type exceptions and one with another type + { + List failures = new ArrayList<>(); + failures.add(new FieldCapabilitiesFailure(new String[] { "remote1:mylogs1" }, new CorruptIndexException("foo", "bar"))); + failures.add( + new FieldCapabilitiesFailure( + new String[] { "remote2:foo", "remote2:bar" }, + new IllegalStateException("Unable to open any connections") + ) + ); + Map unavailableClusters = EsqlSessionCCSUtils.determineUnavailableRemoteClusters(failures); + assertThat(unavailableClusters.keySet(), equalTo(Set.of("remote2"))); + } + + // one cluster1 with exception not known to indicate "remote unavailable" + { + List failures = new ArrayList<>(); + failures.add(new FieldCapabilitiesFailure(new String[] { "remote1:mylogs1" }, new RuntimeException("foo"))); + Map unavailableClusters = EsqlSessionCCSUtils.determineUnavailableRemoteClusters(failures); + assertThat(unavailableClusters.keySet(), equalTo(Set.of())); + } + + // empty failures list + { + List failures = new ArrayList<>(); + Map unavailableClusters = EsqlSessionCCSUtils.determineUnavailableRemoteClusters(failures); + assertThat(unavailableClusters.keySet(), equalTo(Set.of())); } } @@ -358,7 +420,7 @@ public void testUpdateExecutionInfoAtEndOfPlanning() { Thread.sleep(1); } catch (InterruptedException e) {} - CcsUtils.updateExecutionInfoAtEndOfPlanning(executionInfo); + EsqlSessionCCSUtils.updateExecutionInfoAtEndOfPlanning(executionInfo); assertThat(executionInfo.planningTookTime().millis(), greaterThanOrEqualTo(0L)); assertNull(executionInfo.overallTook()); @@ -410,4 +472,111 @@ private static Map randomMapping() { } return result; } + + public void testReturnSuccessWithEmptyResult() { + String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + String remote1Alias = "remote1"; + String remote2Alias = "remote2"; + String remote3Alias = "remote3"; + NoClustersToSearchException noClustersException = new NoClustersToSearchException(); + Predicate skipUnPredicate = s -> { + if (s.equals("remote2") || s.equals("remote3")) { + return true; + } + return false; + }; + + EsqlExecutionInfo.Cluster localCluster = new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false); + EsqlExecutionInfo.Cluster remote1 = new EsqlExecutionInfo.Cluster(remote1Alias, "logs*", false); + EsqlExecutionInfo.Cluster remote2 = new EsqlExecutionInfo.Cluster(remote2Alias, "logs*", true); + EsqlExecutionInfo.Cluster remote3 = new EsqlExecutionInfo.Cluster(remote3Alias, "logs*", true); + + // not a cross-cluster cluster search, so do not return empty result + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(skipUnPredicate, randomBoolean()); + executionInfo.swapCluster(localClusterAlias, (k, v) -> localCluster); + assertFalse(EsqlSessionCCSUtils.returnSuccessWithEmptyResult(executionInfo, noClustersException)); + } + + // local cluster is present, so do not return empty result + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(skipUnPredicate, randomBoolean()); + executionInfo.swapCluster(localClusterAlias, (k, v) -> localCluster); + executionInfo.swapCluster(remote1Alias, (k, v) -> remote1); + // TODO: this logic will be added in the follow-on PR that handles missing indices + // assertFalse(EsqlSessionCCSUtils.returnSuccessWithEmptyResult(executionInfo, noClustersException)); + } + + // remote-only, one cluster is skip_unavailable=false, so do not return empty result + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(skipUnPredicate, randomBoolean()); + executionInfo.swapCluster(remote1Alias, (k, v) -> remote1); + executionInfo.swapCluster(remote2Alias, (k, v) -> remote2); + assertFalse(EsqlSessionCCSUtils.returnSuccessWithEmptyResult(executionInfo, noClustersException)); + } + + // remote-only, all clusters are skip_unavailable=true, so should return empty result with + // NoSuchClustersException or "remote unavailable" type exception + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(skipUnPredicate, randomBoolean()); + executionInfo.swapCluster(remote2Alias, (k, v) -> remote2); + executionInfo.swapCluster(remote3Alias, (k, v) -> remote3); + Exception e = randomFrom( + new NoSuchRemoteClusterException("foo"), + noClustersException, + new NoSeedNodeLeftException("foo"), + new IllegalStateException("unknown host") + ); + assertTrue(EsqlSessionCCSUtils.returnSuccessWithEmptyResult(executionInfo, e)); + } + + // remote-only, all clusters are skip_unavailable=true, but exception is not "remote unavailable" so return false + // Note: this functionality may change in follow-on PRs, so remove this test in that case + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(skipUnPredicate, randomBoolean()); + executionInfo.swapCluster(remote2Alias, (k, v) -> remote2); + executionInfo.swapCluster(remote3Alias, (k, v) -> remote3); + assertFalse(EsqlSessionCCSUtils.returnSuccessWithEmptyResult(executionInfo, new NullPointerException())); + } + } + + public void testUpdateExecutionInfoToReturnEmptyResult() { + String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + String remote1Alias = "remote1"; + String remote2Alias = "remote2"; + String remote3Alias = "remote3"; + ConnectTransportException transportEx = new ConnectTransportException(null, "foo"); + Predicate skipUnPredicate = s -> { + if (s.startsWith("remote")) { + return true; + } + return false; + }; + + EsqlExecutionInfo.Cluster localCluster = new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false); + EsqlExecutionInfo.Cluster remote1 = new EsqlExecutionInfo.Cluster(remote1Alias, "logs*", true); + EsqlExecutionInfo.Cluster remote2 = new EsqlExecutionInfo.Cluster(remote2Alias, "logs*", true); + EsqlExecutionInfo.Cluster remote3 = new EsqlExecutionInfo.Cluster(remote3Alias, "logs*", true); + + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(skipUnPredicate, randomBoolean()); + executionInfo.swapCluster(localCluster.getClusterAlias(), (k, v) -> localCluster); + executionInfo.swapCluster(remote1.getClusterAlias(), (k, v) -> remote1); + executionInfo.swapCluster(remote2.getClusterAlias(), (k, v) -> remote2); + executionInfo.swapCluster(remote3.getClusterAlias(), (k, v) -> remote3); + + assertNull(executionInfo.overallTook()); + + EsqlSessionCCSUtils.updateExecutionInfoToReturnEmptyResult(executionInfo, transportEx); + + assertNotNull(executionInfo.overallTook()); + assertThat(executionInfo.getCluster(localClusterAlias).getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + assertThat(executionInfo.getCluster(localClusterAlias).getFailures().size(), equalTo(0)); + + for (String remoteAlias : Set.of(remote1Alias, remote2Alias, remote3Alias)) { + assertThat(executionInfo.getCluster(remoteAlias).getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + List remoteFailures = executionInfo.getCluster(remoteAlias).getFailures(); + assertThat(remoteFailures.size(), equalTo(1)); + assertThat(remoteFailures.get(0).reason(), containsString("unable to connect to remote cluster")); + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java deleted file mode 100644 index d6e410305afaa..0000000000000 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.session; - -import org.apache.lucene.index.CorruptIndexException; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.transport.NoSeedNodeLeftException; -import org.elasticsearch.transport.NoSuchRemoteClusterException; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.hamcrest.Matchers.equalTo; - -public class IndexResolverTests extends ESTestCase { - - public void testDetermineUnavailableRemoteClusters() { - // two clusters, both "remote unavailable" type exceptions - { - List failures = new ArrayList<>(); - failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSuchRemoteClusterException("remote2"))); - failures.add( - new FieldCapabilitiesFailure( - new String[] { "remote1:foo", "remote1:bar" }, - new IllegalStateException("Unable to open any connections") - ) - ); - - Map unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); - assertThat(unavailableClusters.keySet(), equalTo(Set.of("remote1", "remote2"))); - } - - // one cluster with "remote unavailable" with two failures - { - List failures = new ArrayList<>(); - failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSuchRemoteClusterException("remote2"))); - failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSeedNodeLeftException("no seed node"))); - - Map unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); - assertThat(unavailableClusters.keySet(), equalTo(Set.of("remote2"))); - } - - // two clusters, one "remote unavailable" type exceptions and one with another type - { - List failures = new ArrayList<>(); - failures.add(new FieldCapabilitiesFailure(new String[] { "remote1:mylogs1" }, new CorruptIndexException("foo", "bar"))); - failures.add( - new FieldCapabilitiesFailure( - new String[] { "remote2:foo", "remote2:bar" }, - new IllegalStateException("Unable to open any connections") - ) - ); - Map unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); - assertThat(unavailableClusters.keySet(), equalTo(Set.of("remote2"))); - } - - // one cluster1 with exception not known to indicate "remote unavailable" - { - List failures = new ArrayList<>(); - failures.add(new FieldCapabilitiesFailure(new String[] { "remote1:mylogs1" }, new RuntimeException("foo"))); - Map unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); - assertThat(unavailableClusters.keySet(), equalTo(Set.of())); - } - - // empty failures list - { - List failures = new ArrayList<>(); - Map unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures); - assertThat(unavailableClusters.keySet(), equalTo(Set.of())); - } - } -} From 6db39d176559beaf6e336cae633a7827b6db8820 Mon Sep 17 00:00:00 2001 From: Parker Timmins Date: Fri, 1 Nov 2024 21:54:55 -0600 Subject: [PATCH 297/324] Resolve pipelines from template if lazy rollover write (#116031) If datastream rollover on write flag is set in cluster state, resolve pipelines from templates rather than from metadata. This fixes the following bug: when a pipeline reroutes every document to another index, and rollover is called with lazy=true (setting the rollover on write flag), changes to the pipeline do not go into effect, because the lack of writes means the data stream never rolls over and pipelines in metadata are not updated. The fix is to resolve pipelines from templates if the lazy rollover flag is set. To improve efficiency we only resolve pipelines once per index in the bulk request, caching the value, and reusing for other requests to the same index. Fixes: #112781 --- docs/changelog/116031.yaml | 6 + .../test/ingest/310_reroute_processor.yml | 324 +++++++++++++++++- .../bulk/TransportAbstractBulkAction.java | 10 +- .../elasticsearch/ingest/IngestService.java | 58 +++- .../ingest/IngestServiceTests.java | 139 ++++++++ 5 files changed, 523 insertions(+), 14 deletions(-) create mode 100644 docs/changelog/116031.yaml diff --git a/docs/changelog/116031.yaml b/docs/changelog/116031.yaml new file mode 100644 index 0000000000000..e30552bf3b513 --- /dev/null +++ b/docs/changelog/116031.yaml @@ -0,0 +1,6 @@ +pr: 116031 +summary: Resolve pipelines from template on lazy rollover write +area: Data streams +type: bug +issues: + - 112781 diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml index 53229290da03e..5b7e6cff63b31 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml @@ -36,7 +36,10 @@ teardown: ingest.delete_pipeline: id: "pipeline-2" ignore: 404 - + - do: + indices.delete_data_stream: + name: "data-stream-*" + expand_wildcards: all --- "Test first matching router terminates pipeline": - skip: @@ -252,3 +255,322 @@ teardown: - match: { _source.existing-field : true } - match: { _source.added-in-pipeline-before-reroute : true } - match: { _source.added-in-pipeline-after-reroute : true } + +--- +"Test data stream with lazy rollover obtains pipeline from template": + # This test starts with chain of reroutes, from data-stream-1, to data-stream-2, to data-stream-3 + # We then add higher priority templates that remove the reroute processors. Then we show that + # after a lazy rollover on data-stream-2, a document written to data-stream-1 still gets rerouted + # to data-steam-2, but not on to data-stream-3. Finally, after a lazy rollover on data-stream-1 + # causes the new template to also take effect on data-stream-1, and the last write goes directly + # into data-stream-1. Multiple reroute steps are tested because pipeline resolution using a + # different code path for initial index and indices after a reroute. + + # start with pipeline that reroutes from ds-1 to ds-2 + - do: + ingest.put_pipeline: + id: "reroute-1" + body: > + { + "processors": [ + { + "reroute" : {"destination": "data-stream-2"} + } + ] + } + - match: { acknowledged: true } + + # and pipeline that reroutes from ds-2 to ds-3 + - do: + ingest.put_pipeline: + id: "reroute-2" + body: > + { + "processors": [ + { + "reroute" : {"destination": "data-stream-3"} + } + ] + } + - match: { acknowledged: true } + + # set pipelines in templates + - do: + indices.put_index_template: + name: template-1 + body: + index_patterns: [ "data-stream-1"] + priority: 1 + data_stream: { } + template: + settings: + index.default_pipeline: "reroute-1" + - match: { acknowledged: true } + - do: + indices.put_index_template: + name: template-2 + body: + index_patterns: [ "data-stream-2"] + priority: 1 + data_stream: { } + template: + settings: + index.default_pipeline: "reroute-2" + - match: { acknowledged: true } + - do: + indices.put_index_template: + name: template_3 + body: + index_patterns: [ "data-stream-3" ] + priority: 1 + data_stream: { } + - match: { acknowledged: true } + + - do: + indices.create_data_stream: + name: data-stream-1 + - match: { acknowledged: true } + - do: + indices.create_data_stream: + name: data-stream-2 + - match: { acknowledged: true } + - do: + indices.create_data_stream: + name: data-stream-3 + - match: { acknowledged: true } + + # write to ds-1 + - do: + index: + index: data-stream-1 + body: + '@timestamp': '2020-12-12' + some-field: 1 + - do: + indices.refresh: + index: data-stream-3 + + # document is rerouted to ds-3 + - do: + search: + index: data-stream-3 + body: { query: { match_all: { } } } + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.some-field: 1 } + + # add higher priority templates without reroute processors + - do: + indices.put_index_template: + name: template_4 + body: + index_patterns: [ "data-stream-1" ] + priority: 2 # higher priority + data_stream: { } + - match: { acknowledged: true } + - do: + indices.put_index_template: + name: template_5 + body: + index_patterns: [ "data-stream-2" ] + priority: 2 # higher priority + data_stream: { } + - match: { acknowledged: true } + + # write to ds-1 + - do: + index: + index: data-stream-1 + body: + '@timestamp': '2020-12-12' + some-field: 2 + - do: + indices.refresh: + index: data-stream-3 + + # still rerouted because ds-1 and ds-2 rolled over + - do: + search: + index: data-stream-3 + body: { query: { match_all: { } } } + - length: { hits.hits: 2 } + + # perform lazy rollover on ds-2 + - do: + indices.rollover: + alias: data-stream-2 + lazy: true + + # write to ds-1 + - do: + index: + index: data-stream-1 + body: + '@timestamp': '2020-12-12' + some-field: 3 + - do: + indices.refresh: + index: data-stream-2 + + # written to ds-2, as rerouted to ds-2, but not on to ds-3 + - do: + search: + index: data-stream-2 + body: { query: { match_all: { } } } + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.some-field: 3 } + + # perform lazy rollover on 1 + - do: + indices.rollover: + alias: data-stream-1 + lazy: true + + # write to ds-1 + - do: + index: + index: data-stream-1 + body: + '@timestamp': '2020-12-12' + some-field: 4 + - do: + indices.refresh: + index: data-stream-1 + + # written to ds-1, as not rerouted to ds-2 + - do: + search: + index: data-stream-1 + body: { query: { match_all: { } } } + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.some-field: 4 } + +--- +"Test remove then add reroute processor with and without lazy rollover": + # start with pipeline that reroutes from ds-1 to ds-2 + - do: + ingest.put_pipeline: + id: "reroute-1" + body: > + { + "processors": [ + { + "reroute" : {"destination": "data-stream-2"} + } + ] + } + - match: { acknowledged: true } + + # set pipelines in templates + - do: + indices.put_index_template: + name: template-1 + body: + index_patterns: [ "data-stream-1"] + priority: 1 + data_stream: { } + template: + settings: + index.default_pipeline: "reroute-1" + - match: { acknowledged: true } + - do: + indices.put_index_template: + name: template_2 + body: + index_patterns: [ "data-stream-2" ] + priority: 1 + data_stream: { } + - match: { acknowledged: true } + + - do: + indices.create_data_stream: + name: data-stream-1 + - match: { acknowledged: true } + + - do: + indices.create_data_stream: + name: data-stream-2 + - match: { acknowledged: true } + + # write to ds-1 + - do: + index: + index: data-stream-1 + body: + '@timestamp': '2020-12-12' + some-field: 1 + - do: + indices.refresh: + index: data-stream-2 + + # document is rerouted to ds-2 + - do: + search: + index: data-stream-2 + body: { query: { match_all: { } } } + - length: { hits.hits: 1 } + + # add higher priority templates without reroute processors + - do: + indices.put_index_template: + name: template_3 + body: + index_patterns: [ "data-stream-1" ] + priority: 2 # higher priority + data_stream: { } + - match: { acknowledged: true } + + # perform lazy rollover on ds-2 + - do: + indices.rollover: + alias: data-stream-1 + lazy: true + + # write to ds-1 + - do: + index: + index: data-stream-1 + body: + '@timestamp': '2020-12-12' + some-field: 2 + - do: + indices.refresh: + index: data-stream-1 + + # written to ds-1, as not rerouted to ds-2 + - do: + search: + index: data-stream-1 + body: { query: { match_all: { } } } + - length: { hits.hits: 1 } + + # add another higher priority templates with reroute processors + - do: + indices.put_index_template: + name: template-3 + body: + index_patterns: [ "data-stream-1" ] + priority: 3 + data_stream: { } + template: + settings: + index.default_pipeline: "reroute-1" + - match: { acknowledged: true } + + # don't do a lazy rollover + # write to ds-1 + - do: + index: + index: data-stream-1 + body: + '@timestamp': '2020-12-12' + some-field: 3 + - do: + indices.refresh: + index: data-stream-1 + + # because no lazy rollover, still no reroute processor + - do: + search: + index: data-stream-1 + body: { query: { match_all: { } } } + - length: { hits.hits: 2 } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java index 111e4d72c57c6..e83bca4b661c9 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java @@ -228,10 +228,18 @@ private boolean applyPipelines(Task task, BulkRequest bulkRequest, Executor exec metadata = clusterService.state().getMetadata(); } + Map resolvedPipelineCache = new HashMap<>(); for (DocWriteRequest actionRequest : bulkRequest.requests) { IndexRequest indexRequest = getIndexWriteRequest(actionRequest); if (indexRequest != null) { - IngestService.resolvePipelinesAndUpdateIndexRequest(actionRequest, indexRequest, metadata); + if (indexRequest.isPipelineResolved() == false) { + var pipeline = resolvedPipelineCache.computeIfAbsent( + indexRequest.index(), + // TODO perhaps this should use `threadPool.absoluteTimeInMillis()`, but leaving as is for now. + (index) -> IngestService.resolvePipelines(actionRequest, indexRequest, metadata, System.currentTimeMillis()) + ); + IngestService.setPipelineOnRequest(indexRequest, pipeline); + } hasIndexRequestsWithPipelines |= IngestService.hasPipeline(indexRequest); } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index b5ac54b018e46..ce61f197b4831 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -282,26 +282,60 @@ static void resolvePipelinesAndUpdateIndexRequest( final Metadata metadata, final long epochMillis ) { - if (indexRequest.isPipelineResolved()) { - return; + if (indexRequest.isPipelineResolved() == false) { + var pipelines = resolvePipelines(originalRequest, indexRequest, metadata, epochMillis); + setPipelineOnRequest(indexRequest, pipelines); } + } - /* - * Here we look for the pipelines associated with the index if the index exists. If the index does not exist we fall back to using - * templates to find the pipelines. - */ - final Pipelines pipelines = resolvePipelinesFromMetadata(originalRequest, indexRequest, metadata, epochMillis).or( - () -> resolvePipelinesFromIndexTemplates(indexRequest, metadata) - ).orElse(Pipelines.NO_PIPELINES_DEFINED); + static boolean isRolloverOnWrite(Metadata metadata, IndexRequest indexRequest) { + DataStream dataStream = metadata.dataStreams().get(indexRequest.index()); + if (dataStream == null) { + return false; + } + return dataStream.getBackingIndices().isRolloverOnWrite(); + } + /** + * Resolve the default and final pipelines from the cluster state metadata or index templates. + * + * @param originalRequest initial request + * @param indexRequest the index request, which could be different from the initial request if rerouted + * @param metadata cluster data metadata + * @param epochMillis current time for index name resolution + * @return the resolved pipelines + */ + public static Pipelines resolvePipelines( + final DocWriteRequest originalRequest, + final IndexRequest indexRequest, + final Metadata metadata, + final long epochMillis + ) { + if (isRolloverOnWrite(metadata, indexRequest)) { + return resolvePipelinesFromIndexTemplates(indexRequest, metadata) // + .orElse(Pipelines.NO_PIPELINES_DEFINED); + } else { + return resolvePipelinesFromMetadata(originalRequest, indexRequest, metadata, epochMillis) // + .or(() -> resolvePipelinesFromIndexTemplates(indexRequest, metadata)) // + .orElse(Pipelines.NO_PIPELINES_DEFINED); + } + } + + /** + * Set the request pipeline on the index request if present, otherwise set the default pipeline. + * Always set the final pipeline. + * @param indexRequest the index request + * @param resolvedPipelines default and final pipelines resolved from metadata and templates + */ + public static void setPipelineOnRequest(IndexRequest indexRequest, Pipelines resolvedPipelines) { // The pipeline coming as part of the request always has priority over the resolved one from metadata or templates String requestPipeline = indexRequest.getPipeline(); if (requestPipeline != null) { indexRequest.setPipeline(requestPipeline); } else { - indexRequest.setPipeline(pipelines.defaultPipeline); + indexRequest.setPipeline(resolvedPipelines.defaultPipeline); } - indexRequest.setFinalPipeline(pipelines.finalPipeline); + indexRequest.setFinalPipeline(resolvedPipelines.finalPipeline); indexRequest.isPipelineResolved(true); } @@ -1507,7 +1541,7 @@ public static boolean hasPipeline(IndexRequest indexRequest) { || NOOP_PIPELINE_NAME.equals(indexRequest.getFinalPipeline()) == false; } - private record Pipelines(String defaultPipeline, String finalPipeline) { + public record Pipelines(String defaultPipeline, String finalPipeline) { private static final Pipelines NO_PIPELINES_DEFINED = new Pipelines(NOOP_PIPELINE_NAME, NOOP_PIPELINE_NAME); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index b3ddc313eaf3a..78baa1699df00 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -51,6 +51,7 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; @@ -2506,6 +2507,144 @@ public void testResolveRequestOrDefaultPipelineAndFinalPipeline() { } } + public void testRolloverOnWrite() { + { // false if not data stream + IndexMetadata.Builder builder = IndexMetadata.builder("idx") + .settings(settings(IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(0); + Metadata metadata = Metadata.builder().put(builder).build(); + IndexRequest indexRequest = new IndexRequest("idx").setPipeline("request-pipeline"); + assertFalse(IngestService.isRolloverOnWrite(metadata, indexRequest)); + } + + { // false if not rollover on write + var backingIndex = ".ds-data-stream-01"; + var indexUUID = randomUUID(); + + var dataStream = DataStream.builder( + "no-rollover-data-stream", + DataStream.DataStreamIndices.backingIndicesBuilder(List.of(new Index(backingIndex, indexUUID))) + .setRolloverOnWrite(false) + .build() + ).build(); + + Metadata metadata = Metadata.builder().dataStreams(Map.of(dataStream.getName(), dataStream), Map.of()).build(); + + IndexRequest indexRequest = new IndexRequest("no-rollover-data-stream"); + assertFalse(IngestService.isRolloverOnWrite(metadata, indexRequest)); + } + + { // true if rollover on write + var backingIndex = ".ds-data-stream-01"; + var indexUUID = randomUUID(); + + var dataStream = DataStream.builder( + "rollover-data-stream", + DataStream.DataStreamIndices.backingIndicesBuilder(List.of(new Index(backingIndex, indexUUID))) + .setRolloverOnWrite(true) + .build() + ).build(); + + Metadata metadata = Metadata.builder().dataStreams(Map.of(dataStream.getName(), dataStream), Map.of()).build(); + + IndexRequest indexRequest = new IndexRequest("rollover-data-stream"); + assertTrue(IngestService.isRolloverOnWrite(metadata, indexRequest)); + } + } + + public void testResolveFromTemplateIfRolloverOnWrite() { + { // if rolloverOnWrite is false, get pipeline from metadata + var backingIndex = ".ds-data-stream-01"; + var indexUUID = randomUUID(); + + var dataStream = DataStream.builder( + "no-rollover-data-stream", + DataStream.DataStreamIndices.backingIndicesBuilder(List.of(new Index(backingIndex, indexUUID))) + .setRolloverOnWrite(false) + .build() + ).build(); + + IndexMetadata indexMetadata = IndexMetadata.builder(backingIndex) + .settings( + settings(IndexVersion.current()).put(IndexSettings.DEFAULT_PIPELINE.getKey(), "metadata-pipeline") + .put(IndexMetadata.SETTING_INDEX_UUID, indexUUID) + ) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + + Metadata metadata = Metadata.builder() + .indices(Map.of(backingIndex, indexMetadata)) + .dataStreams(Map.of(dataStream.getName(), dataStream), Map.of()) + .build(); + + IndexRequest indexRequest = new IndexRequest("no-rollover-data-stream"); + IngestService.resolvePipelinesAndUpdateIndexRequest(indexRequest, indexRequest, metadata); + assertTrue(hasPipeline(indexRequest)); + assertTrue(indexRequest.isPipelineResolved()); + assertThat(indexRequest.getPipeline(), equalTo("metadata-pipeline")); + } + + { // if rolloverOnWrite is true, get pipeline from template + var backingIndex = ".ds-data-stream-01"; + var indexUUID = randomUUID(); + + var dataStream = DataStream.builder( + "rollover-data-stream", + DataStream.DataStreamIndices.backingIndicesBuilder(List.of(new Index(backingIndex, indexUUID))) + .setRolloverOnWrite(true) + .build() + ).build(); + + IndexMetadata indexMetadata = IndexMetadata.builder(backingIndex) + .settings( + settings(IndexVersion.current()).put(IndexSettings.DEFAULT_PIPELINE.getKey(), "metadata-pipeline") + .put(IndexMetadata.SETTING_INDEX_UUID, indexUUID) + ) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + + IndexTemplateMetadata.Builder templateBuilder = IndexTemplateMetadata.builder("name1") + .patterns(List.of("rollover*")) + .settings(settings(IndexVersion.current()).put(IndexSettings.DEFAULT_PIPELINE.getKey(), "template-pipeline")); + + Metadata metadata = Metadata.builder() + .put(templateBuilder) + .indices(Map.of(backingIndex, indexMetadata)) + .dataStreams(Map.of(dataStream.getName(), dataStream), Map.of()) + .build(); + + IndexRequest indexRequest = new IndexRequest("rollover-data-stream"); + IngestService.resolvePipelinesAndUpdateIndexRequest(indexRequest, indexRequest, metadata); + assertTrue(hasPipeline(indexRequest)); + assertTrue(indexRequest.isPipelineResolved()); + assertThat(indexRequest.getPipeline(), equalTo("template-pipeline")); + } + } + + public void testSetPipelineOnRequest() { + { + // with request pipeline + var indexRequest = new IndexRequest("idx").setPipeline("request"); + var pipelines = new IngestService.Pipelines("default", "final"); + IngestService.setPipelineOnRequest(indexRequest, pipelines); + assertTrue(indexRequest.isPipelineResolved()); + assertEquals(indexRequest.getPipeline(), "request"); + assertEquals(indexRequest.getFinalPipeline(), "final"); + } + { + // no request pipeline + var indexRequest = new IndexRequest("idx"); + var pipelines = new IngestService.Pipelines("default", "final"); + IngestService.setPipelineOnRequest(indexRequest, pipelines); + assertTrue(indexRequest.isPipelineResolved()); + assertEquals(indexRequest.getPipeline(), "default"); + assertEquals(indexRequest.getFinalPipeline(), "final"); + } + } + public void testUpdatingRandomPipelineWithoutChangesIsNoOp() throws Exception { var randomMap = randomMap(10, 50, IngestServiceTests::randomMapEntry); From 78cbf648e458b5bc803583163d172cab20ae5258 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 15:48:52 +1100 Subject: [PATCH 298/324] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=ml/inference_crud/Test delete given model with alias referenced by pipeline} #116133 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 47d5c3fdf8a37..238f04ccbd684 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -261,6 +261,9 @@ tests: - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT method: testLookbackWithIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/116127 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=ml/inference_crud/Test delete given model with alias referenced by pipeline} + issue: https://github.com/elastic/elasticsearch/issues/116133 # Examples: # From 8e50fb5a620131feb1e60db0a6837f6d65da2ce1 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 16:21:50 +1100 Subject: [PATCH 299/324] Mute org.elasticsearch.xpack.inference.InferenceRestIT test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} #114412 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 238f04ccbd684..38654ccc5dd44 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -264,6 +264,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_crud/Test delete given model with alias referenced by pipeline} issue: https://github.com/elastic/elasticsearch/issues/116133 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} + issue: https://github.com/elastic/elasticsearch/issues/114412 # Examples: # From 1ef054980b6a05684e8712c5fedd9bb6a997c2f4 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 16:33:08 +1100 Subject: [PATCH 300/324] Mute org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT test {categorize.Categorize SYNC} #113054 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 38654ccc5dd44..6bb4c0d21b1aa 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -267,6 +267,9 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/114412 +- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT + method: test {categorize.Categorize SYNC} + issue: https://github.com/elastic/elasticsearch/issues/113054 # Examples: # From 1afc6eaa91488a719a874731e8ff2bb5f360ef71 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 16:33:16 +1100 Subject: [PATCH 301/324] Mute org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT test {categorize.Categorize ASYNC} #113055 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 6bb4c0d21b1aa..2d29520a8ca95 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -270,6 +270,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {categorize.Categorize SYNC} issue: https://github.com/elastic/elasticsearch/issues/113054 +- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT + method: test {categorize.Categorize ASYNC} + issue: https://github.com/elastic/elasticsearch/issues/113055 # Examples: # From 9407519f34be04348cb81be5aadfb31b58c45beb Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 17:05:15 +1100 Subject: [PATCH 302/324] Mute org.elasticsearch.xpack.inference.InferenceRestIT test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} #114376 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 2d29520a8ca95..92094316af9c8 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -273,6 +273,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {categorize.Categorize ASYNC} issue: https://github.com/elastic/elasticsearch/issues/113055 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} + issue: https://github.com/elastic/elasticsearch/issues/114376 # Examples: # From 88938714923e5ceaa6e4a3f94180f7ce37df758e Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 2 Nov 2024 17:33:47 +1100 Subject: [PATCH 303/324] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=ml/inference_crud/Test force delete given model with alias referenced by pipeline} #116136 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 92094316af9c8..bf59db513661c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -276,6 +276,9 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/114376 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=ml/inference_crud/Test force delete given model with alias referenced by pipeline} + issue: https://github.com/elastic/elasticsearch/issues/116136 # Examples: # From 2bae9110612a1f003442cadee6746c5799beb745 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 3 Nov 2024 15:53:35 +1100 Subject: [PATCH 304/324] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=transform/transforms_start_stop/Test start already started transform} #98802 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index bf59db513661c..90328c95055a0 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -279,6 +279,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_crud/Test force delete given model with alias referenced by pipeline} issue: https://github.com/elastic/elasticsearch/issues/116136 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=transform/transforms_start_stop/Test start already started transform} + issue: https://github.com/elastic/elasticsearch/issues/98802 # Examples: # From 0925ab53bbb5e6cb332fb769c30e2a66807ac6ce Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Mon, 4 Nov 2024 00:53:10 +1100 Subject: [PATCH 305/324] Mute org.elasticsearch.action.search.SearchPhaseControllerTests testProgressListener #116149 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 90328c95055a0..9355e89fcc25b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Test start already started transform} issue: https://github.com/elastic/elasticsearch/issues/98802 +- class: org.elasticsearch.action.search.SearchPhaseControllerTests + method: testProgressListener + issue: https://github.com/elastic/elasticsearch/issues/116149 # Examples: # From 82b3b4d7167b3769d404083c152fd56407c578b7 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Mon, 4 Nov 2024 01:09:09 +1100 Subject: [PATCH 306/324] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=ml/forecast/Test forecast unknown job} #116150 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 9355e89fcc25b..cea1903ef33e4 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -285,6 +285,9 @@ tests: - class: org.elasticsearch.action.search.SearchPhaseControllerTests method: testProgressListener issue: https://github.com/elastic/elasticsearch/issues/116149 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=ml/forecast/Test forecast unknown job} + issue: https://github.com/elastic/elasticsearch/issues/116150 # Examples: # From 4573ab8ec18a962ad8dbbda8c2c4a84d7baf2a29 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Mon, 4 Nov 2024 09:39:34 +0200 Subject: [PATCH 307/324] [TEST] Replace _source.mode with index.mapping.source.mode in integration tests - take 2 (#116072) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Reapply "[TEST] Replace _source.mode with index.mapping.source.mode in integra…" (#116069) This reverts commit e8bf344a28c79f71c2db2fff61525d283a04ef56. * [TEST] Replace _source.mode with index.mapping.source.mode in integration tests * add reason * add reason * spotless * revert unneeded --- .../test/aggregations/top_hits.yml | 9 +- .../test/painless/50_script_doc_values.yml | 11 +- .../test/match_only_text/10_basic.yml | 18 +- .../test/rank_feature/30_synthetic_source.yml | 9 +- .../rank_features/20_synthetic_source.yml | 9 +- .../30_synthetic_source.yml | 9 +- .../test/token_count/10_basic.yml | 10 +- .../test/60_synthetic_source.yml | 9 +- .../resources/rest-api-spec/test/10_basic.yml | 9 +- .../test/reindex/110_synthetic_source.yml | 9 +- .../update_by_query/100_synthetic_source.yml | 9 +- .../runtime_fields/270_synthetic_source.yml | 18 +- .../20_synthetic_source.yml | 39 +++-- .../test/mapper_murmur3/10_basic.yml | 8 +- .../test/get/100_synthetic_source.yml | 85 +++++---- .../indices.create/20_synthetic_source.yml | 163 +++++++++++------- .../21_synthetic_source_stored.yml | 101 ++++++----- .../test/indices.put_mapping/10_basic.yml | 32 +--- .../test/logsdb/20_source_mapping.yml | 37 +--- .../test/mget/90_synthetic_source.yml | 25 ++- .../search.highlight/50_synthetic_source.yml | 9 +- .../test/search.vectors/90_sparse_vector.yml | 9 +- .../test/search/350_binary_field.yml | 10 +- .../test/search/400_synthetic_source.yml | 49 ++++-- .../540_ignore_above_synthetic_source.yml | 17 +- .../rest-api-spec/test/tsdb/20_mapping.yml | 20 +-- .../test/update/100_synthetic_source.yml | 16 +- .../index/mapper/MapperFeatures.java | 1 + .../index/mapper/SourceFieldMapper.java | 1 + .../rest-api-spec/test/20_ignored_source.yml | 9 +- .../test/20_synthetic_source.yml | 9 +- .../test/80_synthetic_source.yml | 21 ++- .../test/40_synthetic_source.yml | 14 +- .../100_synthetic_source.yml | 16 +- .../test/analytics/histogram.yml | 27 +-- .../test/enrich/40_synthetic_source.yml | 9 +- .../rest-api-spec/test/esql/30_types.yml | 18 +- .../rest-api-spec/test/esql/80_text.yml | 18 +- .../20_synthetic_source.yml | 8 +- ..._field_level_security_synthetic_source.yml | 26 +-- ...cument_level_security_synthetic_source.yml | 28 +-- .../rest-api-spec/test/snapshot/10_basic.yml | 8 +- .../test/spatial/140_synthetic_source.yml | 45 +++-- .../preview_transforms_synthetic_source.yml | 11 +- .../30_ignore_above_synthetic_source.yml | 8 +- .../test/30_synthetic_source.yml | 16 +- 46 files changed, 615 insertions(+), 427 deletions(-) diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml index ed24e1cc8404c..48fe6a5f4dbbf 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml @@ -349,16 +349,17 @@ sequence number and primary term: --- synthetic _source: - requires: - cluster_features: ["gte_v8.4.0"] - reason: introduced in 8.4.0 + cluster_features: ["mapper.source.mode_from_index_setting"] + reason: "Source mode configured through index setting" - do: indices.create: index: test_synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: page: type: keyword diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml index ee6803d809087..b908f729a0159 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -1,4 +1,7 @@ setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: test @@ -180,9 +183,9 @@ setup: body: settings: number_of_shards: 1 + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: boolean: type: boolean @@ -5630,9 +5633,9 @@ version and sequence number synthetic _source: body: settings: number_of_shards: 1 + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: keyword: type: keyword diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml index b4ee226f72692..821ab46b1bd64 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml @@ -278,16 +278,17 @@ setup: --- synthetic_source: - requires: - cluster_features: ["gte_v8.4.0"] - reason: synthetic source introduced in 8.4.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: foo: type: match_only_text @@ -355,16 +356,17 @@ tsdb: --- synthetic_source with copy_to: - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires copy_to support in synthetic source + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: foo: type: match_only_text diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml index 1e0b90ebb9e0f..ccf3c689bdfd6 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/30_synthetic_source.yml @@ -1,15 +1,16 @@ setup: - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: pagerank: type: rank_feature diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml index c64e35cc2cea4..870e673044c0b 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/20_synthetic_source.yml @@ -1,15 +1,16 @@ setup: - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: tags: type: rank_features diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml index 75397bd9e0fe9..0ca2306256064 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/30_synthetic_source.yml @@ -1,15 +1,16 @@ setup: - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: a_field: type: search_as_you_type diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml index 03b72a2623497..d92b807ebd6ce 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/token_count/10_basic.yml @@ -36,15 +36,17 @@ --- "Synthetic source": - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: count: type: token_count diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml index 12d0f1bbae6c7..6a8b6cfbd1a2e 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml @@ -1,15 +1,16 @@ supported: - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: join_field: type: join diff --git a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml index a5576d203314f..53fca8b664e59 100644 --- a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml +++ b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml @@ -130,16 +130,17 @@ --- "Synthetic source": - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: queries_index body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: query: type: percolator diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml index 9ae2153f89ca5..6d65e42aa223d 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/110_synthetic_source.yml @@ -1,11 +1,16 @@ setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + - do: indices.create: index: synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml index 4329bf8ed471a..b9484dcf7db29 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/100_synthetic_source.yml @@ -1,11 +1,16 @@ update: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + - do: indices.create: index: synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml index 8832b3230910c..46ff988ac6e67 100644 --- a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/270_synthetic_source.yml @@ -1,16 +1,17 @@ --- keywords: - requires: - cluster_features: ["gte_v7.12.0"] - reason: Runtime mappings support was added in 7.12 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: index1 body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: field1: type: keyword @@ -69,16 +70,17 @@ keywords: --- doubles: - requires: - cluster_features: ["gte_v7.12.0"] - reason: Runtime mappings support was added in 7.12 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: index1 body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: field1: type: double diff --git a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml index 4aac881700e15..ca3a9baa7b694 100644 --- a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml +++ b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml @@ -1,8 +1,8 @@ --- setup: - requires: - cluster_features: ["mapper.annotated_text.synthetic_source"] - reason: introduced in 8.15.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" --- stored annotated_text field: @@ -10,9 +10,10 @@ stored annotated_text field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -40,9 +41,10 @@ annotated_text field with keyword multi-field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -72,9 +74,10 @@ multiple values in stored annotated_text field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -102,9 +105,10 @@ multiple values in annotated_text field with keyword multi-field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -135,9 +139,10 @@ multiple values in annotated_text field with stored keyword multi-field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -169,9 +174,10 @@ multiple values in stored annotated_text field with keyword multi-field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text @@ -202,9 +208,10 @@ fallback synthetic source: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: annotated_text: type: annotated_text diff --git a/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml b/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml index 12b23fb3b0395..4a2ca8544fe8d 100644 --- a/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml +++ b/plugins/mapper-murmur3/src/yamlRestTest/resources/rest-api-spec/test/mapper_murmur3/10_basic.yml @@ -129,14 +129,18 @@ setup: --- "Murmur3 docvalue_fields api with synthetic source": + - requires: + cluster_features: ["mapper.source.mode_from_index_setting"] + reason: "Source mode configured through index setting" - do: indices.create: index: test_synthetic_source body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: foo: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml index a7600da575cd3..13f6ca58ea295 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml @@ -1,3 +1,9 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + +--- keyword: - requires: cluster_features: ["gte_v8.4.0"] @@ -7,9 +13,10 @@ keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -48,9 +55,8 @@ fetch without refresh also produces synthetic source: settings: index: refresh_interval: -1 + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: obj: properties: @@ -90,9 +96,10 @@ force_synthetic_source_ok: indices.create: index: test body: + settings: + index: + mapping.source.mode: stored mappings: - _source: - mode: stored properties: obj: properties: @@ -139,9 +146,10 @@ stored text: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: text: type: text @@ -212,9 +220,10 @@ stored keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -253,9 +262,10 @@ doc values keyword with ignore_above: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -336,9 +346,10 @@ stored keyword with ignore_above: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -421,9 +432,10 @@ indexed dense vectors: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -465,9 +477,10 @@ non-indexed dense vectors: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -508,9 +521,10 @@ _source filtering: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -550,9 +564,9 @@ _doc_count: indices.create: index: test body: - mappings: - _source: - mode: synthetic + settings: + index: + mapping.source.mode: synthetic # with _doc_count - do: @@ -679,9 +693,10 @@ fields with ignore_malformed: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: ip: type: ip @@ -914,9 +929,10 @@ flattened field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: flattened: type: flattened @@ -1006,9 +1022,10 @@ flattened field with ignore_above: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: field: type: flattened @@ -1061,9 +1078,10 @@ flattened field with ignore_above and arrays: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: field: type: flattened @@ -1117,9 +1135,10 @@ completion: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: completion: type: completion diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index cc5fd0e08e695..af3d88fb35734 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -1,3 +1,8 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + --- object with unmapped fields: - requires: @@ -11,13 +16,11 @@ object with unmapped fields: settings: index: mapping: + source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 1 - mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -64,13 +67,12 @@ unmapped arrays: settings: index: mapping: + source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 1 mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -111,13 +113,12 @@ nested object with unmapped fields: settings: index: mapping: + source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 3 mappings: - _source: - mode: synthetic properties: path: properties: @@ -163,13 +164,12 @@ empty object with unmapped fields: settings: index: mapping: + source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 3 mappings: - _source: - mode: synthetic properties: path: properties: @@ -205,9 +205,10 @@ disabled root object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic enabled: false - do: @@ -242,9 +243,10 @@ disabled object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: enabled: false @@ -279,9 +281,10 @@ disabled object contains array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: enabled: false @@ -319,9 +322,10 @@ disabled subobject: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: properties: @@ -357,9 +361,10 @@ disabled subobject with array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: properties: @@ -396,9 +401,10 @@ mixed disabled and enabled objects: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: properties: @@ -442,9 +448,10 @@ object with dynamic override: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path_no: dynamic: false @@ -489,9 +496,10 @@ subobject with dynamic override: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: properties: @@ -537,9 +545,10 @@ object array in object with dynamic override: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -591,9 +600,10 @@ value array in object with dynamic override: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path_no: dynamic: false @@ -634,9 +644,10 @@ nested object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: nested_field: type: nested @@ -679,9 +690,10 @@ nested object next to regular: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: path: properties: @@ -725,9 +737,10 @@ nested object with disabled: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: obj_field: properties: @@ -813,9 +826,10 @@ doubly nested object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: obj_field: properties: @@ -908,9 +922,10 @@ subobjects auto: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic subobjects: auto properties: id: @@ -996,9 +1011,10 @@ synthetic_source with copy_to: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: number: type: integer @@ -1132,9 +1148,10 @@ synthetic_source with disabled doc_values: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: number: type: integer @@ -1215,9 +1232,10 @@ fallback synthetic_source for text field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: text: type: text @@ -1249,9 +1267,10 @@ synthetic_source with copy_to and ignored values: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1317,9 +1336,10 @@ synthetic_source with copy_to field having values in source: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1380,9 +1400,10 @@ synthetic_source with ignored source field using copy_to: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1444,9 +1465,10 @@ synthetic_source with copy_to field from dynamic template having values in sourc indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic dynamic_templates: - copy_template: match: "k" @@ -1541,9 +1563,10 @@ synthetic_source with copy_to and invalid values for copy: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1577,9 +1600,10 @@ synthetic_source with copy_to pointing inside object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1681,9 +1705,10 @@ synthetic_source with copy_to pointing to ambiguous field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: k: type: keyword @@ -1728,9 +1753,10 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects false: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic subobjects: false properties: k: @@ -1776,9 +1802,10 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects auto: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic subobjects: auto properties: k: @@ -1825,9 +1852,10 @@ synthetic_source with copy_to pointing at dynamic field: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1911,9 +1939,10 @@ synthetic_source with copy_to pointing inside dynamic object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml index f3545bb0a3f0e..100cbf1b46625 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -1,3 +1,8 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + --- object param - store complex object: - requires: @@ -8,9 +13,10 @@ object param - store complex object: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -72,9 +78,10 @@ object param - object array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -136,9 +143,10 @@ object param - object array within array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: stored: synthetic_source_keep: arrays @@ -179,9 +187,10 @@ object param - no object array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: stored: synthetic_source_keep: arrays @@ -221,9 +230,10 @@ object param - field ordering in object array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: a: type: keyword @@ -270,9 +280,10 @@ object param - nested object array next to other fields: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: a: type: keyword @@ -326,9 +337,10 @@ object param - nested object with stored array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -378,9 +390,10 @@ index param - nested array within array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -428,9 +441,9 @@ index param - nested array within array - disabled second pass: index: synthetic_source: enable_second_doc_parsing_pass: false + mapping.source.mode: synthetic + mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -478,9 +491,8 @@ stored field under object with store_array_source: index: sort.field: "name" sort.order: "asc" + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -525,9 +537,10 @@ field param - keep root array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -582,9 +595,10 @@ field param - keep nested array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -650,9 +664,10 @@ field param - keep root singleton fields: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -739,9 +754,10 @@ field param - keep nested singleton fields: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: id: type: integer @@ -820,9 +836,10 @@ field param - nested array within array: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -866,10 +883,9 @@ index param - root arrays: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: id: type: integer @@ -945,10 +961,9 @@ index param - dynamic root arrays: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: id: type: integer @@ -998,10 +1013,9 @@ index param - object array within array: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: stored: properties: @@ -1048,10 +1062,9 @@ index param - no object array: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: stored: properties: @@ -1093,10 +1106,9 @@ index param - field ordering: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: a: type: keyword @@ -1144,10 +1156,9 @@ index param - nested arrays: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: a: type: keyword @@ -1212,10 +1223,9 @@ index param - nested object with stored array: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -1264,10 +1274,9 @@ index param - flattened fields: settings: index: mapping: + source.mode: synthetic synthetic_source_keep: arrays mappings: - _source: - mode: synthetic properties: name: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml index 75d282d524607..335e0b4783bf0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml @@ -145,41 +145,19 @@ - is_false: test_index.mappings.properties.foo.meta.bar - match: { test_index.mappings.properties.foo.meta.baz: "quux" } ---- -"disabling synthetic source fails": - - requires: - cluster_features: ["gte_v8.4.0"] - reason: "Added in 8.4.0" - - - do: - indices.create: - index: test_index - body: - mappings: - _source: - mode: synthetic - - - do: - catch: /Cannot update parameter \[mode\] from \[synthetic\] to \[stored\]/ - indices.put_mapping: - index: test_index - body: - _source: - mode: stored - --- "enabling synthetic source from explicit succeeds": - requires: - cluster_features: [ "gte_v8.4.0" ] - reason: "Added in 8.4.0" + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: test_index body: - mappings: - _source: - mode: stored + settings: + index: + mapping.source.mode: stored - do: indices.put_mapping: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml index b4709a4e4d176..27146557bb1be 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml @@ -1,9 +1,10 @@ ---- -synthetic _source is default: +setup: - requires: - cluster_features: ["mapper.source.remove_synthetic_source_only_validation"] - reason: requires new validation logic + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" +--- +synthetic _source is default: - do: indices.create: index: test-default-source @@ -19,10 +20,6 @@ synthetic _source is default: --- stored _source mode is supported: - - requires: - cluster_features: ["mapper.source.remove_synthetic_source_only_validation"] - reason: requires new validation logic - - do: indices.create: index: test-stored-source @@ -30,9 +27,7 @@ stored _source mode is supported: settings: index: mode: logsdb - mappings: - _source: - mode: stored + mapping.source.mode: stored - do: indices.get: index: test-stored-source @@ -41,10 +36,6 @@ stored _source mode is supported: --- disabled _source is not supported: - - requires: - cluster_features: ["mapper.source.remove_synthetic_source_only_validation"] - reason: requires new error message - - do: catch: bad_request indices.create: @@ -69,9 +60,7 @@ disabled _source is not supported: settings: index: mode: logsdb - mappings: - _source: - mode: disabled + mapping.source.mode: disabled - match: { error.type: "mapper_parsing_exception" } - match: { error.root_cause.0.type: "mapper_parsing_exception" } @@ -79,10 +68,6 @@ disabled _source is not supported: --- include/exclude is not supported with synthetic _source: - - requires: - cluster_features: ["mapper.source.remove_synthetic_source_only_validation"] - reason: requires new validation logic - - do: catch: '/filtering the stored _source is incompatible with synthetic source/' indices.create: @@ -109,10 +94,6 @@ include/exclude is not supported with synthetic _source: --- include/exclude is supported with stored _source: - - requires: - cluster_features: ["mapper.source.remove_synthetic_source_only_validation"] - reason: requires new validation logic - - do: indices.create: index: test-includes @@ -120,9 +101,9 @@ include/exclude is supported with stored _source: settings: index: mode: logsdb + mapping.source.mode: stored mappings: _source: - mode: stored includes: [a] - do: @@ -139,9 +120,9 @@ include/exclude is supported with stored _source: settings: index: mode: logsdb + mapping.source.mode: stored mappings: _source: - mode: stored excludes: [b] - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml index 2f3d2fa2f974d..084f104932d99 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml @@ -1,3 +1,9 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + +--- keyword: - requires: cluster_features: ["gte_v8.4.0"] @@ -7,9 +13,10 @@ keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -62,9 +69,9 @@ keyword with normalizer: type: custom filter: - lowercase + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: keyword: type: keyword @@ -144,9 +151,10 @@ stored text: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: text: type: text @@ -193,9 +201,10 @@ force_synthetic_source_ok: indices.create: index: test body: + settings: + index: + mapping.source.mode: stored mappings: - _source: - mode: stored properties: obj: properties: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml index a2fd448f5044d..657eeb759c6c6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml @@ -1,7 +1,7 @@ setup: - requires: - cluster_features: ["gte_v8.4.0"] - reason: introduced in 8.4.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: @@ -9,9 +9,9 @@ setup: body: settings: number_of_shards: 1 + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: foo: type: keyword @@ -21,6 +21,7 @@ setup: index_options: positions vectors: type: text + store: false term_vector: with_positions_offsets positions: type: text diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml index 27f12f394c6a4..2505e6d7e353b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml @@ -387,16 +387,17 @@ "sparse_vector synthetic source": - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: ml.tokens: type: sparse_vector diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml index 455d06ba2a984..82b85363b8ad6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_binary_field.yml @@ -49,15 +49,17 @@ --- "binary synthetic source": - requires: - cluster_features: "gte_v8.15.0" - reason: synthetic source support introduced in 8.15 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: binary: type: binary diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml index 0cc1796bb47de..0d3cbfb696d13 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml @@ -1,3 +1,9 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + +--- keyword: - requires: cluster_features: ["gte_v8.4.0"] @@ -7,9 +13,10 @@ keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -44,9 +51,10 @@ stored text: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: text: type: text @@ -83,8 +91,6 @@ stored keyword: index: test body: mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -120,9 +126,10 @@ stored keyword without sibling fields: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -165,9 +172,10 @@ force_synthetic_source_ok: indices.create: index: test body: + settings: + index: + mapping.source.mode: stored mappings: - _source: - mode: stored properties: obj: properties: @@ -218,9 +226,10 @@ doc values keyword with ignore_above: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -286,9 +295,10 @@ stored keyword with ignore_above: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -356,9 +366,10 @@ _source filtering: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -397,9 +408,9 @@ _doc_count: indices.create: index: test body: - mappings: - _source: - mode: synthetic + settings: + index: + mapping.source.mode: synthetic - do: index: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml index 435cda637cca6..772c3c24170cd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml @@ -1,3 +1,8 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + --- ignore_above mapping level setting: - requires: @@ -10,10 +15,9 @@ ignore_above mapping level setting: settings: index: mapping: + source.mode: synthetic ignore_above: 10 mappings: - _source: - mode: synthetic properties: keyword: type: keyword @@ -53,10 +57,9 @@ ignore_above mapping level setting on arrays: settings: index: mapping: + source.mode: synthetic ignore_above: 10 mappings: - _source: - mode: synthetic properties: keyword: type: keyword @@ -97,10 +100,9 @@ ignore_above mapping overrides setting: settings: index: mapping: + source.mode: synthetic ignore_above: 10 mappings: - _source: - mode: synthetic properties: keyword: type: keyword @@ -143,10 +145,9 @@ ignore_above mapping overrides setting on arrays: settings: index: mapping: + source.mode: synthetic ignore_above: 10 mappings: - _source: - mode: synthetic properties: keyword: type: keyword diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index c5669cd6414b1..4d8f03a6e5e18 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -427,8 +427,8 @@ nested fields: --- "Synthetic source": - requires: - cluster_features: ["gte_v8.10.0"] - reason: Synthetic source shows up in the mapping in 8.10 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: @@ -458,8 +458,8 @@ nested fields: --- stored source is supported: - requires: - cluster_features: ["mapper.source.remove_synthetic_source_only_validation"] - reason: requires new validation logic + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: @@ -472,9 +472,9 @@ stored source is supported: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z + mapping: + source.mode: stored mappings: - _source: - mode: stored properties: "@timestamp": type: date @@ -495,8 +495,8 @@ stored source is supported: --- disabled source is not supported: - requires: - cluster_features: ["mapper.source.remove_synthetic_source_only_validation"] - reason: requires new error message + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: catch: bad_request @@ -510,9 +510,9 @@ disabled source is not supported: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z + mapping: + source.mode: disabled mappings: - _source: - mode: disabled properties: "@timestamp": type: date diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml index f74fde7eb2a24..f4894692b6cad 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml @@ -1,3 +1,9 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + +--- keyword: - requires: cluster_features: ["gte_v8.4.0"] @@ -7,9 +13,10 @@ keyword: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: kwd: type: keyword @@ -65,9 +72,10 @@ stored text: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: text: type: text diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 4797857fc12f8..5743baeec536d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -63,6 +63,7 @@ public Set getTestFeatures() { RangeFieldMapper.DATE_RANGE_INDEXING_FIX, IgnoredSourceFieldMapper.DONT_EXPAND_DOTS_IN_IGNORED_SOURCE, SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION, + SourceFieldMapper.SOURCE_MODE_FROM_INDEX_SETTING, IgnoredSourceFieldMapper.IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD, IgnoredSourceFieldMapper.ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS, MapperService.LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 1162734c0dc81..dd25cd6eb80a3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -54,6 +54,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { public static final NodeFeature REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION = new NodeFeature( "mapper.source.remove_synthetic_source_only_validation" ); + public static final NodeFeature SOURCE_MODE_FROM_INDEX_SETTING = new NodeFeature("mapper.source.mode_from_index_setting"); public static final String NAME = "_source"; public static final String RECOVERY_SOURCE_NAME = "_recovery_source"; diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml index 2f111d579ebb1..61d3c7c8971e0 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml @@ -2,13 +2,18 @@ setup: - skip: features: headers + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: object: enabled: false diff --git a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml index b64fb7b822713..d40f69f483dbb 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml +++ b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml @@ -1,15 +1,16 @@ constant_keyword: - requires: - cluster_features: ["gte_v8.4.0"] - reason: introduced in 8.4.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: const_kwd: type: constant_keyword diff --git a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml index b88fca3c478a9..22f69e30650fd 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml +++ b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml @@ -1,3 +1,9 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + +--- synthetic source: - requires: cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] @@ -7,9 +13,10 @@ synthetic source: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -52,9 +59,10 @@ synthetic source with copy_to: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -111,9 +119,10 @@ synthetic source with disabled doc_values: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword diff --git a/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml b/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml index 1ec91f5fde8d1..3095a19fa29d0 100644 --- a/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml +++ b/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml @@ -1,15 +1,16 @@ setup: - requires: - cluster_features: ["gte_v8.5.0"] - reason: "synthetic source support added in 8.5.0" + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: test1 body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: ver: type: version @@ -76,9 +77,10 @@ synthetic source with copy_to: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: ver: type: version diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml index cc0e8aff9b239..2576a51e8b80e 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml @@ -1,3 +1,9 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + +--- aggregate_metric_double: - requires: cluster_features: ["gte_v8.5.0"] @@ -7,9 +13,10 @@ aggregate_metric_double: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: metric: type: aggregate_metric_double @@ -62,9 +69,10 @@ aggregate_metric_double with ignore_malformed: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: metric: type: aggregate_metric_double diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml index 726b9d153025e..d11da1fe24f3f 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml @@ -175,16 +175,17 @@ setup: --- histogram with synthetic source: - requires: - cluster_features: ["gte_v8.5.0"] - reason: introduced in 8.5.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: histo_synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: latency: type: histogram @@ -221,16 +222,17 @@ histogram with synthetic source: --- histogram with synthetic source and zero counts: - requires: - cluster_features: ["gte_v8.5.0"] - reason: introduced in 8.5.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: histo_synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: latency: type: histogram @@ -310,16 +312,17 @@ histogram with large count values: --- histogram with synthetic source and ignore_malformed: - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: introduced in 8.15.0 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: histo_synthetic body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: latency: type: histogram diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml index 1c2e1cd922a65..0c985c7548765 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/40_synthetic_source.yml @@ -1,12 +1,17 @@ --- setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + - do: indices.create: index: source body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: baz: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml index cfc7f2e4036fb..1f9ff72669309 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml @@ -805,13 +805,18 @@ text: --- synthetic _source text stored: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: card: type: text @@ -836,13 +841,18 @@ synthetic _source text stored: --- synthetic _source text with parent keyword: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + - do: indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: card: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml index 55bd39bdd73cc..3a989d2c87bf3 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml @@ -477,13 +477,18 @@ setup: --- "text with synthetic source": + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + - do: indices.create: index: test2 body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: "emp_no": type: long @@ -522,13 +527,18 @@ setup: --- "stored text with synthetic source": + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + - do: indices.create: index: test2 body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: "emp_no": type: long diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml index b1ab120fff441..bbf301b360207 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml @@ -1,8 +1,8 @@ --- setup: - requires: - cluster_features: ["gte_v8.5.0"] - reason: added in 8.5 + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: @@ -11,9 +11,9 @@ setup: settings: number_of_shards: 1 number_of_replicas: 0 + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: obj: properties: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml index b971c246ac50a..301cb01acd2d3 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml @@ -2,6 +2,9 @@ setup: - skip: features: headers + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: cluster.health: @@ -13,9 +16,10 @@ Filter single field: indices.create: index: index_fls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -75,9 +79,10 @@ Filter fields in object: indices.create: index: index_fls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -142,9 +147,10 @@ Fields under a disabled object - uses _ignored_source: indices.create: index: index_fls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -236,12 +242,11 @@ Dynamic fields beyond limit - uses _ignored_source: settings: index: mapping: + source.mode: synthetic total_fields: ignore_dynamic_beyond_limit: true limit: 2 mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -301,9 +306,10 @@ Field with ignored_malformed: indices.create: index: index_fls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml index 52abe0a3d83d7..3f614f06504f9 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml @@ -2,6 +2,9 @@ setup: - skip: features: headers + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: cluster.health: @@ -13,9 +16,10 @@ Filter on single field: indices.create: index: index_dls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -95,9 +99,10 @@ Filter on nested field: indices.create: index: index_dls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -178,9 +183,10 @@ Filter on object with stored source: indices.create: index: index_dls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -258,9 +264,10 @@ Filter on field within a disabled object: indices.create: index: index_dls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -335,9 +342,10 @@ Filter on field with ignored_malformed: indices.create: index: index_dls body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml index e1b297f1b5d78..9ba0b5e4088af 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/snapshot/10_basic.yml @@ -95,17 +95,19 @@ setup: "Failed to snapshot indices with synthetic source": - skip: features: ["allowed_warnings"] + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" - do: indices.create: index: test_synthetic body: - mappings: - _source: - mode: synthetic settings: number_of_shards: 1 number_of_replicas: 0 + index: + mapping.source.mode: synthetic - do: snapshot.create: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml index 700142cec9987..5e9faa84ee088 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml @@ -1,3 +1,8 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + --- "geo_shape": - requires: @@ -8,9 +13,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: shape: type: geo_shape @@ -74,9 +80,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: shape: type: geo_shape @@ -157,9 +164,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: shape: type: shape @@ -223,9 +231,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: shape: type: shape @@ -306,9 +315,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: point: type: geo_point @@ -422,9 +432,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: geo_point: type: geo_point @@ -501,9 +512,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: point: type: point @@ -597,9 +609,10 @@ indices.create: index: test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: point: type: point diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml index 08055946a7831..a700170ace107 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/preview_transforms_synthetic_source.yml @@ -1,3 +1,9 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + +--- simple: - skip: features: headers @@ -6,9 +12,10 @@ simple: indices.create: index: airline-data body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: time: type: date diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml index 2e3ba773fb0f2..26beb3aa19075 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml @@ -1,3 +1,8 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + --- wildcard field type ignore_above: - requires: @@ -10,10 +15,9 @@ wildcard field type ignore_above: settings: index: mapping: + source.mode: synthetic ignore_above: 10 mappings: - _source: - mode: synthetic properties: a_wildcard: type: wildcard diff --git a/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml b/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml index ffa76f7433985..14e4a6f5aaef8 100644 --- a/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml +++ b/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml @@ -1,3 +1,9 @@ +setup: + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting" ] + reason: "Source mode configured through index setting" + +--- synthetic source: - requires: cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] @@ -7,9 +13,10 @@ synthetic source: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword @@ -48,9 +55,10 @@ synthetic source with copy_to: indices.create: index: synthetic_source_test body: + settings: + index: + mapping.source.mode: synthetic mappings: - _source: - mode: synthetic properties: name: type: keyword From ac9e0a559723f3175a20c13f434659324d8d47b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20J=C3=B3zala?= <377355+jozala@users.noreply.github.com> Date: Mon, 4 Nov 2024 09:44:07 +0100 Subject: [PATCH 308/324] Fixed DefaultSettingsProvider to use Java 17 (#116029) It is needed to be able to backport the change to the 8.x. Having same code for both versions can help us to avoid some merge conflicts in the future. --- .../test/cluster/local/DefaultSettingsProvider.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSettingsProvider.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSettingsProvider.java index 67d19895ccc30..b53f4ece46134 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSettingsProvider.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSettingsProvider.java @@ -95,9 +95,6 @@ public Map get(LocalNodeSpec nodeSpec) { private boolean isMultiNodeCluster(LocalClusterSpec cluster) { return cluster.getNodes().size() > 1 - || cluster.getNodes() - .getFirst() - .getSetting(DISCOVERY_TYPE_SETTING, MULTI_NODE_DISCOVERY_TYPE) - .equals(MULTI_NODE_DISCOVERY_TYPE); + || cluster.getNodes().get(0).getSetting(DISCOVERY_TYPE_SETTING, MULTI_NODE_DISCOVERY_TYPE).equals(MULTI_NODE_DISCOVERY_TYPE); } } From 3fc349b66b48b7aee805ed4be9a8b474115c11af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Mon, 4 Nov 2024 10:20:24 +0100 Subject: [PATCH 309/324] Unmute tests (#116160) --- muted-tests.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index cea1903ef33e4..d7b4c472273a6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -14,12 +14,6 @@ tests: - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" -- class: org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectAuthIT - method: testAuthenticateWithImplicitFlow - issue: https://github.com/elastic/elasticsearch/issues/111191 -- class: org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectAuthIT - method: testAuthenticateWithCodeFlowAndClientPost - issue: https://github.com/elastic/elasticsearch/issues/111396 - class: org.elasticsearch.xpack.restart.FullClusterRestartIT method: testSingleDoc {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111434 From 6cf45366d5ba34f97c40d79439b769e6ad9ab581 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Mon, 4 Nov 2024 12:32:43 +0200 Subject: [PATCH 310/324] Track source for objects and fields with [synthetic_source_keep:arrays] in arrays as ignored (#116065) * Track source for objects and fields with [synthetic_source_keep:arrays] in arrays as ignored * Update TransportResumeFollowActionTests.java * rest compat fixes * rest compat fixes * update test --- rest-api-spec/build.gradle | 6 +- .../21_synthetic_source_stored.yml | 62 +------- .../common/settings/IndexScopedSettings.java | 1 - .../elasticsearch/index/IndexSettings.java | 21 --- .../index/mapper/DocumentParser.java | 141 +----------------- .../index/mapper/DocumentParserContext.java | 51 +------ .../mapper/IgnoredSourceFieldMapperTests.java | 8 +- .../TransportResumeFollowActionTests.java | 1 - 8 files changed, 28 insertions(+), 263 deletions(-) diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index c520fcd4a7f81..3532e08e8f659 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -59,10 +59,8 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.replaceValueInMatch("profile.shards.0.dfs.knn.0.query.0.description", "DocAndScoreQuery[0,...][0.009673266,...],0.009673266", "dfs knn vector profiling with vector_operations_count") task.skipTest("indices.sort/10_basic/Index Sort", "warning does not exist for compatibility") task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") - task.skipTest("indices.create/20_synthetic_source/object with dynamic override", "temporary until backported") - task.skipTest("indices.create/20_synthetic_source/object with unmapped fields", "temporary until backported") - task.skipTest("indices.create/20_synthetic_source/empty object with unmapped fields", "temporary until backported") - task.skipTest("indices.create/20_synthetic_source/nested object with unmapped fields", "temporary until backported") + task.skipTest("indices.create/21_synthetic_source_stored/index param - nested array within array - disabled second pass", "temporary until backported") + task.skipTest("indices.create/21_synthetic_source_stored/index param - root arrays", "temporary until backported") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") task.skipTest("cat.shards/10_basic/Help", "sync_id is removed in 9.0") diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml index 100cbf1b46625..095665e9337b1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -368,11 +368,8 @@ object param - nested object with stored array: sort: name - match: { hits.total.value: 2 } - match: { hits.hits.0._source.name: A } - # due to a workaround for #115261 - - match: { hits.hits.0._source.nested_array_regular.0.b.0.c: 10 } - - match: { hits.hits.0._source.nested_array_regular.0.b.1.c: 100 } - - match: { hits.hits.0._source.nested_array_regular.1.b.0.c: 20 } - - match: { hits.hits.0._source.nested_array_regular.1.b.1.c: 200 } + - match: { hits.hits.0._source.nested_array_regular.0.b.c: [ 10, 100 ] } + - match: { hits.hits.0._source.nested_array_regular.1.b.c: [ 20, 200 ] } - match: { hits.hits.1._source.name: B } - match: { hits.hits.1._source.nested_array_stored.0.b.0.c: 10 } - match: { hits.hits.1._source.nested_array_stored.0.b.1.c: 100 } @@ -427,55 +424,6 @@ index param - nested array within array: - match: { hits.hits.0._source.path.to.some.3.id: [ 1000, 2000 ] } ---- -index param - nested array within array - disabled second pass: - - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - - do: - indices.create: - index: test - body: - settings: - index: - synthetic_source: - enable_second_doc_parsing_pass: false - mapping.source.mode: synthetic - - mappings: - properties: - name: - type: keyword - path: - properties: - to: - properties: - some: - synthetic_source_keep: arrays - properties: - id: - type: integer - - - do: - bulk: - index: test - refresh: true - body: - - '{ "create": { } }' - - '{ "name": "A", "path": [ { "to": [ { "some" : [ { "id": 10 }, { "id": [1, 3, 2] } ] }, { "some": { "id": 100 } } ] }, { "to": { "some": { "id": [1000, 2000] } } } ] }' - - match: { errors: false } - - - do: - search: - index: test - sort: name - - match: { hits.hits.0._source.name: A } - - length: { hits.hits.0._source.path.to.some: 2} - - match: { hits.hits.0._source.path.to.some.0.id: 10 } - - match: { hits.hits.0._source.path.to.some.1.id: [ 1, 3, 2] } - - --- # 112156 stored field under object with store_array_source: @@ -944,8 +892,10 @@ index param - root arrays: - match: { hits.hits.1._source.obj.1.span.id: "2" } - match: { hits.hits.2._source.id: 3 } - - match: { hits.hits.2._source.obj_default.trace.id: [aa, bb] } - - match: { hits.hits.2._source.obj_default.span.id: "2" } + - match: { hits.hits.2._source.obj_default.trace.0.id: bb } + - match: { hits.hits.2._source.obj_default.trace.1.id: aa } + - match: { hits.hits.2._source.obj_default.span.0.id: "2" } + - match: { hits.hits.2._source.obj_default.span.1.id: "2" } --- diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index f5276bbe49b63..884ce38fba391 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -187,7 +187,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { FieldMapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING, IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING, IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING, - IndexSettings.SYNTHETIC_SOURCE_SECOND_DOC_PARSING_PASS_SETTING, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING, // validate that built-in similarities don't get redefined diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 25e9c1e3701fb..5bea838f9d70c 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -653,13 +653,6 @@ public Iterator> settings() { Property.Final ); - public static final Setting SYNTHETIC_SOURCE_SECOND_DOC_PARSING_PASS_SETTING = Setting.boolSetting( - "index.synthetic_source.enable_second_doc_parsing_pass", - true, - Property.IndexScope, - Property.Dynamic - ); - /** * Returns true if TSDB encoding is enabled. The default is true */ @@ -829,7 +822,6 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { private volatile long mappingDimensionFieldsLimit; private volatile boolean skipIgnoredSourceWrite; private volatile boolean skipIgnoredSourceRead; - private volatile boolean syntheticSourceSecondDocParsingPassEnabled; private final SourceFieldMapper.Mode indexMappingSourceMode; private final boolean recoverySourceEnabled; @@ -992,7 +984,6 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti es87TSDBCodecEnabled = scopedSettings.get(TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING); skipIgnoredSourceWrite = scopedSettings.get(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING); skipIgnoredSourceRead = scopedSettings.get(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING); - syntheticSourceSecondDocParsingPassEnabled = scopedSettings.get(SYNTHETIC_SOURCE_SECOND_DOC_PARSING_PASS_SETTING); indexMappingSourceMode = scopedSettings.get(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING); recoverySourceEnabled = RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING.get(nodeSettings); @@ -1082,10 +1073,6 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti this::setSkipIgnoredSourceWrite ); scopedSettings.addSettingsUpdateConsumer(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING, this::setSkipIgnoredSourceRead); - scopedSettings.addSettingsUpdateConsumer( - SYNTHETIC_SOURCE_SECOND_DOC_PARSING_PASS_SETTING, - this::setSyntheticSourceSecondDocParsingPassEnabled - ); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { @@ -1678,14 +1665,6 @@ private void setSkipIgnoredSourceRead(boolean value) { this.skipIgnoredSourceRead = value; } - private void setSyntheticSourceSecondDocParsingPassEnabled(boolean syntheticSourceSecondDocParsingPassEnabled) { - this.syntheticSourceSecondDocParsingPassEnabled = syntheticSourceSecondDocParsingPassEnabled; - } - - public boolean isSyntheticSourceSecondDocParsingPassEnabled() { - return syntheticSourceSecondDocParsingPassEnabled; - } - public SourceFieldMapper.Mode getIndexMappingSourceMode() { return indexMappingSourceMode; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index bde9b0fb8a4ab..82004356ceb57 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -35,16 +35,13 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.function.Consumer; import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.MAX_DIMS_COUNT; @@ -148,9 +145,6 @@ private void internalParseDocument(MetadataFieldMapper[] metadataFieldsMappers, executeIndexTimeScripts(context); - // Record additional entries for {@link IgnoredSourceFieldMapper} before calling #postParse, so that they get stored. - addIgnoredSourceMissingValues(context); - for (MetadataFieldMapper metadataMapper : metadataFieldsMappers) { metadataMapper.postParse(context); } @@ -159,128 +153,6 @@ private void internalParseDocument(MetadataFieldMapper[] metadataFieldsMappers, } } - private void addIgnoredSourceMissingValues(DocumentParserContext context) throws IOException { - Collection ignoredFieldsMissingValues = context.getIgnoredFieldsMissingValues(); - if (ignoredFieldsMissingValues.isEmpty()) { - return; - } - - // Clean up any conflicting ignored values, to avoid double-printing them as array elements in synthetic source. - Map fields = new HashMap<>(ignoredFieldsMissingValues.size()); - for (var field : ignoredFieldsMissingValues) { - fields.put(field.name(), field); - } - context.deduplicateIgnoredFieldValues(fields.keySet()); - - assert context.mappingLookup().isSourceSynthetic(); - try ( - XContentParser parser = XContentHelper.createParser( - parserConfiguration, - context.sourceToParse().source(), - context.sourceToParse().getXContentType() - ) - ) { - DocumentParserContext newContext = new RootDocumentParserContext( - context.mappingLookup(), - mappingParserContext, - context.sourceToParse(), - parser - ); - var nameValues = parseDocForMissingValues(newContext, fields); - for (var nameValue : nameValues) { - context.addIgnoredField(nameValue); - } - } - } - - /** - * Simplified parsing version for retrieving the source of a given set of fields. - */ - private static List parseDocForMissingValues( - DocumentParserContext context, - Map fields - ) throws IOException { - // Generate all possible parent names for the given fields. - // This is used to skip processing objects that can't generate missing values. - Set parentNames = getPossibleParentNames(fields.keySet()); - List result = new ArrayList<>(); - - XContentParser parser = context.parser(); - XContentParser.Token currentToken = parser.nextToken(); - List path = new ArrayList<>(); - List isObjectInPath = new ArrayList<>(); // Tracks if path components correspond to an object or an array. - String fieldName = null; - while (currentToken != null) { - while (currentToken != XContentParser.Token.FIELD_NAME) { - if (fieldName != null - && (currentToken == XContentParser.Token.START_OBJECT || currentToken == XContentParser.Token.START_ARRAY)) { - if (parentNames.contains(getCurrentPath(path, fieldName)) == false) { - // No missing value under this parsing subtree, skip it. - parser.skipChildren(); - } else { - path.add(fieldName); - isObjectInPath.add(currentToken == XContentParser.Token.START_OBJECT); - } - fieldName = null; - } else if (currentToken == XContentParser.Token.END_OBJECT || currentToken == XContentParser.Token.END_ARRAY) { - // Remove the path, if the scope type matches the one when the path was added. - if (isObjectInPath.isEmpty() == false - && (isObjectInPath.getLast() && currentToken == XContentParser.Token.END_OBJECT - || isObjectInPath.getLast() == false && currentToken == XContentParser.Token.END_ARRAY)) { - path.removeLast(); - isObjectInPath.removeLast(); - } - fieldName = null; - } - currentToken = parser.nextToken(); - if (currentToken == null) { - return result; - } - } - fieldName = parser.currentName(); - String fullName = getCurrentPath(path, fieldName); - var leaf = fields.get(fullName); // There may be multiple matches for array elements, don't use #remove. - if (leaf != null) { - parser.nextToken(); // Advance the parser to the value to be read. - result.add(leaf.cloneWithValue(context.encodeFlattenedToken())); - fieldName = null; - } - currentToken = parser.nextToken(); - } - return result; - } - - private static String getCurrentPath(List path, String fieldName) { - assert fieldName != null; - return path.isEmpty() ? fieldName : String.join(".", path) + "." + fieldName; - } - - /** - * Generates all possible parent object names for the given full names. - * For instance, for input ['path.to.foo', 'another.path.to.bar'], it returns: - * [ 'path', 'path.to', 'another', 'another.path', 'another.path.to' ] - */ - private static Set getPossibleParentNames(Set fullPaths) { - if (fullPaths.isEmpty()) { - return Collections.emptySet(); - } - Set paths = new HashSet<>(); - for (String fullPath : fullPaths) { - String[] split = fullPath.split("\\."); - if (split.length < 2) { - continue; - } - StringBuilder builder = new StringBuilder(split[0]); - paths.add(builder.toString()); - for (int i = 1; i < split.length - 1; i++) { - builder.append("."); - builder.append(split[i]); - paths.add(builder.toString()); - } - } - return paths; - } - private static void executeIndexTimeScripts(DocumentParserContext context) { List indexTimeScriptMappers = context.mappingLookup().indexTimeScriptMappers(); if (indexTimeScriptMappers.isEmpty()) { @@ -426,7 +298,10 @@ static void parseObjectOrNested(DocumentParserContext context) throws IOExceptio throwOnConcreteValue(context.parent(), currentFieldName, context); } - if (context.canAddIgnoredField() && getSourceKeepMode(context, context.parent().sourceKeepMode()) == Mapper.SourceKeepMode.ALL) { + var sourceKeepMode = getSourceKeepMode(context, context.parent().sourceKeepMode()); + if (context.canAddIgnoredField() + && (sourceKeepMode == Mapper.SourceKeepMode.ALL + || (sourceKeepMode == Mapper.SourceKeepMode.ARRAYS && context.inArrayScope()))) { context = context.addIgnoredFieldFromContext( new IgnoredSourceFieldMapper.NameValue( context.parent().fullPath(), @@ -571,9 +446,11 @@ static void parseObjectOrField(DocumentParserContext context, Mapper mapper) thr parseObjectOrNested(context.createFlattenContext(currentFieldName)); context.path().add(currentFieldName); } else { + var sourceKeepMode = getSourceKeepMode(context, fieldMapper.sourceKeepMode()); if (context.canAddIgnoredField() && (fieldMapper.syntheticSourceMode() == FieldMapper.SyntheticSourceMode.FALLBACK - || getSourceKeepMode(context, fieldMapper.sourceKeepMode()) == Mapper.SourceKeepMode.ALL + || sourceKeepMode == Mapper.SourceKeepMode.ALL + || (sourceKeepMode == Mapper.SourceKeepMode.ARRAYS && context.inArrayScope()) || (context.isWithinCopyTo() == false && context.isCopyToDestinationField(mapper.fullPath())))) { context = context.addIgnoredFieldFromContext( IgnoredSourceFieldMapper.NameValue.fromContext(context, fieldMapper.fullPath(), null) @@ -811,9 +688,7 @@ private static void parseNonDynamicArray( if (mapper instanceof ObjectMapper objectMapper) { mode = getSourceKeepMode(context, objectMapper.sourceKeepMode()); objectWithFallbackSyntheticSource = mode == Mapper.SourceKeepMode.ALL - // Inside nested objects we always store object arrays as a workaround for #115261. - || ((context.inNestedScope() || mode == Mapper.SourceKeepMode.ARRAYS) - && objectMapper instanceof NestedObjectMapper == false); + || (mode == Mapper.SourceKeepMode.ARRAYS && objectMapper instanceof NestedObjectMapper == false); } boolean fieldWithFallbackSyntheticSource = false; boolean fieldWithStoredArraySource = false; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index c884d68c8f0ee..c84df68a637e2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -120,8 +120,6 @@ private enum Scope { private final Set ignoredFields; private final List ignoredFieldValues; - private final List ignoredFieldsMissingValues; - private boolean inArrayScopeEnabled; private Scope currentScope; private final Map> dynamicMappers; @@ -153,8 +151,6 @@ private DocumentParserContext( SourceToParse sourceToParse, Set ignoreFields, List ignoredFieldValues, - List ignoredFieldsWithNoSource, - boolean inArrayScopeEnabled, Scope currentScope, Map> dynamicMappers, Map dynamicObjectMappers, @@ -175,8 +171,6 @@ private DocumentParserContext( this.sourceToParse = sourceToParse; this.ignoredFields = ignoreFields; this.ignoredFieldValues = ignoredFieldValues; - this.ignoredFieldsMissingValues = ignoredFieldsWithNoSource; - this.inArrayScopeEnabled = inArrayScopeEnabled; this.currentScope = currentScope; this.dynamicMappers = dynamicMappers; this.dynamicObjectMappers = dynamicObjectMappers; @@ -200,8 +194,6 @@ private DocumentParserContext(ObjectMapper parent, ObjectMapper.Dynamic dynamic, in.sourceToParse, in.ignoredFields, in.ignoredFieldValues, - in.ignoredFieldsMissingValues, - in.inArrayScopeEnabled, in.currentScope, in.dynamicMappers, in.dynamicObjectMappers, @@ -232,8 +224,6 @@ protected DocumentParserContext( source, new HashSet<>(), new ArrayList<>(), - new ArrayList<>(), - mappingParserContext.getIndexSettings().isSyntheticSourceSecondDocParsingPassEnabled(), Scope.SINGLETON, new HashMap<>(), new HashMap<>(), @@ -324,13 +314,6 @@ public final Collection getIgnoredFieldValue return Collections.unmodifiableCollection(ignoredFieldValues); } - /** - * Remove duplicate ignored values, using the passed set of field names as reference - */ - public final void deduplicateIgnoredFieldValues(final Set fullNames) { - ignoredFieldValues.removeIf(nv -> fullNames.contains(nv.name())); - } - /** * Adds an ignored field from the parser context, capturing an object or an array. * @@ -345,17 +328,11 @@ public final void deduplicateIgnoredFieldValues(final Set fullNames) { public final DocumentParserContext addIgnoredFieldFromContext(IgnoredSourceFieldMapper.NameValue ignoredFieldWithNoSource) throws IOException { if (canAddIgnoredField()) { - if (currentScope == Scope.ARRAY) { - // The field is an array within an array, store all sub-array elements. - ignoredFieldsMissingValues.add(ignoredFieldWithNoSource); - return cloneWithRecordedSource(); - } else { - assert ignoredFieldWithNoSource != null; - assert ignoredFieldWithNoSource.value() == null; - Tuple tuple = XContentDataHelper.cloneSubContext(this); - addIgnoredField(ignoredFieldWithNoSource.cloneWithValue(XContentDataHelper.encodeXContentBuilder(tuple.v2()))); - return tuple.v1(); - } + assert ignoredFieldWithNoSource != null; + assert ignoredFieldWithNoSource.value() == null; + Tuple tuple = XContentDataHelper.cloneSubContext(this); + addIgnoredField(ignoredFieldWithNoSource.cloneWithValue(XContentDataHelper.encodeXContentBuilder(tuple.v2()))); + return tuple.v1(); } return this; } @@ -374,13 +351,6 @@ BytesRef encodeFlattenedToken() throws IOException { return encoded; } - /** - * Return the collection of fields that are missing their source values. - */ - public final Collection getIgnoredFieldsMissingValues() { - return Collections.unmodifiableCollection(ignoredFieldsMissingValues); - } - /** * Clones the current context to mark it as an array, if it's not already marked, or restore it if it's within a nested object. * Applies to synthetic source only. @@ -389,8 +359,7 @@ public final DocumentParserContext maybeCloneForArray(Mapper mapper) throws IOEx if (canAddIgnoredField() && mapper instanceof ObjectMapper && mapper instanceof NestedObjectMapper == false - && currentScope != Scope.ARRAY - && inArrayScopeEnabled) { + && currentScope != Scope.ARRAY) { DocumentParserContext subcontext = switchParser(parser()); subcontext.currentScope = Scope.ARRAY; return subcontext; @@ -679,8 +648,8 @@ public boolean isWithinCopyTo() { return false; } - public boolean inNestedScope() { - return currentScope == Scope.NESTED; + boolean inArrayScope() { + return currentScope == Scope.ARRAY; } public final DocumentParserContext createChildContext(ObjectMapper parent) { @@ -728,10 +697,6 @@ public LuceneDocument doc() { }; cloned.currentScope = Scope.NESTED; - // Disable using second parsing pass since it currently can not determine which parts - // of source belong to which nested document. - // See #115261. - cloned.inArrayScopeEnabled = false; return cloned; } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index 7d29db66f4031..b43371594d57b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -638,7 +638,7 @@ public void testIndexStoredArraySourceRootObjectArrayWithBypass() throws IOExcep b.field("bool_value", true); }); assertEquals(""" - {"bool_value":true,"path":{"int_value":[10,20]}}""", syntheticSource); + {"bool_value":true,"path":{"int_value":[20,10]}}""", syntheticSource); } public void testIndexStoredArraySourceNestedValueArray() throws IOException { @@ -702,7 +702,7 @@ public void testIndexStoredArraySourceNestedValueArrayDisabled() throws IOExcept b.endObject(); }); assertEquals(""" - {"path":{"bool_value":true,"int_value":[10,20,30],"obj":{"foo":[1,2]}}}""", syntheticSource); + {"path":{"bool_value":true,"int_value":[10,20,30],"obj":{"foo":[2,1]}}}""", syntheticSource); } public void testFieldStoredArraySourceNestedValueArray() throws IOException { @@ -992,7 +992,7 @@ public void testObjectArrayWithinNestedObjects() throws IOException { b.endObject(); }); assertEquals(""" - {"path":{"to":{"obj":[{"id":[1,20,3]},{"id":10}]}}}""", syntheticSource); + {"path":{"to":{"obj":{"id":[1,20,3,10]}}}}""", syntheticSource); } public void testObjectArrayWithinNestedObjectsArray() throws IOException { @@ -1043,7 +1043,7 @@ public void testObjectArrayWithinNestedObjectsArray() throws IOException { b.endObject(); }); assertEquals(""" - {"path":{"to":[{"obj":[{"id":[1,20,3]},{"id":10}]},{"obj":[{"id":[200,300,500]},{"id":100}]}]}}""", syntheticSource); + {"path":{"to":[{"obj":{"id":[1,20,3,10]}},{"obj":{"id":[200,300,500,100]}}]}}""", syntheticSource); } public void testArrayWithinArray() throws IOException { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java index ef03fd0ba6f0e..357e1bca38e8f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java @@ -333,7 +333,6 @@ public void testDynamicIndexSettingsAreClassified() { replicatedSettings.add(IndexSettings.MAX_SHINGLE_DIFF_SETTING); replicatedSettings.add(IndexSettings.TIME_SERIES_END_TIME); replicatedSettings.add(IndexSettings.PREFER_ILM_SETTING); - replicatedSettings.add(IndexSettings.SYNTHETIC_SOURCE_SECOND_DOC_PARSING_PASS_SETTING); replicatedSettings.add(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING); replicatedSettings.add(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING); replicatedSettings.add(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING); From 69150c8de16335e0c3444d6bf5ccb4b705d359ad Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Mon, 4 Nov 2024 14:01:16 +0100 Subject: [PATCH 311/324] Export desired balance node weight and its components as metrics (#115854) Note that this includes only the three node-level weight components (out of the four), as we were not sure how to aggregate and expose the index-specific component and how useful it will be at all. - some of the weight components are also recalculated or exposed as stats (not APM metrics) else where (e.g. `AllocationStatsService`) but since they are available right where we calculate the weight (which we also want), I have just exported all of them together. - How to pass the weight from the BalancedAllocator which is used as a delegated allocator in the desired balance allocator, and from there to the reconciler where we publish, could probably also be done differently, but using `RoutingNodes` and `DesiredBalance` seemed to make more sense to me. Not sure if it is blasphemy for those more familiar with the allocation code! - I liked the `DesiredBalanceMetrics` and how its used so I tried to clean up its existing usage a bit and colocate the new metrics. Relates ES-9866 --- .../DesiredBalanceReconcilerMetricsIT.java | 86 +++++++++++++- .../cluster/routing/RoutingNodes.java | 12 ++ .../allocator/BalancedShardsAllocator.java | 28 ++++- .../allocation/allocator/DesiredBalance.java | 13 +- .../allocator/DesiredBalanceComputer.java | 2 +- .../allocator/DesiredBalanceMetrics.java | 112 +++++++++++++++++- .../allocator/DesiredBalanceReconciler.java | 13 +- .../allocator/DesiredBalanceMetricsTests.java | 9 +- .../DesiredBalanceShardsAllocatorTests.java | 1 + 9 files changed, 255 insertions(+), 21 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java index cb279c93b402e..bfe46dc4c90f2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; @@ -17,10 +18,16 @@ import org.hamcrest.Matcher; import java.util.Collection; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.in; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class DesiredBalanceReconcilerMetricsIT extends ESIntegTestCase { @Override @@ -31,6 +38,7 @@ protected Collection> nodePlugins() { public void testDesiredBalanceGaugeMetricsAreOnlyPublishedByCurrentMaster() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("test").setSettings(indexSettings(2, 1)).get(); + indexRandom(randomBoolean(), "test", between(50, 100)); ensureGreen(); assertOnlyMasterIsPublishingMetrics(); @@ -45,6 +53,59 @@ public void testDesiredBalanceGaugeMetricsAreOnlyPublishedByCurrentMaster() thro } } + public void testDesiredBalanceNodeWeightMetrics() { + internalCluster().startNodes(2); + prepareCreate("test").setSettings(indexSettings(2, 1)).get(); + indexRandom(randomBoolean(), "test", between(50, 100)); + ensureGreen(); + final var telemetryPlugin = getTelemetryPlugin(internalCluster().getMasterName()); + telemetryPlugin.collect(); + assertThat(telemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.UNASSIGNED_SHARDS_METRIC_NAME), not(empty())); + assertThat(telemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.TOTAL_SHARDS_METRIC_NAME), not(empty())); + assertThat(telemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.UNDESIRED_ALLOCATION_COUNT_METRIC_NAME), not(empty())); + assertThat(telemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.UNDESIRED_ALLOCATION_RATIO_METRIC_NAME), not(empty())); + + var nodeIds = internalCluster().clusterService().state().nodes().stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); + var nodeNames = internalCluster().clusterService().state().nodes().stream().map(DiscoveryNode::getName).collect(Collectors.toSet()); + + final var nodeWeightsMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME + ); + assertThat(nodeWeightsMetrics.size(), equalTo(2)); + for (var nodeStat : nodeWeightsMetrics) { + assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var nodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( + DesiredBalanceMetrics.DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME + ); + assertThat(nodeShardCountMetrics.size(), equalTo(2)); + for (var nodeStat : nodeShardCountMetrics) { + assertThat(nodeStat.value().longValue(), equalTo(2L)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var nodeWriteLoadMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.DESIRED_BALANCE_NODE_WRITE_LOAD_METRIC_NAME + ); + assertThat(nodeWriteLoadMetrics.size(), equalTo(2)); + for (var nodeStat : nodeWriteLoadMetrics) { + assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var nodeDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME + ); + assertThat(nodeDiskUsageMetrics.size(), equalTo(2)); + for (var nodeStat : nodeDiskUsageMetrics) { + assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + } + private static void assertOnlyMasterIsPublishingMetrics() { String masterNodeName = internalCluster().getMasterName(); String[] nodeNames = internalCluster().getNodeNames(); @@ -54,10 +115,7 @@ private static void assertOnlyMasterIsPublishingMetrics() { } private static void assertMetricsAreBeingPublished(String nodeName, boolean shouldBePublishing) { - final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, nodeName) - .filterPlugins(TestTelemetryPlugin.class) - .findFirst() - .orElseThrow(); + final TestTelemetryPlugin testTelemetryPlugin = getTelemetryPlugin(nodeName); testTelemetryPlugin.resetMeter(); testTelemetryPlugin.collect(); Matcher> matcher = shouldBePublishing ? not(empty()) : empty(); @@ -65,5 +123,25 @@ private static void assertMetricsAreBeingPublished(String nodeName, boolean shou assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.TOTAL_SHARDS_METRIC_NAME), matcher); assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.UNDESIRED_ALLOCATION_COUNT_METRIC_NAME), matcher); assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.UNDESIRED_ALLOCATION_RATIO_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME), matcher); + assertThat( + testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.DESIRED_BALANCE_NODE_WRITE_LOAD_METRIC_NAME), + matcher + ); + assertThat( + testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME), + matcher + ); + assertThat( + testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME), + matcher + ); + } + + private static TestTelemetryPlugin getTelemetryPlugin(String nodeName) { + return internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 7f03a2861a807..fb5393c1961f8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Assertions; @@ -76,6 +77,8 @@ public class RoutingNodes implements Iterable { private final Map> attributeValuesByAttribute; private final Map recoveriesPerNode; + private Map balanceWeightStatsPerNode; + /** * Creates an immutable instance from the {@link RoutingTable} and {@link DiscoveryNodes} found in a cluster state. Used to initialize * the routing nodes in {@link ClusterState#getRoutingNodes()}. This method should not be used directly, use @@ -89,6 +92,14 @@ public static RoutingNodes mutable(RoutingTable routingTable, DiscoveryNodes dis return new RoutingNodes(routingTable, discoveryNodes, false); } + public void setBalanceWeightStatsPerNode(Map weightStatsPerNode) { + this.balanceWeightStatsPerNode = weightStatsPerNode; + } + + public Map getBalanceWeightStatsPerNode() { + return balanceWeightStatsPerNode; + } + private RoutingNodes(RoutingTable routingTable, DiscoveryNodes discoveryNodes, boolean readOnly) { this.readOnly = readOnly; this.recoveriesPerNode = new HashMap<>(); @@ -97,6 +108,7 @@ private RoutingNodes(RoutingTable routingTable, DiscoveryNodes discoveryNodes, b this.unassignedShards = new UnassignedShards(this); this.attributeValuesByAttribute = Collections.synchronizedMap(new HashMap<>()); + balanceWeightStatsPerNode = Maps.newMapWithExpectedSize(discoveryNodes.getDataNodes().size()); nodesToShards = Maps.newMapWithExpectedSize(discoveryNodes.getDataNodes().size()); // fill in the nodeToShards with the "live" nodes var dataNodes = discoveryNodes.getDataNodes().keySet(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 108bb83d90871..5b8fb0c7e9203 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; @@ -159,6 +160,25 @@ public void allocate(RoutingAllocation allocation) { balancer.allocateUnassigned(); balancer.moveShards(); balancer.balance(); + + collectAndRecordNodeWeightStats(balancer, weightFunction, allocation); + } + + private void collectAndRecordNodeWeightStats(Balancer balancer, WeightFunction weightFunction, RoutingAllocation allocation) { + Map nodeLevelWeights = new HashMap<>(); + for (var entry : balancer.nodes.entrySet()) { + var node = entry.getValue(); + nodeLevelWeights.put( + node.routingNode.node(), + new DesiredBalanceMetrics.NodeWeightStats( + node.numShards(), + node.diskUsageInBytes(), + node.writeLoad(), + weightFunction.nodeWeight(balancer, node) + ) + ); + } + allocation.routingNodes().setBalanceWeightStatsPerNode(nodeLevelWeights); } @Override @@ -275,11 +295,15 @@ private static class WeightFunction { } float weight(Balancer balancer, ModelNode node, String index) { - final float weightShard = node.numShards() - balancer.avgShardsPerNode(); final float weightIndex = node.numShards(index) - balancer.avgShardsPerNode(index); + return nodeWeight(balancer, node) + theta1 * weightIndex; + } + + float nodeWeight(Balancer balancer, ModelNode node) { + final float weightShard = node.numShards() - balancer.avgShardsPerNode(); final float ingestLoad = (float) (node.writeLoad() - balancer.avgWriteLoadPerNode()); final float diskUsage = (float) (node.diskUsageInBytes() - balancer.avgDiskUsageInBytesPerNode()); - return theta0 * weightShard + theta1 * weightIndex + theta2 * ingestLoad + theta3 * diskUsage; + return theta0 * weightShard + theta2 * ingestLoad + theta3 * diskUsage; } float minWeightDelta(Balancer balancer, String index) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java index 5224d1d920b2e..aeedbb56b9df2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; @@ -19,8 +20,18 @@ * The desired balance of the cluster, indicating which nodes should hold a copy of each shard. * * @param assignments a set of the (persistent) node IDs to which each {@link ShardId} should be allocated + * @param weightsPerNode The node weights calculated based on + * {@link org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.WeightFunction#nodeWeight} */ -public record DesiredBalance(long lastConvergedIndex, Map assignments) { +public record DesiredBalance( + long lastConvergedIndex, + Map assignments, + Map weightsPerNode +) { + + public DesiredBalance(long lastConvergedIndex, Map assignments) { + this(lastConvergedIndex, assignments, Map.of()); + } public static final DesiredBalance INITIAL = new DesiredBalance(-1, Map.of()); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index 2acb4827a9585..56c48492a2051 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -368,7 +368,7 @@ public DesiredBalance compute( } long lastConvergedIndex = hasChanges ? previousDesiredBalance.lastConvergedIndex() : desiredBalanceInput.index(); - return new DesiredBalance(lastConvergedIndex, assignments); + return new DesiredBalance(lastConvergedIndex, assignments, routingNodes.getBalanceWeightStatsPerNode()); } private static Map collectShardAssignments(RoutingNodes routingNodes) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java index 436f1ac38c0c2..d8a2d01f56dff 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java @@ -9,39 +9,63 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import org.elasticsearch.telemetry.metric.LongWithAttributes; import org.elasticsearch.telemetry.metric.MeterRegistry; +import java.util.ArrayList; import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; public class DesiredBalanceMetrics { + public record AllocationStats(long unassignedShards, long totalAllocations, long undesiredAllocationsExcludingShuttingDownNodes) {} + + public record NodeWeightStats(long shardCount, double diskUsageInBytes, double writeLoad, double nodeWeight) {} + public static final DesiredBalanceMetrics NOOP = new DesiredBalanceMetrics(MeterRegistry.NOOP); public static final String UNASSIGNED_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.unassigned.current"; public static final String TOTAL_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.current"; public static final String UNDESIRED_ALLOCATION_COUNT_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.current"; public static final String UNDESIRED_ALLOCATION_RATIO_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.ratio"; + public static final String DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME = "es.allocator.desired_balance.allocations.node_weight.current"; + public static final String DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME = + "es.allocator.desired_balance.allocations.node_shard_count.current"; + public static final String DESIRED_BALANCE_NODE_WRITE_LOAD_METRIC_NAME = + "es.allocator.desired_balance.allocations.node_write_load.current"; + public static final String DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME = + "es.allocator.desired_balance.allocations.node_disk_usage_bytes.current"; + public static final AllocationStats EMPTY_ALLOCATION_STATS = new AllocationStats(-1, -1, -1); private volatile boolean nodeIsMaster = false; - /** * Number of unassigned shards during last reconciliation */ private volatile long unassignedShards; + /** * Total number of assigned shards during last reconciliation */ private volatile long totalAllocations; + /** * Number of assigned shards during last reconciliation that are not allocated on desired node and need to be moved */ private volatile long undesiredAllocations; - public void updateMetrics(long unassignedShards, long totalAllocations, long undesiredAllocations) { - this.unassignedShards = unassignedShards; - this.totalAllocations = totalAllocations; - this.undesiredAllocations = undesiredAllocations; + private final AtomicReference> weightStatsPerNodeRef = new AtomicReference<>(Map.of()); + + public void updateMetrics(AllocationStats allocationStats, Map weightStatsPerNode) { + assert allocationStats != null : "allocation stats cannot be null"; + assert weightStatsPerNode != null : "node balance weight stats cannot be null"; + if (allocationStats != EMPTY_ALLOCATION_STATS) { + this.unassignedShards = allocationStats.unassignedShards; + this.totalAllocations = allocationStats.totalAllocations; + this.undesiredAllocations = allocationStats.undesiredAllocationsExcludingShuttingDownNodes; + } + weightStatsPerNodeRef.set(weightStatsPerNode); } public DesiredBalanceMetrics(MeterRegistry meterRegistry) { @@ -64,6 +88,30 @@ public DesiredBalanceMetrics(MeterRegistry meterRegistry) { "1", this::getUndesiredAllocationsRatioMetrics ); + meterRegistry.registerDoublesGauge( + DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME, + "Weight of nodes in the computed desired balance", + "unit", + this::getDesiredBalanceNodeWeightMetrics + ); + meterRegistry.registerDoublesGauge( + DESIRED_BALANCE_NODE_WRITE_LOAD_METRIC_NAME, + "Write load of nodes in the computed desired balance", + "threads", + this::getDesiredBalanceNodeWriteLoadMetrics + ); + meterRegistry.registerDoublesGauge( + DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME, + "Disk usage of nodes in the computed desired balance", + "bytes", + this::getDesiredBalanceNodeDiskUsageMetrics + ); + meterRegistry.registerLongsGauge( + DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME, + "Shard count of nodes in the computed desired balance", + "unit", + this::getDesiredBalanceNodeShardCountMetrics + ); } public void setNodeIsMaster(boolean nodeIsMaster) { @@ -86,6 +134,59 @@ private List getUnassignedShardsMetrics() { return getIfPublishing(unassignedShards); } + private List getDesiredBalanceNodeWeightMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = weightStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + var stat = stats.get(node); + doubles.add(new DoubleWithAttributes(stat.nodeWeight(), getNodeAttributes(node))); + } + return doubles; + } + + private List getDesiredBalanceNodeWriteLoadMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = weightStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + doubles.add(new DoubleWithAttributes(stats.get(node).writeLoad(), getNodeAttributes(node))); + } + return doubles; + } + + private List getDesiredBalanceNodeDiskUsageMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = weightStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + doubles.add(new DoubleWithAttributes(stats.get(node).diskUsageInBytes(), getNodeAttributes(node))); + } + return doubles; + } + + private List getDesiredBalanceNodeShardCountMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = weightStatsPerNodeRef.get(); + List values = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + values.add(new LongWithAttributes(stats.get(node).shardCount(), getNodeAttributes(node))); + } + return values; + } + + private Map getNodeAttributes(DiscoveryNode node) { + return Map.of("node_id", node.getId(), "node_name", node.getName()); + } + private List getTotalAllocationsMetrics() { return getIfPublishing(totalAllocations); } @@ -114,5 +215,6 @@ public void zeroAllMetrics() { unassignedShards = 0; totalAllocations = 0; undesiredAllocations = 0; + weightStatsPerNodeRef.set(Map.of()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index dced9214a3245..129144a3d734b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics.AllocationStats; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.ClusterSettings; @@ -138,9 +139,11 @@ void run() { moveShards(); // 3. move any other shards that are desired elsewhere logger.trace("Reconciler#balance"); - balance(); + var allocationStats = balance(); logger.debug("Reconciliation is complete"); + + desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode()); } } @@ -464,9 +467,9 @@ private void moveShards() { } } - private void balance() { + private AllocationStats balance() { if (allocation.deciders().canRebalance(allocation).type() != Decision.Type.YES) { - return; + return DesiredBalanceMetrics.EMPTY_ALLOCATION_STATS; } int unassignedShards = routingNodes.unassigned().size() + routingNodes.unassigned().ignored().size(); @@ -532,9 +535,9 @@ private void balance() { } } - desiredBalanceMetrics.updateMetrics(unassignedShards, totalAllocations, undesiredAllocationsExcludingShuttingDownNodes); - maybeLogUndesiredAllocationsWarning(totalAllocations, undesiredAllocationsExcludingShuttingDownNodes, routingNodes.size()); + + return new AllocationStats(unassignedShards, totalAllocations, undesiredAllocationsExcludingShuttingDownNodes); } private void maybeLogUndesiredAllocationsWarning(int totalAllocations, int undesiredAllocations, int nodeCount) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java index 2c642da665051..85dc5c9dcd6a9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java @@ -9,11 +9,14 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics.AllocationStats; import org.elasticsearch.telemetry.InstrumentType; import org.elasticsearch.telemetry.RecordingMeterRegistry; import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; +import java.util.Map; + import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -24,7 +27,7 @@ public void testZeroAllMetrics() { long unassignedShards = randomNonNegativeLong(); long totalAllocations = randomNonNegativeLong(); long undesiredAllocations = randomNonNegativeLong(); - metrics.updateMetrics(unassignedShards, totalAllocations, undesiredAllocations); + metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of()); assertEquals(totalAllocations, metrics.totalAllocations()); assertEquals(unassignedShards, metrics.unassignedShards()); assertEquals(undesiredAllocations, metrics.undesiredAllocations()); @@ -41,7 +44,7 @@ public void testMetricsAreOnlyPublishedWhenNodeIsMaster() { long unassignedShards = randomNonNegativeLong(); long totalAllocations = randomLongBetween(100, 10000000); long undesiredAllocations = randomLongBetween(0, totalAllocations); - metrics.updateMetrics(unassignedShards, totalAllocations, undesiredAllocations); + metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of()); // Collect when not master meterRegistry.getRecorder().collect(); @@ -101,7 +104,7 @@ public void testUndesiredAllocationRatioIsZeroWhenTotalShardsIsZero() { RecordingMeterRegistry meterRegistry = new RecordingMeterRegistry(); DesiredBalanceMetrics metrics = new DesiredBalanceMetrics(meterRegistry); long unassignedShards = randomNonNegativeLong(); - metrics.updateMetrics(unassignedShards, 0, 0); + metrics.updateMetrics(new AllocationStats(unassignedShards, 0, 0), Map.of()); metrics.setNodeIsMaster(true); meterRegistry.getRecorder().collect(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index 1d2f6cffa3777..739f81ed6d110 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -808,6 +808,7 @@ public void allocate(RoutingAllocation allocation) { unassignedIterator.next(); unassignedIterator.initialize(dataNodeId, null, 0L, allocation.changes()); } + allocation.routingNodes().setBalanceWeightStatsPerNode(Map.of()); } @Override From b2781c76f5a8986533aa08cbc13fbba5e116ac5e Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 5 Nov 2024 01:14:17 +1100 Subject: [PATCH 312/324] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=terms_enum/10_basic/Test security} #116178 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index d7b4c472273a6..3a471ad5d12d2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/forecast/Test forecast unknown job} issue: https://github.com/elastic/elasticsearch/issues/116150 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=terms_enum/10_basic/Test security} + issue: https://github.com/elastic/elasticsearch/issues/116178 # Examples: # From 744eb507f6d74aba45a960595aab0cd93bb8d48e Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Mon, 4 Nov 2024 09:32:53 -0500 Subject: [PATCH 313/324] [ESQL] clean up date trunc tests (#116111) While working on #110008 I discovered that the Date Trunc tests were only running in folding mode, because the interval types are marked as not representable. The correct way to test this is to set the forceLiteral flag for those fields, which will (as the name suggests) force them to be literals even in non-folding tests. Doing that turned up errors in the evaluatorToString tests, which I fixed. There are two big changes here. First, the second parameter to the evaluator is a Rounding instance, not the actual interval. Since Rounding includes some information about the specific rounding in the toString results, I am just using a starts with matcher to validate the majority of the string, rather than trying to reconstruct the expected rounding string. Second, passing in a literal null for the interval parameter folds the whole expression to null, and thus a completely different toString. I added a clause in AnyNullIsNull to account for this. While I was in there, I moved some specific test cases to a different file. I know moving code is something we're trying to minimize right now, but this seemed worth it. The tests in question do not depend on the parameters of the test case, but all methods in the class get run for every set of parameters. This was causing these tests to be run many times with the same values, which bloats our test run time and test count. Moving them to a distinct class means they'll only be executed once per test run. I feel like this benefit outweighs the cost of git history complexity. --- .../kibana/definition/date_diff.json | 2 +- .../esql/functions/signature/match.svg | 2 +- .../function/AbstractFunctionTestCase.java | 3 +- .../scalar/date/DateTruncRoundingTests.java | 111 ++++++++++++++++++ .../function/scalar/date/DateTruncTests.java | 96 ++------------- 5 files changed, 123 insertions(+), 91 deletions(-) create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncRoundingTests.java diff --git a/docs/reference/esql/functions/kibana/definition/date_diff.json b/docs/reference/esql/functions/kibana/definition/date_diff.json index a4812af5f930b..d32028d455348 100644 --- a/docs/reference/esql/functions/kibana/definition/date_diff.json +++ b/docs/reference/esql/functions/kibana/definition/date_diff.json @@ -55,7 +55,7 @@ ], "examples" : [ "ROW date1 = TO_DATETIME(\"2023-12-02T11:00:00.000Z\"), date2 = TO_DATETIME(\"2023-12-02T11:00:00.001Z\")\n| EVAL dd_ms = DATE_DIFF(\"microseconds\", date1, date2)", - "ROW end_23=\"2023-12-31T23:59:59.999Z\"::DATETIME,\n start_24=\"2024-01-01T00:00:00.000Z\"::DATETIME,\n end_24=\"2024-12-31T23:59:59.999\"::DATETIME\n| EVAL end23_to_start24=DATE_DIFF(\"year\", end_23, start_24)\n| EVAL end23_to_end24=DATE_DIFF(\"year\", end_23, end_24)\n| EVAL start_to_end_24=DATE_DIFF(\"year\", start_24, end_24)" + "ROW end_23=TO_DATETIME(\"2023-12-31T23:59:59.999Z\"),\n start_24=TO_DATETIME(\"2024-01-01T00:00:00.000Z\"),\n end_24=TO_DATETIME(\"2024-12-31T23:59:59.999\")\n| EVAL end23_to_start24=DATE_DIFF(\"year\", end_23, start_24)\n| EVAL end23_to_end24=DATE_DIFF(\"year\", end_23, end_24)\n| EVAL start_to_end_24=DATE_DIFF(\"year\", start_24, end_24)" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/signature/match.svg b/docs/reference/esql/functions/signature/match.svg index e7bb001247a9d..14ddb87468e70 100644 --- a/docs/reference/esql/functions/signature/match.svg +++ b/docs/reference/esql/functions/signature/match.svg @@ -1 +1 @@ -MATCH(field,query) +MATCH(field,query) \ No newline at end of file diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index c05f8e0990b3c..6a552f400d36e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -67,6 +67,7 @@ import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.session.Configuration; import org.hamcrest.Matcher; +import org.hamcrest.Matchers; import org.junit.After; import org.junit.AfterClass; @@ -170,7 +171,7 @@ protected static List anyNullIsNull(boolean entirelyNullPreser (nullPosition, nullValueDataType, original) -> entirelyNullPreservesType == false && nullValueDataType == DataType.NULL && original.getData().size() == 1 ? DataType.NULL : original.expectedType(), - (nullPosition, nullData, original) -> original + (nullPosition, nullData, original) -> nullData.isForceLiteral() ? Matchers.equalTo("LiteralsEvaluator[lit=null]") : original ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncRoundingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncRoundingTests.java new file mode 100644 index 0000000000000..5af5c8e493177 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncRoundingTests.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import org.elasticsearch.common.Rounding; +import org.elasticsearch.test.ESTestCase; + +import java.time.Duration; +import java.time.Instant; +import java.time.Period; + +import static org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc.createRounding; +import static org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc.process; +import static org.hamcrest.Matchers.containsString; + +/** + * This class supplements {@link DateTruncTests}. The tests in this class are not run via the parametrized runner, + * and exercise specific helper functions within the class. + */ +public class DateTruncRoundingTests extends ESTestCase { + + public void testCreateRoundingDuration() { + Rounding.Prepared rounding; + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createRounding(Duration.ofHours(0))); + assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); + + e = expectThrows(IllegalArgumentException.class, () -> createRounding(Duration.ofHours(-10))); + assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); + + rounding = createRounding(Duration.ofHours(1)); + assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.HOUR_OF_DAY), 0d); + + rounding = createRounding(Duration.ofHours(10)); + assertEquals(10, rounding.roundingSize(Rounding.DateTimeUnit.HOUR_OF_DAY), 0d); + + rounding = createRounding(Duration.ofMinutes(1)); + assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.MINUTES_OF_HOUR), 0d); + + rounding = createRounding(Duration.ofMinutes(100)); + assertEquals(100, rounding.roundingSize(Rounding.DateTimeUnit.MINUTES_OF_HOUR), 0d); + + rounding = createRounding(Duration.ofSeconds(1)); + assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.SECOND_OF_MINUTE), 0d); + + rounding = createRounding(Duration.ofSeconds(120)); + assertEquals(120, rounding.roundingSize(Rounding.DateTimeUnit.SECOND_OF_MINUTE), 0d); + + rounding = createRounding(Duration.ofSeconds(60).plusMinutes(5).plusHours(1)); + assertEquals(1 + 5 + 60, rounding.roundingSize(Rounding.DateTimeUnit.MINUTES_OF_HOUR), 0d); + } + + public void testCreateRoundingPeriod() { + Rounding.Prepared rounding; + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createRounding(Period.ofMonths(0))); + assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); + + e = expectThrows(IllegalArgumentException.class, () -> createRounding(Period.ofYears(-10))); + assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); + + e = expectThrows(IllegalArgumentException.class, () -> createRounding(Period.of(0, 1, 1))); + assertThat(e.getMessage(), containsString("Time interval with multiple periods is not supported")); + + rounding = createRounding(Period.ofDays(1)); + assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.DAY_OF_MONTH), 0d); + + rounding = createRounding(Period.ofDays(4)); + assertEquals(4, rounding.roundingSize(Rounding.DateTimeUnit.DAY_OF_MONTH), 0d); + + rounding = createRounding(Period.ofDays(7)); + assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.WEEK_OF_WEEKYEAR), 0d); + + rounding = createRounding(Period.ofMonths(1)); + assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.MONTH_OF_YEAR), 0d); + + rounding = createRounding(Period.ofMonths(3)); + assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.QUARTER_OF_YEAR), 0d); + + rounding = createRounding(Period.ofYears(1)); + assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.YEAR_OF_CENTURY), 0d); + + e = expectThrows(IllegalArgumentException.class, () -> createRounding(Period.ofYears(3))); + assertThat(e.getMessage(), containsString("Time interval is not supported")); + } + + public void testCreateRoundingNullInterval() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createRounding(null)); + assertThat(e.getMessage(), containsString("Time interval is not supported")); + } + + public void testDateTruncFunction() { + long ts = toMillis("2023-02-17T10:25:33.38Z"); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> process(ts, createRounding(Period.ofDays(-1)))); + assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); + + e = expectThrows(IllegalArgumentException.class, () -> process(ts, createRounding(Duration.ofHours(-1)))); + assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); + } + + private static long toMillis(String timestamp) { + return Instant.parse(timestamp).toEpochMilli(); + } + +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java index 48b23ed5c8840..0e4968cc2a504 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java @@ -10,12 +10,12 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.common.Rounding; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.hamcrest.Matchers; import java.time.Duration; import java.time.Instant; @@ -23,11 +23,11 @@ import java.util.List; import java.util.function.Supplier; -import static org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc.createRounding; -import static org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc.process; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +/** + * Parameterized testing for {@link DateTrunc}. See also {@link DateTruncRoundingTests} for non-parametrized tests. + */ public class DateTruncTests extends AbstractScalarFunctionTestCase { public DateTruncTests(@Name("TestCase") Supplier testCaseSupplier) { @@ -61,95 +61,15 @@ public static Iterable parameters() { }); } - public void testCreateRoundingDuration() { - Rounding.Prepared rounding; - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createRounding(Duration.ofHours(0))); - assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); - - e = expectThrows(IllegalArgumentException.class, () -> createRounding(Duration.ofHours(-10))); - assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); - - rounding = createRounding(Duration.ofHours(1)); - assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.HOUR_OF_DAY), 0d); - - rounding = createRounding(Duration.ofHours(10)); - assertEquals(10, rounding.roundingSize(Rounding.DateTimeUnit.HOUR_OF_DAY), 0d); - - rounding = createRounding(Duration.ofMinutes(1)); - assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.MINUTES_OF_HOUR), 0d); - - rounding = createRounding(Duration.ofMinutes(100)); - assertEquals(100, rounding.roundingSize(Rounding.DateTimeUnit.MINUTES_OF_HOUR), 0d); - - rounding = createRounding(Duration.ofSeconds(1)); - assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.SECOND_OF_MINUTE), 0d); - - rounding = createRounding(Duration.ofSeconds(120)); - assertEquals(120, rounding.roundingSize(Rounding.DateTimeUnit.SECOND_OF_MINUTE), 0d); - - rounding = createRounding(Duration.ofSeconds(60).plusMinutes(5).plusHours(1)); - assertEquals(1 + 5 + 60, rounding.roundingSize(Rounding.DateTimeUnit.MINUTES_OF_HOUR), 0d); - } - - public void testCreateRoundingPeriod() { - Rounding.Prepared rounding; - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createRounding(Period.ofMonths(0))); - assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); - - e = expectThrows(IllegalArgumentException.class, () -> createRounding(Period.ofYears(-10))); - assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); - - e = expectThrows(IllegalArgumentException.class, () -> createRounding(Period.of(0, 1, 1))); - assertThat(e.getMessage(), containsString("Time interval with multiple periods is not supported")); - - rounding = createRounding(Period.ofDays(1)); - assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.DAY_OF_MONTH), 0d); - - rounding = createRounding(Period.ofDays(4)); - assertEquals(4, rounding.roundingSize(Rounding.DateTimeUnit.DAY_OF_MONTH), 0d); - - rounding = createRounding(Period.ofDays(7)); - assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.WEEK_OF_WEEKYEAR), 0d); - - rounding = createRounding(Period.ofMonths(1)); - assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.MONTH_OF_YEAR), 0d); - - rounding = createRounding(Period.ofMonths(3)); - assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.QUARTER_OF_YEAR), 0d); - - rounding = createRounding(Period.ofYears(1)); - assertEquals(1, rounding.roundingSize(Rounding.DateTimeUnit.YEAR_OF_CENTURY), 0d); - - e = expectThrows(IllegalArgumentException.class, () -> createRounding(Period.ofYears(3))); - assertThat(e.getMessage(), containsString("Time interval is not supported")); - } - - public void testCreateRoundingNullInterval() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createRounding(null)); - assertThat(e.getMessage(), containsString("Time interval is not supported")); - } - - public void testDateTruncFunction() { - long ts = toMillis("2023-02-17T10:25:33.38Z"); - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> process(ts, createRounding(Period.ofDays(-1)))); - assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); - - e = expectThrows(IllegalArgumentException.class, () -> process(ts, createRounding(Duration.ofHours(-1)))); - assertThat(e.getMessage(), containsString("Zero or negative time interval is not supported")); - } - private static TestCaseSupplier ofDatePeriod(Period period, long value, String expectedDate) { return new TestCaseSupplier( List.of(DataType.DATE_PERIOD, DataType.DATETIME), () -> new TestCaseSupplier.TestCase( List.of( - new TestCaseSupplier.TypedData(period, DataType.DATE_PERIOD, "interval"), + new TestCaseSupplier.TypedData(period, DataType.DATE_PERIOD, "interval").forceLiteral(), new TestCaseSupplier.TypedData(value, DataType.DATETIME, "date") ), - "DateTruncEvaluator[date=Attribute[channel=1], interval=Attribute[channel=0]]", + Matchers.startsWith("DateTruncEvaluator[fieldVal=Attribute[channel=0], rounding=Rounding["), DataType.DATETIME, equalTo(toMillis(expectedDate)) ) @@ -161,10 +81,10 @@ private static TestCaseSupplier ofDuration(Duration duration, long value, String List.of(DataType.TIME_DURATION, DataType.DATETIME), () -> new TestCaseSupplier.TestCase( List.of( - new TestCaseSupplier.TypedData(duration, DataType.TIME_DURATION, "interval"), + new TestCaseSupplier.TypedData(duration, DataType.TIME_DURATION, "interval").forceLiteral(), new TestCaseSupplier.TypedData(value, DataType.DATETIME, "date") ), - "DateTruncEvaluator[date=Attribute[channel=1], interval=Attribute[channel=0]]", + Matchers.startsWith("DateTruncEvaluator[fieldVal=Attribute[channel=0], rounding=Rounding["), DataType.DATETIME, equalTo(toMillis(expectedDate)) ) From 9ad09b6ee0d99f69f239ab2be7423cbd3da0d6f8 Mon Sep 17 00:00:00 2001 From: Giorgos Bamparopoulos Date: Mon, 4 Nov 2024 17:06:16 +0200 Subject: [PATCH 314/324] Fix a typo in the example for using pre-existing pipeline definitions (#116084) --- docs/reference/ingest/apis/simulate-ingest.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/ingest/apis/simulate-ingest.asciidoc b/docs/reference/ingest/apis/simulate-ingest.asciidoc index da591eed7546f..52ed09b1d32c2 100644 --- a/docs/reference/ingest/apis/simulate-ingest.asciidoc +++ b/docs/reference/ingest/apis/simulate-ingest.asciidoc @@ -265,8 +265,8 @@ Definition of a mapping that will be merged into the index's mapping for validat [[simulate-ingest-api-pre-existing-pipelines-ex]] ===== Use pre-existing pipeline definitions -In this example the index `index` has a default pipeline called `my-pipeline` and a final -pipeline called `my-final-pipeline`. Since both documents are being ingested into `index`, +In this example the index `my-index` has a default pipeline called `my-pipeline` and a final +pipeline called `my-final-pipeline`. Since both documents are being ingested into `my-index`, both pipelines are executed using the pipeline definitions that are already in the system. [source,console] From 544750c2655d1ef72e9044642a6164727e5d0f0e Mon Sep 17 00:00:00 2001 From: Dan Rubinstein Date: Mon, 4 Nov 2024 10:25:37 -0500 Subject: [PATCH 315/324] Fixing ScheduledEventTest generating same start and end time (#115877) Co-authored-by: Elastic Machine --- muted-tests.yml | 3 --- .../xpack/core/ml/calendars/ScheduledEventTests.java | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 3a471ad5d12d2..71bbcddd6cc9a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -175,9 +175,6 @@ tests: - class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT method: testGeoShapeGeoHex issue: https://github.com/elastic/elasticsearch/issues/115705 -- class: org.elasticsearch.xpack.core.ml.calendars.ScheduledEventTests - method: testBuild_SucceedsWithDefaultSkipResultAndSkipModelUpdatesValues - issue: https://github.com/elastic/elasticsearch/issues/115476 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Verify start transform reuses destination index} issue: https://github.com/elastic/elasticsearch/issues/115808 diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java index 891430057513e..21fadee4e78a8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java @@ -207,7 +207,7 @@ private void validateScheduledEventSuccessfulBuild( String description = randomAlphaOfLength(10); String calendarId = randomAlphaOfLength(10); Instant startTime = Instant.ofEpochMilli(Instant.now().toEpochMilli()); - Instant endTime = startTime.plusSeconds(randomInt(3600)); + Instant endTime = startTime.plusSeconds(randomIntBetween(1, 3600)); ScheduledEvent.Builder builder = new ScheduledEvent.Builder().description(description) .calendarId(calendarId) From 589738c355590a4eb11dc99ec986291e4d2606c6 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 4 Nov 2024 16:29:07 +0100 Subject: [PATCH 316/324] Fix incorrect mutex used in QueryPhaseResultConsumer (#116171) Everything synchronizes on PendingMerges, this was just a typo. closes #115716 --- muted-tests.yml | 3 --- .../elasticsearch/action/search/QueryPhaseResultConsumer.java | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 71bbcddd6cc9a..8498032d07b2c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -223,9 +223,6 @@ tests: - class: org.elasticsearch.search.functionscore.QueryRescorerIT method: testScoring issue: https://github.com/elastic/elasticsearch/issues/116050 -- class: org.elasticsearch.indexing.IndexActionIT - method: testAutoGenerateIdNoDuplicates - issue: https://github.com/elastic/elasticsearch/issues/115716 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 - class: org.elasticsearch.compute.operator.FilterOperatorTests diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index 6c654d9235ec2..249f49c951fcd 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -480,7 +480,7 @@ protected void doRun() { onMergeFailure(t); return; } - synchronized (QueryPhaseResultConsumer.this) { + synchronized (QueryPhaseResultConsumer.PendingMerges.this) { if (hasFailure()) { return; } @@ -501,7 +501,7 @@ protected void doRun() { } } Runnable r = mergeTask.consumeListener(); - synchronized (QueryPhaseResultConsumer.this) { + synchronized (QueryPhaseResultConsumer.PendingMerges.this) { while (true) { mergeTask = queue.poll(); runningTask.set(mergeTask); From 2a4a12b064be12207aa5dbbf1fa316391f3c06b6 Mon Sep 17 00:00:00 2001 From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com> Date: Mon, 4 Nov 2024 10:31:14 -0500 Subject: [PATCH 317/324] Prevent randomized end time from being zero-offset (#116118) From 0d11e88d43f862f4bad3a27e26b34af533c5bdba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 4 Nov 2024 16:40:54 +0100 Subject: [PATCH 318/324] Entitlement tools: SecurityManager scanner (#116020) --- libs/entitlement/tools/build.gradle | 0 libs/entitlement/tools/common/build.gradle | 15 + .../entitlement/tools/Utils.java | 45 +++ .../securitymanager-scanner/build.gradle | 61 ++++ .../licenses/asm-LICENSE.txt | 26 ++ .../licenses/asm-NOTICE.txt | 1 + .../securitymanager-scanner/src/README.md | 47 +++ .../tools/securitymanager/scanner/Main.java | 103 +++++++ .../scanner/SecurityCheckClassVisitor.java | 279 ++++++++++++++++++ 9 files changed, 577 insertions(+) create mode 100644 libs/entitlement/tools/build.gradle create mode 100644 libs/entitlement/tools/common/build.gradle create mode 100644 libs/entitlement/tools/common/src/main/java/org/elasticsearch/entitlement/tools/Utils.java create mode 100644 libs/entitlement/tools/securitymanager-scanner/build.gradle create mode 100644 libs/entitlement/tools/securitymanager-scanner/licenses/asm-LICENSE.txt create mode 100644 libs/entitlement/tools/securitymanager-scanner/licenses/asm-NOTICE.txt create mode 100644 libs/entitlement/tools/securitymanager-scanner/src/README.md create mode 100644 libs/entitlement/tools/securitymanager-scanner/src/main/java/org/elasticsearch/entitlement/tools/securitymanager/scanner/Main.java create mode 100644 libs/entitlement/tools/securitymanager-scanner/src/main/java/org/elasticsearch/entitlement/tools/securitymanager/scanner/SecurityCheckClassVisitor.java diff --git a/libs/entitlement/tools/build.gradle b/libs/entitlement/tools/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/libs/entitlement/tools/common/build.gradle b/libs/entitlement/tools/common/build.gradle new file mode 100644 index 0000000000000..3373a8f747430 --- /dev/null +++ b/libs/entitlement/tools/common/build.gradle @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +plugins { + id 'java' +} + +group = 'org.elasticsearch.entitlement.tools' + diff --git a/libs/entitlement/tools/common/src/main/java/org/elasticsearch/entitlement/tools/Utils.java b/libs/entitlement/tools/common/src/main/java/org/elasticsearch/entitlement/tools/Utils.java new file mode 100644 index 0000000000000..c72e550a529cd --- /dev/null +++ b/libs/entitlement/tools/common/src/main/java/org/elasticsearch/entitlement/tools/Utils.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.tools; + +import java.io.IOException; +import java.lang.module.ModuleDescriptor; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +public class Utils { + + public static Map> findModuleExports(FileSystem fs) throws IOException { + var modulesExports = new HashMap>(); + try (var stream = Files.walk(fs.getPath("modules"))) { + stream.filter(p -> p.getFileName().toString().equals("module-info.class")).forEach(x -> { + try (var is = Files.newInputStream(x)) { + var md = ModuleDescriptor.read(is); + modulesExports.put( + md.name(), + md.exports() + .stream() + .filter(e -> e.isQualified() == false) + .map(ModuleDescriptor.Exports::source) + .collect(Collectors.toSet()) + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + return modulesExports; + } + +} diff --git a/libs/entitlement/tools/securitymanager-scanner/build.gradle b/libs/entitlement/tools/securitymanager-scanner/build.gradle new file mode 100644 index 0000000000000..8d035c9e847c6 --- /dev/null +++ b/libs/entitlement/tools/securitymanager-scanner/build.gradle @@ -0,0 +1,61 @@ +plugins { + id 'application' +} + +apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.publish' + +tasks.named("dependencyLicenses").configure { + mapping from: /asm-.*/, to: 'asm' +} + +group = 'org.elasticsearch.entitlement.tools' + +ext { + javaMainClass = "org.elasticsearch.entitlement.tools.securitymanager.scanner.Main" +} + +application { + mainClass.set(javaMainClass) + applicationDefaultJvmArgs = [ + '--add-exports', 'java.base/sun.security.util=ALL-UNNAMED', + '--add-opens', 'java.base/java.lang=ALL-UNNAMED', + '--add-opens', 'java.base/java.net=ALL-UNNAMED', + '--add-opens', 'java.base/java.net.spi=ALL-UNNAMED', + '--add-opens', 'java.base/java.util.concurrent=ALL-UNNAMED', + '--add-opens', 'java.base/javax.crypto=ALL-UNNAMED', + '--add-opens', 'java.base/javax.security.auth=ALL-UNNAMED', + '--add-opens', 'java.base/jdk.internal.logger=ALL-UNNAMED', + '--add-opens', 'java.base/sun.nio.ch=ALL-UNNAMED', + '--add-opens', 'jdk.management.jfr/jdk.management.jfr=ALL-UNNAMED', + '--add-opens', 'java.logging/java.util.logging=ALL-UNNAMED', + '--add-opens', 'java.logging/sun.util.logging.internal=ALL-UNNAMED', + '--add-opens', 'java.naming/javax.naming.ldap.spi=ALL-UNNAMED', + '--add-opens', 'java.rmi/sun.rmi.runtime=ALL-UNNAMED', + '--add-opens', 'jdk.dynalink/jdk.dynalink=ALL-UNNAMED', + '--add-opens', 'jdk.dynalink/jdk.dynalink.linker=ALL-UNNAMED', + '--add-opens', 'java.desktop/sun.awt=ALL-UNNAMED', + '--add-opens', 'java.sql.rowset/javax.sql.rowset.spi=ALL-UNNAMED', + '--add-opens', 'java.sql/java.sql=ALL-UNNAMED', + '--add-opens', 'java.xml.crypto/com.sun.org.apache.xml.internal.security.utils=ALL-UNNAMED' + ] +} + +repositories { + mavenCentral() +} + +dependencies { + compileOnly(project(':libs:core')) + implementation 'org.ow2.asm:asm:9.7' + implementation 'org.ow2.asm:asm-util:9.7' + implementation(project(':libs:entitlement:tools:common')) +} + +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' +} + +tasks.named("thirdPartyAudit").configure { + ignoreMissingClasses() +} diff --git a/libs/entitlement/tools/securitymanager-scanner/licenses/asm-LICENSE.txt b/libs/entitlement/tools/securitymanager-scanner/licenses/asm-LICENSE.txt new file mode 100644 index 0000000000000..afb064f2f2666 --- /dev/null +++ b/libs/entitlement/tools/securitymanager-scanner/licenses/asm-LICENSE.txt @@ -0,0 +1,26 @@ +Copyright (c) 2012 France Télécom +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. diff --git a/libs/entitlement/tools/securitymanager-scanner/licenses/asm-NOTICE.txt b/libs/entitlement/tools/securitymanager-scanner/licenses/asm-NOTICE.txt new file mode 100644 index 0000000000000..8d1c8b69c3fce --- /dev/null +++ b/libs/entitlement/tools/securitymanager-scanner/licenses/asm-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/libs/entitlement/tools/securitymanager-scanner/src/README.md b/libs/entitlement/tools/securitymanager-scanner/src/README.md new file mode 100644 index 0000000000000..c01ba1387d1c8 --- /dev/null +++ b/libs/entitlement/tools/securitymanager-scanner/src/README.md @@ -0,0 +1,47 @@ +This tool scans the JDK on which it is running, looking for any location where `SecurityManager` is currently used, thus giving us a list of "entry points" inside the JDK where security checks are currently happening. + +More in detail, the tool scans for calls to any `SecurityManager` method starting with `check` (e.g. `checkWrite`). The tool treats the generic `checkPermission` method a little bit differently: `checkPermission` accepts a generic `Permission` object, it tries to read the permission type and permission name to give more information about it, trying to match two patterns that are used frequently inside the JDK: + +Pattern 1: private static permission field + +```java +private static final RuntimePermission INET_ADDRESS_RESOLVER_PERMISSION = +new RuntimePermission("inetAddressResolverProvider"); +... +sm.checkPermission(INET_ADDRESS_RESOLVER_PERMISSION); +``` +Pattern 2: direct object creation + +```java +sm.checkPermission(new LinkPermission("symbolic")); +``` + +The tool will recognize this pattern, and report the permission type and name alongside the `checkPermission` entry point (type `RuntimePermission` and name `inetAddressResolverProvider` in the first case, type `LinkPermission` and name `symbolic` in the second). + +This allows to give more information (either a specific type like `LinkPermission`, or a specific name like `inetAddressResolverProvider`) to generic `checkPermission` to help in deciding how to classify the permission check. The 2 patterns work quite well and cover roughly 90% of the cases. + +In order to run the tool, use: +```shell +./gradlew :libs:entitlement:tools:securitymanager-scanner:run +``` +The output of the tool is a CSV file, with one line for each entry-point, columns separated by `TAB` + +The columns are: +1. Module name +2. File name (from source root) +3. Line number +4. Fully qualified class name (ASM style, with `/` separators) +5. Method name +6. Method descriptor (ASM signature) +6. Visibility (PUBLIC/PUBLIC-METHOD/PRIVATE) +7. Check detail 1 (method name, or in case of checkPermission, permission name. Might be `MISSING`) +8. Check detail 2 (in case of checkPermission, the argument type (`Permission` subtype). Might be `MISSING`) + +Examples: +``` +java.base sun/nio/ch/DatagramChannelImpl.java 1360 sun/nio/ch/DatagramChannelImpl connect (Ljava/net/SocketAddress;Z)Ljava/nio/channels/DatagramChannel; PRIVATE checkConnect +``` +or +``` +java.base java/net/ResponseCache.java 118 java/net/ResponseCache setDefault (Ljava/net/ResponseCache;)V PUBLIC setResponseCache java/net/NetPermission +``` diff --git a/libs/entitlement/tools/securitymanager-scanner/src/main/java/org/elasticsearch/entitlement/tools/securitymanager/scanner/Main.java b/libs/entitlement/tools/securitymanager-scanner/src/main/java/org/elasticsearch/entitlement/tools/securitymanager/scanner/Main.java new file mode 100644 index 0000000000000..bea49e0296e67 --- /dev/null +++ b/libs/entitlement/tools/securitymanager-scanner/src/main/java/org/elasticsearch/entitlement/tools/securitymanager/scanner/Main.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.tools.securitymanager.scanner; + +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.entitlement.tools.Utils; +import org.objectweb.asm.ClassReader; + +import java.io.IOException; +import java.net.URI; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.util.HashMap; +import java.util.List; +import java.util.Set; + +public class Main { + + static final Set excludedModules = Set.of("java.desktop"); + + private static void identifySMChecksEntryPoints() throws IOException { + + FileSystem fs = FileSystems.getFileSystem(URI.create("jrt:/")); + + var moduleExports = Utils.findModuleExports(fs); + + var callers = new HashMap>(); + var visitor = new SecurityCheckClassVisitor(callers); + + try (var stream = Files.walk(fs.getPath("modules"))) { + stream.filter(x -> x.toString().endsWith(".class")).forEach(x -> { + var moduleName = x.subpath(1, 2).toString(); + if (excludedModules.contains(moduleName) == false) { + try { + ClassReader cr = new ClassReader(Files.newInputStream(x)); + visitor.setCurrentModule(moduleName, moduleExports.get(moduleName)); + var path = x.getNameCount() > 3 ? x.subpath(2, x.getNameCount() - 1).toString() : ""; + visitor.setCurrentSourcePath(path); + cr.accept(visitor, 0); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }); + } + + printToStdout(callers); + } + + @SuppressForbidden(reason = "This simple tool just prints to System.out") + private static void printToStdout(HashMap> callers) { + for (var kv : callers.entrySet()) { + for (var e : kv.getValue()) { + System.out.println(toString(kv.getKey(), e)); + } + } + } + + private static final String SEPARATOR = "\t"; + + private static String toString(String calleeName, SecurityCheckClassVisitor.CallerInfo callerInfo) { + var s = callerInfo.moduleName() + SEPARATOR + callerInfo.source() + SEPARATOR + callerInfo.line() + SEPARATOR + callerInfo + .className() + SEPARATOR + callerInfo.methodName() + SEPARATOR + callerInfo.methodDescriptor() + SEPARATOR; + + if (callerInfo.externalAccess().contains(SecurityCheckClassVisitor.ExternalAccess.METHOD) + && callerInfo.externalAccess().contains(SecurityCheckClassVisitor.ExternalAccess.CLASS)) { + s += "PUBLIC"; + } else if (callerInfo.externalAccess().contains(SecurityCheckClassVisitor.ExternalAccess.METHOD)) { + s += "PUBLIC-METHOD"; + } else { + s += "PRIVATE"; + } + + if (callerInfo.runtimePermissionType() != null) { + s += SEPARATOR + callerInfo.runtimePermissionType(); + } else if (calleeName.equals("checkPermission")) { + s += SEPARATOR + "MISSING"; // missing information + } else { + s += SEPARATOR + calleeName; + } + + if (callerInfo.permissionType() != null) { + s += SEPARATOR + callerInfo.permissionType(); + } else if (calleeName.equals("checkPermission")) { + s += SEPARATOR + "MISSING"; // missing information + } else { + s += SEPARATOR; + } + return s; + } + + public static void main(String[] args) throws IOException { + identifySMChecksEntryPoints(); + } +} diff --git a/libs/entitlement/tools/securitymanager-scanner/src/main/java/org/elasticsearch/entitlement/tools/securitymanager/scanner/SecurityCheckClassVisitor.java b/libs/entitlement/tools/securitymanager-scanner/src/main/java/org/elasticsearch/entitlement/tools/securitymanager/scanner/SecurityCheckClassVisitor.java new file mode 100644 index 0000000000000..a75fd5fc685f1 --- /dev/null +++ b/libs/entitlement/tools/securitymanager-scanner/src/main/java/org/elasticsearch/entitlement/tools/securitymanager/scanner/SecurityCheckClassVisitor.java @@ -0,0 +1,279 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.tools.securitymanager.scanner; + +import org.elasticsearch.core.SuppressForbidden; +import org.objectweb.asm.ClassVisitor; +import org.objectweb.asm.Label; +import org.objectweb.asm.MethodVisitor; +import org.objectweb.asm.Type; + +import java.lang.constant.ClassDesc; +import java.lang.reflect.InaccessibleObjectException; +import java.lang.reflect.Modifier; +import java.nio.file.Path; +import java.security.Permission; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.objectweb.asm.Opcodes.ACC_PUBLIC; +import static org.objectweb.asm.Opcodes.ASM9; +import static org.objectweb.asm.Opcodes.GETSTATIC; +import static org.objectweb.asm.Opcodes.INVOKEDYNAMIC; +import static org.objectweb.asm.Opcodes.INVOKEINTERFACE; +import static org.objectweb.asm.Opcodes.INVOKESPECIAL; +import static org.objectweb.asm.Opcodes.INVOKESTATIC; +import static org.objectweb.asm.Opcodes.INVOKEVIRTUAL; +import static org.objectweb.asm.Opcodes.NEW; + +class SecurityCheckClassVisitor extends ClassVisitor { + + static final String SECURITY_MANAGER_INTERNAL_NAME = "java/lang/SecurityManager"; + static final Set excludedClasses = Set.of(SECURITY_MANAGER_INTERNAL_NAME); + + enum ExternalAccess { + CLASS, + METHOD + } + + record CallerInfo( + String moduleName, + String source, + int line, + String className, + String methodName, + String methodDescriptor, + EnumSet externalAccess, + String permissionType, + String runtimePermissionType + ) {} + + private final Map> callerInfoByMethod; + private String className; + private int classAccess; + private String source; + private String moduleName; + private String sourcePath; + private Set moduleExports; + + protected SecurityCheckClassVisitor(Map> callerInfoByMethod) { + super(ASM9); + this.callerInfoByMethod = callerInfoByMethod; + } + + @Override + public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) { + super.visit(version, access, name, signature, superName, interfaces); + this.className = name; + this.classAccess = access; + } + + @Override + public void visitSource(String source, String debug) { + super.visitSource(source, debug); + this.source = source; + } + + @Override + public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) { + if (excludedClasses.contains(this.className)) { + return super.visitMethod(access, name, descriptor, signature, exceptions); + } + return new SecurityCheckMethodVisitor(super.visitMethod(access, name, descriptor, signature, exceptions), name, access, descriptor); + } + + public void setCurrentModule(String moduleName, Set moduleExports) { + this.moduleName = moduleName; + this.moduleExports = moduleExports; + } + + public void setCurrentSourcePath(String path) { + this.sourcePath = path; + } + + private class SecurityCheckMethodVisitor extends MethodVisitor { + + private final String methodName; + private final String methodDescriptor; + private int line; + private String permissionType; + private String runtimePermissionType; + private final int methodAccess; + + protected SecurityCheckMethodVisitor(MethodVisitor mv, String methodName, int methodAccess, String methodDescriptor) { + super(ASM9, mv); + this.methodName = methodName; + this.methodAccess = methodAccess; + this.methodDescriptor = methodDescriptor; + } + + private static final Set KNOWN_PERMISSIONS = Set.of("jdk.vm.ci.services.JVMCIPermission"); + + @SuppressForbidden(reason = "System.err is OK for this simple command-line tool") + private void handleException(String className, Throwable e) { + System.err.println("Cannot process " + className + ": " + e.getMessage()); + } + + @Override + public void visitTypeInsn(int opcode, String type) { + super.visitTypeInsn(opcode, type); + if (opcode == NEW) { + if (type.endsWith("Permission")) { + var objectType = Type.getObjectType(type); + if (KNOWN_PERMISSIONS.contains(objectType.getClassName())) { + permissionType = type; + } else { + try { + var clazz = Class.forName(objectType.getClassName()); + if (Permission.class.isAssignableFrom(clazz)) { + permissionType = type; + } + } catch (ClassNotFoundException e) { + handleException(objectType.getClassName(), e); + } + } + } + } + } + + @Override + @SuppressForbidden(reason = "We need to violate java's access system to access private parts") + public void visitFieldInsn(int opcode, String owner, String name, String descriptor) { + super.visitFieldInsn(opcode, owner, name, descriptor); + if (opcode == GETSTATIC && descriptor.endsWith("Permission;")) { + var permissionType = Type.getType(descriptor); + if (permissionType.getSort() == Type.ARRAY) { + permissionType = permissionType.getElementType(); + } + try { + var clazz = Class.forName(permissionType.getClassName()); + if (Permission.class.isAssignableFrom(clazz)) { + this.permissionType = permissionType.getInternalName(); + } + } catch (ClassNotFoundException e) { + handleException(permissionType.getClassName(), e); + } + + var objectType = Type.getObjectType(owner); + try { + var clazz = Class.forName(objectType.getClassName()); + Arrays.stream(clazz.getDeclaredFields()) + .filter(f -> Modifier.isStatic(f.getModifiers()) && Modifier.isFinal(f.getModifiers())) + .filter(f -> f.getName().equals(name)) + .findFirst() + .ifPresent(x -> { + if (Permission.class.isAssignableFrom(x.getType())) { + try { + x.setAccessible(true); + var p = (Permission) (x.get(null)); + this.runtimePermissionType = p.getName(); + } catch (IllegalAccessException | InaccessibleObjectException e) { + handleException(x.getName(), e); + } + } + }); + + } catch (ClassNotFoundException | NoClassDefFoundError | UnsatisfiedLinkError e) { + handleException(objectType.getClassName(), e); + } + } + } + + @Override + public void visitLdcInsn(Object value) { + super.visitLdcInsn(value); + if (permissionType != null && permissionType.equals("java/lang/RuntimePermission")) { + this.runtimePermissionType = value.toString(); + } + } + + @Override + public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface) { + super.visitMethodInsn(opcode, owner, name, descriptor, isInterface); + if (opcode == INVOKEVIRTUAL + || opcode == INVOKESPECIAL + || opcode == INVOKESTATIC + || opcode == INVOKEINTERFACE + || opcode == INVOKEDYNAMIC) { + + if (SECURITY_MANAGER_INTERNAL_NAME.equals(owner)) { + EnumSet externalAccesses = EnumSet.noneOf(ExternalAccess.class); + if (moduleExports.contains(getPackageName(className))) { + if ((methodAccess & ACC_PUBLIC) != 0) { + externalAccesses.add(ExternalAccess.METHOD); + } + if ((classAccess & ACC_PUBLIC) != 0) { + externalAccesses.add(ExternalAccess.CLASS); + } + } + + if (name.equals("checkPermission")) { + var callers = callerInfoByMethod.computeIfAbsent(name, ignored -> new ArrayList<>()); + callers.add( + new CallerInfo( + moduleName, + Path.of(sourcePath, source).toString(), + line, + className, + methodName, + methodDescriptor, + externalAccesses, + permissionType, + runtimePermissionType + ) + ); + this.permissionType = null; + this.runtimePermissionType = null; + } else if (name.startsWith("check")) { + // Non-generic methods (named methods that which already tell us the permission type) + var callers = callerInfoByMethod.computeIfAbsent(name, ignored -> new ArrayList<>()); + callers.add( + new CallerInfo( + moduleName, + Path.of(sourcePath, source).toString(), + line, + className, + methodName, + methodDescriptor, + externalAccesses, + null, + null + ) + ); + } + } + } + } + + private String getPackageName(String className) { + return ClassDesc.ofInternalName(className).packageName(); + } + + @Override + public void visitParameter(String name, int access) { + if (name != null) super.visitParameter(name, access); + } + + @Override + public void visitLineNumber(int line, Label start) { + super.visitLineNumber(line, start); + this.line = line; + } + + @Override + public void visitEnd() { + super.visitEnd(); + } + } +} From a1daddc9e35e105e1040659127cb5673865d17ce Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 4 Nov 2024 16:07:54 +0000 Subject: [PATCH 319/324] Add note about incompleteness of CBs (#116176) The docs kinda imply that circuit breakers protect against OOMEs, at least that's how some customers seem to interpret them. This commit adds a note spelling out that this isn't the case. --- .../modules/indices/circuit_breaker.asciidoc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/reference/modules/indices/circuit_breaker.asciidoc b/docs/reference/modules/indices/circuit_breaker.asciidoc index 452d4e99704ce..13d81821c4f33 100644 --- a/docs/reference/modules/indices/circuit_breaker.asciidoc +++ b/docs/reference/modules/indices/circuit_breaker.asciidoc @@ -2,7 +2,16 @@ === Circuit breaker settings [[circuit-breaker-description]] // tag::circuit-breaker-description-tag[] -{es} contains multiple circuit breakers used to prevent operations from causing an OutOfMemoryError. Each breaker specifies a limit for how much memory it can use. Additionally, there is a parent-level breaker that specifies the total amount of memory that can be used across all breakers. +{es} contains multiple circuit breakers used to prevent operations from using an excessive amount of memory. Each breaker tracks the memory +used by certain operations and specifies a limit for how much memory it may track. Additionally, there +is a parent-level breaker that specifies the total amount of memory that may be tracked across all breakers. + +When a circuit breaker reaches its limit, {es} will reject further operations. See <> for information about errors +raised by circuit breakers. + +Circuit breakers do not track all memory usage in {es} and therefore provide only incomplete protection against excessive memory usage. If +{es} uses too much memory then it may suffer from performance issues and nodes may even fail with an `OutOfMemoryError`. See +<> for help with troubleshooting high heap usage. Except where noted otherwise, these settings can be dynamically updated on a live cluster with the <> API. From 90892c73b17d051c9100078e5a90fce7bf63c517 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 5 Nov 2024 03:22:30 +1100 Subject: [PATCH 320/324] Mute org.elasticsearch.search.basic.SearchWithRandomDisconnectsIT testSearchWithRandomDisconnects #116175 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 8498032d07b2c..036daa0105c61 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -279,6 +279,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=terms_enum/10_basic/Test security} issue: https://github.com/elastic/elasticsearch/issues/116178 +- class: org.elasticsearch.search.basic.SearchWithRandomDisconnectsIT + method: testSearchWithRandomDisconnects + issue: https://github.com/elastic/elasticsearch/issues/116175 # Examples: # From 599ab7a8ad5dd7773893ae414b7db83422d9e843 Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Mon, 4 Nov 2024 16:23:10 +0000 Subject: [PATCH 321/324] Remove ignored fallback option on GeoIP processor (#116112) This removes the option `fallback_to_default_databases` on the `geoip` ingest processor has been deprecated and ignored since 8.0.0. --- docs/changelog/116112.yaml | 13 +++++++++++++ .../elasticsearch/ingest/geoip/GeoIpProcessor.java | 8 -------- .../ingest/geoip/GeoIpProcessorFactoryTests.java | 9 --------- 3 files changed, 13 insertions(+), 17 deletions(-) create mode 100644 docs/changelog/116112.yaml diff --git a/docs/changelog/116112.yaml b/docs/changelog/116112.yaml new file mode 100644 index 0000000000000..9e15d691a77d3 --- /dev/null +++ b/docs/changelog/116112.yaml @@ -0,0 +1,13 @@ +pr: 116112 +summary: Remove ignored fallback option on GeoIP processor +area: Ingest Node +type: breaking +issues: [] +breaking: + title: Remove ignored fallback option on GeoIP processor + area: Ingest + details: >- + The option fallback_to_default_databases on the geoip ingest processor has been removed. + (It was deprecated and ignored since 8.0.0.) + impact: Customers should stop remove the noop fallback_to_default_databases option on any geoip ingest processors. + notable: false diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index f99f8dbe2fdd0..9e0392b2b7974 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -36,8 +36,6 @@ public final class GeoIpProcessor extends AbstractProcessor { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(GeoIpProcessor.class); - static final String DEFAULT_DATABASES_DEPRECATION_MESSAGE = "the [fallback_to_default_databases] has been deprecated, because " - + "Elasticsearch no longer includes the default Maxmind geoip databases. This setting will be removed in Elasticsearch 9.0"; static final String UNSUPPORTED_DATABASE_DEPRECATION_MESSAGE = "the geoip processor will no longer support database type [{}] " + "in a future version of Elasticsearch"; // TODO add a message about migration? @@ -241,12 +239,6 @@ public Processor create( // validate (and consume) the download_database_on_pipeline_creation property even though the result is not used by the factory readBooleanProperty(type, processorTag, config, "download_database_on_pipeline_creation", true); - // noop, should be removed in 9.0 - Object value = config.remove("fallback_to_default_databases"); - if (value != null) { - deprecationLogger.warn(DeprecationCategory.OTHER, "default_databases_message", DEFAULT_DATABASES_DEPRECATION_MESSAGE); - } - final String databaseType; try (IpDatabase ipDatabase = ipDatabaseProvider.getDatabase(databaseFile)) { if (ipDatabase == null) { diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 5ac0c76054d33..34003b79fc18b 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -473,15 +473,6 @@ public void testLoadingCustomDatabase() throws IOException { threadPool.shutdown(); } - public void testFallbackUsingDefaultDatabases() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(GEOIP_TYPE, databaseNodeService); - Map config = new HashMap<>(); - config.put("field", "source_field"); - config.put("fallback_to_default_databases", randomBoolean()); - factory.create(null, null, null, config); - assertWarnings(GeoIpProcessor.DEFAULT_DATABASES_DEPRECATION_MESSAGE); - } - public void testDownloadDatabaseOnPipelineCreation() throws IOException { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(GEOIP_TYPE, databaseNodeService); Map config = new HashMap<>(); From 9658940a518099f3e7f1b9e8d0f802a4be788094 Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Mon, 4 Nov 2024 17:26:50 +0100 Subject: [PATCH 322/324] Ignore conflicting fields during dynamic mapping update (#114227) This fixes a bug when concurrently executing index requests that have different types for the same field. --- docs/changelog/114227.yaml | 6 ++++ .../index/mapper/DynamicMappingIT.java | 31 +++++++++++++++++ .../index/mapper/ObjectMapper.java | 26 ++++++++++++++ .../index/mapper/ObjectMapperMergeTests.java | 34 +++++++++++++++++++ 4 files changed, 97 insertions(+) create mode 100644 docs/changelog/114227.yaml diff --git a/docs/changelog/114227.yaml b/docs/changelog/114227.yaml new file mode 100644 index 0000000000000..9b508f07c9e5a --- /dev/null +++ b/docs/changelog/114227.yaml @@ -0,0 +1,6 @@ +pr: 114227 +summary: Ignore conflicting fields during dynamic mapping update +area: Mapping +type: bug +issues: + - 114228 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 9b9b23e71abed..f7bf775bc4f8b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -63,6 +63,8 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.oneOf; public class DynamicMappingIT extends ESIntegTestCase { @@ -190,6 +192,35 @@ private Map indexConcurrently(int numberOfFieldsToCreate, Settin return properties; } + public void testConcurrentDynamicMappingsWithConflictingType() throws Throwable { + int numberOfDocsToCreate = 16; + indicesAdmin().prepareCreate("index").setSettings(Settings.builder()).get(); + ensureGreen("index"); + final AtomicReference error = new AtomicReference<>(); + startInParallel(numberOfDocsToCreate, i -> { + try { + assertEquals( + DocWriteResponse.Result.CREATED, + prepareIndex("index").setId(Integer.toString(i)).setSource("field" + i, 0, "field" + (i + 1), 0.1).get().getResult() + ); + } catch (Exception e) { + error.compareAndSet(null, e); + } + }); + if (error.get() != null) { + throw error.get(); + } + client().admin().indices().prepareRefresh("index").get(); + for (int i = 0; i < numberOfDocsToCreate; ++i) { + assertTrue(client().prepareGet("index", Integer.toString(i)).get().isExists()); + } + Map index = indicesAdmin().prepareGetMappings("index").get().getMappings().get("index").getSourceAsMap(); + for (int i = 0, j = 1; i < numberOfDocsToCreate; i++, j++) { + assertThat(new WriteField("properties.field" + i + ".type", () -> index).get(null), is(oneOf("long", "float"))); + assertThat(new WriteField("properties.field" + j + ".type", () -> index).get(null), is(oneOf("long", "float"))); + } + } + public void testPreflightCheckAvoidsMaster() throws InterruptedException, IOException { // can't use INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING nor INDEX_MAPPING_DEPTH_LIMIT_SETTING as a check here, as that is already // checked at parse time, see testTotalFieldsLimitForDynamicMappingsUpdateCheckedAtDocumentParseTime diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 70c4a3ac213a2..023f6fcea0bfe 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -9,6 +9,8 @@ package org.elasticsearch.index.mapper; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.LeafReader; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; @@ -41,6 +43,7 @@ import java.util.stream.Stream; public class ObjectMapper extends Mapper { + private static final Logger logger = LogManager.getLogger(ObjectMapper.class); private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ObjectMapper.class); public static final FeatureFlag SUB_OBJECTS_AUTO_FEATURE_FLAG = new FeatureFlag("sub_objects_auto"); @@ -679,6 +682,13 @@ private static Map buildMergedMappers( // replaces an existing one. if (objectMergeContext.getMapperBuilderContext().getMergeReason() == MergeReason.INDEX_TEMPLATE) { putMergedMapper(mergedMappers, mergeWithMapper); + } else if (isConflictingDynamicMapping(objectMergeContext, mergeWithMapper, mergeIntoMapper)) { + logger.trace( + "ignoring conflicting dynamic mapping update for field={} current_type={} new_type={}", + mergeIntoMapper.fullPath(), + mergeIntoMapper.typeName(), + mergeWithMapper.typeName() + ); } else { putMergedMapper(mergedMappers, mergeIntoMapper.merge(mergeWithMapper, objectMergeContext)); } @@ -687,6 +697,22 @@ private static Map buildMergedMappers( return Map.copyOf(mergedMappers); } + /* + * We're ignoring the field if a dynamic mapping update tries to define a conflicting field type. + * This is caused by another index request with a different value racing to update the mappings. + * After updating the mappings, the index request will be re-tried and sees the updated mappings for this field. + * The updated mappings will then be taken into account when parsing the document + * (for example by coercing the value, ignore_malformed values, or failing the index request due to a type conflict). + */ + private static boolean isConflictingDynamicMapping( + MapperMergeContext objectMergeContext, + Mapper mergeWithMapper, + Mapper mergeIntoMapper + ) { + return objectMergeContext.getMapperBuilderContext().getMergeReason().isAutoUpdate() + && mergeIntoMapper.typeName().equals(mergeWithMapper.typeName()) == false; + } + private static void putMergedMapper(Map mergedMappers, @Nullable Mapper merged) { if (merged != null) { mergedMappers.put(merged.leafName(), merged); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index 3a68ad301ce5c..1f8a2a754428b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -9,13 +9,19 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.test.ESTestCase; import java.util.Collections; import java.util.Optional; import static org.elasticsearch.index.mapper.MapperService.MergeReason.INDEX_TEMPLATE; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_AUTO_UPDATE; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_AUTO_UPDATE_PREFLIGHT; import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; public final class ObjectMapperMergeTests extends ESTestCase { @@ -318,6 +324,34 @@ public void testMergeSubobjectsFalseWithObject() { assertNotNull(parentMapper.getMapper("child.grandchild")); } + public void testConflictingDynamicUpdate() { + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new KeywordFieldMapper.Builder("http.status_code", IndexVersion.current()) + ).build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new NumberFieldMapper.Builder( + "http.status_code", + NumberFieldMapper.NumberType.LONG, + ScriptCompiler.NONE, + false, + true, + IndexVersion.current(), + null + ) + ).build(MapperBuilderContext.root(false, false)); + + MapperService.MergeReason autoUpdateMergeReason = randomFrom(MAPPING_AUTO_UPDATE, MAPPING_AUTO_UPDATE_PREFLIGHT); + ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, autoUpdateMergeReason, Long.MAX_VALUE)); + FieldMapper httpStatusCode = (FieldMapper) merged.getMapper("http.status_code"); + assertThat(httpStatusCode, is(instanceOf(KeywordFieldMapper.class))); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) + ); + assertThat(e.getMessage(), equalTo("mapper [http.status_code] cannot be changed from type [keyword] to [long]")); + } + private static RootObjectMapper createRootSubobjectFalseLeafWithDots() { FieldMapper.Builder fieldBuilder = new KeywordFieldMapper.Builder("host.name", IndexVersion.current()); FieldMapper fieldMapper = fieldBuilder.build(MapperBuilderContext.root(false, false)); From de9851aea5c509fd586bf054a450c37e2b5befa3 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Mon, 4 Nov 2024 16:28:57 +0000 Subject: [PATCH 323/324] Don't allow secure settings in YML config (109115) (#115779) * Don't allow secure settings in YML config (109115) Elasticsearch should refuse to start if a secure setting is defined in elasticsearch.yml, in order to protect users from accidentally putting their secrets in a place where they are unexpectedly visible Fixes #109115 --- docs/changelog/115779.yaml | 6 ++++ .../settings/AbstractScopedSettings.java | 9 +++++ .../common/settings/SecureSetting.java | 13 ++----- .../common/settings/ScopedSettingsTests.java | 36 +++++++++++++++++++ .../common/settings/SettingsTests.java | 7 ---- 5 files changed, 54 insertions(+), 17 deletions(-) create mode 100644 docs/changelog/115779.yaml diff --git a/docs/changelog/115779.yaml b/docs/changelog/115779.yaml new file mode 100644 index 0000000000000..326751db7750b --- /dev/null +++ b/docs/changelog/115779.yaml @@ -0,0 +1,6 @@ +pr: 115779 +summary: Don't allow secure settings in YML config (109115) +area: Infra/Settings +type: bug +issues: + - 109115 diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 60626b9e2375f..c65f75df663d2 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -599,6 +599,15 @@ void validate(final String key, final Settings settings, final boolean validateV ); } } + + if (setting instanceof SecureSetting && settings.hasValue(key)) { + throw new IllegalArgumentException( + "Setting [" + + key + + "] is a secure setting" + + " and must be stored inside the Elasticsearch keystore, but was found inside elasticsearch.yml" + ); + } } if (validateValue) { setting.get(settings); diff --git a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java index 67ac55f7b19eb..36ca2df08724d 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java @@ -82,21 +82,14 @@ public boolean exists(Settings.Builder builder) { public T get(Settings settings) { checkDeprecation(settings); final SecureSettings secureSettings = settings.getSecureSettings(); - if (secureSettings == null || secureSettings.getSettingNames().contains(getKey()) == false) { - if (super.exists(settings)) { - throw new IllegalArgumentException( - "Setting [" - + getKey() - + "] is a secure setting" - + " and must be stored inside the Elasticsearch keystore, but was found inside elasticsearch.yml" - ); - } + String key = getKey(); + if (secureSettings == null || secureSettings.getSettingNames().contains(key) == false) { return getFallback(settings); } try { return getSecret(secureSettings); } catch (GeneralSecurityException e) { - throw new RuntimeException("failed to read secure setting " + getKey(), e); + throw new RuntimeException("failed to read secure setting " + key, e); } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 8051437cf6e12..47026fe713c5c 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -1138,6 +1138,42 @@ public void testDiffSecureSettings() { assertTrue(diffed.isEmpty()); } + public void testValidateSecureSettingInsecureOverride() { + MockSecureSettings secureSettings = new MockSecureSettings(); + String settingName = "something.secure"; + secureSettings.setString(settingName, "secure"); + Settings settings = Settings.builder().put(settingName, "notreallysecure").setSecureSettings(secureSettings).build(); + + ClusterSettings clusterSettings = new ClusterSettings( + settings, + Collections.singleton(SecureSetting.secureString(settingName, null)) + ); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> clusterSettings.validate(settings, false)); + assertEquals( + e.getMessage(), + "Setting [something.secure] is a secure setting " + + "and must be stored inside the Elasticsearch keystore, but was found inside elasticsearch.yml" + ); + } + + public void testValidateSecureSettingInInsecureSettings() { + String settingName = "something.secure"; + Settings settings = Settings.builder().put(settingName, "notreallysecure").build(); + + ClusterSettings clusterSettings = new ClusterSettings( + settings, + Collections.singleton(SecureSetting.secureString(settingName, null)) + ); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> clusterSettings.validate(settings, false)); + assertEquals( + e.getMessage(), + "Setting [something.secure] is a secure setting " + + "and must be stored inside the Elasticsearch keystore, but was found inside elasticsearch.yml" + ); + } + public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { return IndexMetadata.builder(name).settings(indexSettings(IndexVersion.current(), 1, 0).put(indexSettings)).build(); } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java index cfdc5e6befaaa..5fefd92d176a5 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java @@ -473,13 +473,6 @@ public void testDiff() throws IOException { } } - public void testSecureSettingConflict() { - Setting setting = SecureSetting.secureString("something.secure", null); - Settings settings = Settings.builder().put("something.secure", "notreallysecure").build(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> setting.get(settings)); - assertTrue(e.getMessage().contains("must be stored inside the Elasticsearch keystore")); - } - public void testSecureSettingIllegalName() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> SecureSetting.secureString("*IllegalName", null)); assertTrue(e.getMessage().contains("does not match the allowed setting name pattern")); From 409fb8db7235be96c0cfdf8cf3514ddfdf0aa8ba Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Mon, 4 Nov 2024 17:58:59 +0100 Subject: [PATCH 324/324] The common type between any two string types is KEYWORD (#116107) * The common type between any two string types is KEYWORD The only time we return TEXT or SEMANTIC_TEXT is if both types are of that type. * Simplify --- .../xpack/esql/type/EsqlDataTypeConverter.java | 11 ++--------- .../xpack/esql/type/EsqlDataTypeConverterTests.java | 6 +----- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index 7fb998e82001e..c9c292769b570 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -74,8 +74,6 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; -import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; -import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; @@ -366,13 +364,8 @@ public static DataType commonType(DataType left, DataType right) { } } if (isString(left) && isString(right)) { - if (left == SEMANTIC_TEXT || right == SEMANTIC_TEXT) { - return KEYWORD; - } - if (left == TEXT || right == TEXT) { - return TEXT; - } - return right; + // Both TEXT and SEMANTIC_TEXT are processed as KEYWORD + return KEYWORD; } if (left.isNumeric() && right.isNumeric()) { int lsize = left.estimatedSize().orElseThrow(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java index b2228b5543ef2..b30f0870496e3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java @@ -36,10 +36,8 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.OBJECT; import static org.elasticsearch.xpack.esql.core.type.DataType.PARTIAL_AGG; import static org.elasticsearch.xpack.esql.core.type.DataType.SCALED_FLOAT; -import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.SHORT; import static org.elasticsearch.xpack.esql.core.type.DataType.SOURCE; -import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TSID_DATA_TYPE; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSUPPORTED; @@ -71,10 +69,8 @@ public void testCommonTypeStrings() { } else if ((isString(dataType1) && isString(dataType2))) { if (dataType1 == dataType2) { assertEqualsCommonType(dataType1, dataType2, dataType1); - } else if (dataType1 == SEMANTIC_TEXT || dataType2 == SEMANTIC_TEXT) { - assertEqualsCommonType(dataType1, dataType2, KEYWORD); } else { - assertEqualsCommonType(dataType1, dataType2, TEXT); + assertEqualsCommonType(dataType1, dataType2, KEYWORD); } } else { assertNullCommonType(dataType1, dataType2);