diff --git a/docs-mdx/painless/painless-field-context.mdx b/docs-mdx/painless/painless-field-context.mdx new file mode 100644 index 0000000000000..8e3c38938b5b8 --- /dev/null +++ b/docs-mdx/painless/painless-field-context.mdx @@ -0,0 +1,136 @@ +--- +id: enElasticsearchPainlessPainlessFieldContext +slug: /en/elasticsearch/painless/painless-field-context +title: Field context +description: Description to be written +tags: [] +--- + +
+ +Use a Painless script to create a +[script field](((ref))/search-fields.html#script-fields) to return +a customized value for each document in the results of a query. + +**Variables** + +`params` (`Map`, read-only) + : User-defined parameters passed in as part of the query. + +`doc` (`Map`, read-only) + : Contains the fields of the specified document where each field is a + `List` of values. + +[`params['_source']`](((ref))/mapping-source-field.html) (`Map`, read-only) + : Contains extracted JSON in a `Map` and `List` structure for the fields + existing in a stored document. + +**Return** + +`Object` + : The customized value for each document. + +**API** + +Both the standard Painless API and +Specialized Field API are available. + +**Example** + +To run this example, first follow the steps in +context examples. + +You can then use these two example scripts to compute custom information +for each search hit and output it to two new fields. + +The first script gets the doc value for the `datetime` field and calls +the `getDayOfWeekEnum` function to determine the corresponding day of the week. + +```Painless +doc['datetime'].value.getDayOfWeekEnum().getDisplayName(TextStyle.FULL, Locale.ROOT) +``` + +The second script calculates the number of actors. Actors' names are stored +as a keyword array in the `actors` field. + +```Painless +doc['actors'].size() [^1] +``` +[^1]: By default, doc values are not available for `text` fields. If `actors` was +a `text` field, you could still calculate the number of actors by extracting +values from `_source` with `params['_source']['actors'].size()`. 
+ +The following request returns the calculated day of week and the number of +actors that appear in each play: + +```console +GET seats/_search +{ + "size": 2, + "query": { + "match_all": {} + }, + "script_fields": { + "day-of-week": { + "script": { + "source": "doc['datetime'].value.getDayOfWeekEnum().getDisplayName(TextStyle.FULL, Locale.ROOT)" + } + }, + "number-of-actors": { + "script": { + "source": "doc['actors'].size()" + } + } + } +} +``` +{/* TEST[setup:seats] */} + +```console-result +{ + "took" : 68, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 11, + "relation" : "eq" + }, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "seats", + "_id" : "1", + "_score" : 1.0, + "fields" : { + "day-of-week" : [ + "Thursday" + ], + "number-of-actors" : [ + 4 + ] + } + }, + { + "_index" : "seats", + "_id" : "2", + "_score" : 1.0, + "fields" : { + "day-of-week" : [ + "Thursday" + ], + "number-of-actors" : [ + 1 + ] + } + } + ] + } +} +``` +{/* TESTRESPONSE[s/"took" : 68/"took" : "$body.took"/] */} \ No newline at end of file diff --git a/docs/changelog/105442.yaml b/docs/changelog/105442.yaml new file mode 100644 index 0000000000000..b0af1b634d984 --- /dev/null +++ b/docs/changelog/105442.yaml @@ -0,0 +1,6 @@ +pr: 105442 +summary: Handling exceptions on watcher reload +area: Watcher +type: bug +issues: + - 69842 diff --git a/docs/changelog/105894.yaml b/docs/changelog/105894.yaml new file mode 100644 index 0000000000000..a1a99eaa6259b --- /dev/null +++ b/docs/changelog/105894.yaml @@ -0,0 +1,5 @@ +pr: 105894 +summary: Add allocation stats +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/106150.yaml b/docs/changelog/106150.yaml new file mode 100644 index 0000000000000..05bd8b06987c6 --- /dev/null +++ b/docs/changelog/106150.yaml @@ -0,0 +1,5 @@ +pr: 106150 +summary: Use correct system index bulk executor +area: CRUD +type: bug +issues: [] 
diff --git a/docs/changelog/106172.yaml b/docs/changelog/106172.yaml new file mode 100644 index 0000000000000..80d80b9d7f299 --- /dev/null +++ b/docs/changelog/106172.yaml @@ -0,0 +1,5 @@ +pr: 106172 +summary: "[Profiling] Allow to override index settings" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/106189.yaml b/docs/changelog/106189.yaml new file mode 100644 index 0000000000000..ec485f0e60efb --- /dev/null +++ b/docs/changelog/106189.yaml @@ -0,0 +1,6 @@ +pr: 106189 +summary: Fix numeric sorts in `_cat/nodes` +area: CAT APIs +type: bug +issues: + - 48070 diff --git a/docs/changelog/97561.yaml b/docs/changelog/97561.yaml new file mode 100644 index 0000000000000..cacefbf7e4ca3 --- /dev/null +++ b/docs/changelog/97561.yaml @@ -0,0 +1,5 @@ +pr: 97561 +summary: Add index forecasts to /_cat/allocation output +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/99048.yaml b/docs/changelog/99048.yaml new file mode 100644 index 0000000000000..722c145dae78f --- /dev/null +++ b/docs/changelog/99048.yaml @@ -0,0 +1,6 @@ +pr: 99048 +summary: String sha512() painless function +area: Infra/Scripting +type: enhancement +issues: + - 97691 diff --git a/docs/reference/analysis/normalizers.asciidoc b/docs/reference/analysis/normalizers.asciidoc index deb04a9bd44ba..6acd415437525 100644 --- a/docs/reference/analysis/normalizers.asciidoc +++ b/docs/reference/analysis/normalizers.asciidoc @@ -6,15 +6,15 @@ token. As a consequence, they do not have a tokenizer and only accept a subset of the available char filters and token filters. Only the filters that work on a per-character basis are allowed. For instance a lowercasing filter would be allowed, but not a stemming filter, which needs to look at the keyword as a -whole. The current list of filters that can be used in a normalizer is -following: `arabic_normalization`, `asciifolding`, `bengali_normalization`, +whole. 
The current list of filters that can be used in a normalizer definition +are: `arabic_normalization`, `asciifolding`, `bengali_normalization`, `cjk_width`, `decimal_digit`, `elision`, `german_normalization`, `hindi_normalization`, `indic_normalization`, `lowercase`, `pattern_replace`, `persian_normalization`, `scandinavian_folding`, `serbian_normalization`, `sorani_normalization`, `trim`, `uppercase`. Elasticsearch ships with a `lowercase` built-in normalizer. For other forms of -normalization a custom configuration is required. +normalization, a custom configuration is required. [discrete] === Custom normalizers diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index b0fa51679d661..1a63af19b0a33 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -337,8 +337,22 @@ value `true`. All other values will raise an error. [discrete] === Number Values -All REST APIs support providing numbered parameters as `string` on top -of supporting the native JSON number types. +When passing a numeric parameter in a request body, you may use a `string` +containing the number instead of the native numeric type. For example: + +[source,console] +-------------------------------------------------- +POST /_search +{ + "size": "1000" +} +-------------------------------------------------- + +Integer-valued fields in a response body are described as `integer` (or +occasionally `long`) in this manual, but there are generally no explicit bounds +on such values. JSON, SMILE, CBOR and YAML all permit arbitrarily large integer +values. Do not assume that `integer` fields in a response body will always fit +into a 32-bit signed integer. 
[[byte-units]] [discrete] diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index f9574ed933398..7bab1926cff09 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -57,6 +57,16 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=cat-v] `shards`:: Number of primary and replica shards assigned to the node. +`shards.undesired`:: +Number of shards that are scheduled to be moved elsewhere in the cluster, +or -1 if an allocator other than the desired balance allocator is used + +`write_load.forecast`:: +Sum of index write load forecasts + +`disk.indices.forecast`:: +Sum of shard size forecasts + `disk.indices`:: Disk space used by the node's shards. Does not include disk space for the <> or unassigned shards. @@ -99,6 +109,8 @@ IP address and port for the node. `node`:: Name for the node. Set using <>. +`node.role`, `r`, `role`, `nodeRole`:: +Node roles [[cat-allocation-api-example]] ==== {api-examples-title} @@ -113,8 +125,8 @@ The API returns the following response: [source,txt] -------------------------------------------------- -shards disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role - 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst +shards shards.undesired write_load.forecast disk.indices.forecast disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role + 1 0 0.0 260b 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] // TESTRESPONSE[s/CSUXak2 himrst/.+/ non_json] diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index e2848f9a8e70f..c008b074acccd 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -50,6 +50,9 @@ using metrics. `adaptive_selection`:: Statistics about <>.
+ `allocations`:: + Statistics about allocated shards + `breaker`:: Statistics about the field data circuit breaker. @@ -2802,6 +2805,44 @@ search requests on the keyed node. The rank of this node; used for shard selection when routing search requests. ====== + +[[cluster-nodes-stats-api-response-body-allocations]] +`allocations`:: +(object) +Contains allocations statistics for the node. ++ +.Properties of `allocations` +[%collapsible%open] +====== +`shards`:: +(integer) +The number of shards currently allocated to this node + +`undesired_shards`:: +(integer) +The number of shards that are scheduled to be moved elsewhere in the cluster +if the desired balance allocator is used, or -1 if any other allocator is used. + +`forecasted_ingest_load`:: +(double) +Total forecasted ingest load of all shards assigned to this node + +`forecasted_disk_usage`:: +(<>) +Forecasted size of all shards assigned to the node + +`forecasted_disk_usage_bytes`:: +(integer) +Forecasted size, in bytes, of all shards assigned to the node + +`current_disk_usage`:: +(<>) +Current size of all shards assigned to the node + +`current_disk_usage_bytes`:: +(integer) +Current size, in bytes, of all shards assigned to the node +====== ===== ==== diff --git a/docs/reference/esql/functions/mv-functions.asciidoc b/docs/reference/esql/functions/mv-functions.asciidoc index a95a3d36a9963..07d89e7879e67 100644 --- a/docs/reference/esql/functions/mv-functions.asciidoc +++ b/docs/reference/esql/functions/mv-functions.asciidoc @@ -17,7 +17,9 @@ * <> * <> * <> +* <> * <> +* <> // end::mv_list[] include::mv_avg.asciidoc[] @@ -29,4 +31,6 @@ include::mv_last.asciidoc[] include::mv_max.asciidoc[] include::mv_median.asciidoc[] include::mv_min.asciidoc[] +include::mv_slice.asciidoc[] include::mv_sum.asciidoc[] +include::mv_zip.asciidoc[] diff --git a/docs/reference/esql/functions/mv_slice.asciidoc b/docs/reference/esql/functions/mv_slice.asciidoc new file mode 100644 index 0000000000000..f4431b25232a2 --- /dev/null +++ 
b/docs/reference/esql/functions/mv_slice.asciidoc @@ -0,0 +1,47 @@ +[discrete] +[[esql-mv_slice]] +=== `MV_SLICE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_slice.svg[Embedded,opts=inline] + +*Parameters* + +`field`:: +Multivalue expression. If `null`, the function returns `null`. + +`start`:: +Start position. If `null`, the function returns `null`. The start argument can be negative. An index of -1 is used to specify the last value in the list. + +`end`:: +End position. Optional; if omitted, the position at `start` is returned. The end argument can be negative. An index of -1 is used to specify the last value in the list. + +*Description* + +Returns a subset of the multivalued field using the start and end index values. + +*Supported types* + +include::types/mv_slice.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/ints.csv-spec[tag=mv_slice_positive] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/ints.csv-spec[tag=mv_slice_positive-result] +|=== + +[source.merge.styled,esql] +---- +include::{esql-specs}/ints.csv-spec[tag=mv_slice_negative] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/ints.csv-spec[tag=mv_slice_negative-result] +|=== diff --git a/docs/reference/esql/functions/mv_zip.asciidoc b/docs/reference/esql/functions/mv_zip.asciidoc new file mode 100644 index 0000000000000..4e71e2cafb9c4 --- /dev/null +++ b/docs/reference/esql/functions/mv_zip.asciidoc @@ -0,0 +1,38 @@ +[discrete] +[[esql-mv_zip]] +=== `MV_ZIP` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_zip.svg[Embedded,opts=inline] + +*Parameters* + +`mvLeft`:: +Multivalue expression. + +`mvRight`:: +Multivalue expression. + +`delim`:: +Delimiter. Optional; if omitted, `,` is used as a default delimiter. + +*Description* + +Combines the values from two multivalued fields with a delimiter that joins them together. 
+ +*Supported types* + +include::types/mv_zip.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=mv_zip] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=mv_zip-result] +|=== diff --git a/docs/reference/esql/functions/signature/mv_slice.svg b/docs/reference/esql/functions/signature/mv_slice.svg new file mode 100644 index 0000000000000..277566a35e47d --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_slice.svg @@ -0,0 +1 @@ +MV_SLICE(v,start,end) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mv_zip.svg b/docs/reference/esql/functions/signature/mv_zip.svg new file mode 100644 index 0000000000000..02c61b3c4bc5c --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_zip.svg @@ -0,0 +1 @@ +MV_ZIP(mvLeft,mvRight,delim) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/mv_slice.asciidoc b/docs/reference/esql/functions/types/mv_slice.asciidoc new file mode 100644 index 0000000000000..1891fed3631e9 --- /dev/null +++ b/docs/reference/esql/functions/types/mv_slice.asciidoc @@ -0,0 +1,17 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +v | start | end | result +boolean | integer | integer | boolean +cartesian_point | integer | integer | cartesian_point +cartesian_shape | integer | integer | cartesian_shape +datetime | integer | integer | datetime +double | integer | integer | double +geo_point | integer | integer | geo_point +geo_shape | integer | integer | geo_shape +integer | integer | integer | integer +ip | integer | integer | ip +keyword | integer | integer | keyword +long | integer | integer | long +text | integer | integer | text +version | integer | integer | version +|=== diff --git a/docs/reference/esql/functions/types/mv_zip.asciidoc b/docs/reference/esql/functions/types/mv_zip.asciidoc new file mode 100644 index 0000000000000..6ee6c29c77264 --- /dev/null +++ 
b/docs/reference/esql/functions/types/mv_zip.asciidoc @@ -0,0 +1,6 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +mvLeft | mvRight | delim | result +keyword | keyword | keyword | keyword +text | text | text | keyword +|=== diff --git a/docs/reference/images/search/learning-to-rank-feature-extraction.png b/docs/reference/images/search/learning-to-rank-feature-extraction.png new file mode 100644 index 0000000000000..6dc2ee31902f6 Binary files /dev/null and b/docs/reference/images/search/learning-to-rank-feature-extraction.png differ diff --git a/docs/reference/images/search/learning-to-rank-judgment-list.png b/docs/reference/images/search/learning-to-rank-judgment-list.png new file mode 100644 index 0000000000000..3f0c212df321b Binary files /dev/null and b/docs/reference/images/search/learning-to-rank-judgment-list.png differ diff --git a/docs/reference/images/search/learning-to-rank-overview.png b/docs/reference/images/search/learning-to-rank-overview.png new file mode 100644 index 0000000000000..ea9557a70ac78 Binary files /dev/null and b/docs/reference/images/search/learning-to-rank-overview.png differ diff --git a/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc new file mode 100644 index 0000000000000..fb026578bc00d --- /dev/null +++ b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc @@ -0,0 +1,168 @@ +[[learning-to-rank-model-training]] +=== Deploy and manage Learning To Rank models +++++ +Deploy and manage LTR models +++++ + +preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] + +NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels. +For more information, see {subscriptions}. 
+ +[discrete] +[[learning-to-rank-model-training-workflow]] +==== Train and deploy a model using Eland + +Typically, the https://xgboost.readthedocs.io/en/stable/[XGBoost^] model training process uses standard Python data science tools like Pandas and scikit-learn. + + +We have developed an +https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/08-learning-to-rank.ipynb[example +notebook^] available in the `elasticsearch-labs` repo. This interactive Python notebook +details an end-to-end model training and deployment workflow. + +We highly recommend using https://eland.readthedocs.io/[eland^] in your workflow, because it provides important functionalities for working with LTR in {es}. Use eland to: + +* Configure feature extraction + +* Extract features for training + +* Deploy the model in {es} + +[discrete] +[[learning-to-rank-model-training-feature-definition]] +===== Configure feature extraction in Eland + +Feature extractors are defined using templated queries. https://eland.readthedocs.io/[Eland^] provides the `eland.ml.ltr.QueryFeatureExtractor` to define these feature extractors directly in Python: + +[source,python] +---- +from eland.ml.ltr import QueryFeatureExtractor + +feature_extractors=[ + # We want to use the score of the match query for the title field as a feature: + QueryFeatureExtractor( + feature_name="title_bm25", + query={"match": {"title": "{{query}}"}} + ), + # We can use a script_score query to get the value + # of the field rating directly as a feature: + QueryFeatureExtractor( + feature_name="popularity", + query={ + "script_score": { + "query": {"exists": {"field": "popularity"}}, + "script": {"source": "return doc['popularity'].value;"}, + } + }, + ), + # We can execute a script on the value of the query + # and use the return value as a feature: + QueryFeatureExtractor( + feature_name="query_length", + query={ + "script_score": { + "query": {"match_all": {}}, + "script": { + "source": "return 
params['query'].splitOnToken(' ').length;", + "params": { + "query": "{{query}}", + } + }, + } + }, + ), +] +---- +// NOTCONSOLE + +Once the feature extractors have been defined, they are wrapped in an `eland.ml.ltr.LTRModelConfig` object for use in later training steps: + +[source,python] +---- +from eland.ml.ltr import LTRModelConfig + +ltr_config = LTRModelConfig(feature_extractors) +---- +// NOTCONSOLE + +[discrete] +[[learning-to-rank-model-training-feature-extraction]] +===== Extracting features for training + +Building your dataset is a critical step in the training process. This involves +extracting relevant features and adding them to your judgment list. We +recommend using Eland's `eland.ml.ltr.FeatureLogger` helper class for this +process. + +[source,python] +---- +from eland.ml.ltr import FeatureLogger + +# Create a feature logger that will be used to query {es} to retrieve the features: +feature_logger = FeatureLogger(es_client, MOVIE_INDEX, ltr_config) +---- +// NOTCONSOLE + +The FeatureLogger provides an `extract_features` method which enables you to extract features for a list of specific documents from your judgment list. At the same time, you can pass query parameters to the feature extractors defined earlier: + +[source,python] +---- +feature_logger.extract_features( + query_params:{"query": "foo"}, + doc_ids=["doc-1", "doc-2"] +) +---- +// NOTCONSOLE + +Our https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/08-learning-to-rank.ipynb[example notebook^] explains how to use the `FeatureLogger` to build a training dataset, by adding features to a judgment list. + +[discrete] +[[learning-to-rank-model-training-feature-extraction-notes]] +====== Notes on feature extraction + +* We strongly advise against implementing feature extraction on your own. It's crucial to maintain consistency in feature extraction between the training environment and inference in {es}. 
By using eland tooling, which is developed and tested in tandem with {es}, you can ensure that they function together consistently. + +* Feature extraction is performed by executing queries on the {es} server. This could put a lot of stress on your cluster, especially when your judgment list contains a lot of examples or you have many features. Our feature logger implementation is designed to minimize the number of search requests sent to the server and reduce load. However, it might be best to build your training dataset using an {es} cluster that is isolated from any user-facing, production traffic. + +[discrete] +[[learning-to-rank-model-deployment]] +===== Deploy your model into {es} + +Once your model is trained you will be able to deploy it in your {es} cluster. You can use Eland's `MLModel.import_ltr_model method`: + +[source,python] +---- +from eland.ml import MLModel + +LEARNING_TO_RANK_MODEL_ID="ltr-model-xgboost" + +MLModel.import_ltr_model( + es_client=es_client, + model=ranker, + model_id=LEARNING_TO_RANK_MODEL_ID, + ltr_model_config=ltr_config, + es_if_exists="replace", +) +---- +// NOTCONSOLE + +This method will serialize the trained model and the Learning To Rank configuration (including feature extraction) in a format that {es} can understand. The model is then deployed to {es} using the <>. 
+ +The following types of models are currently supported for LTR with {es}: + +* https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html[`DecisionTreeRegressor`^] +* https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html[`RandomForestRegressor`^] +* https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html[`LGBMRegressor`^] +* https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.XGBRanker[`XGBRanker`^] +* https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.XGBRegressor[`XGBRegressor`^] + + +More model types will be supported in the future. + +[discrete] +[[learning-to-rank-model-management]] +==== Learning To Rank model management + +Once your model is deployed in {es} you can manage it using the https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-df-trained-models-apis.html[trained model APIs]. +You're now ready to work with your LTR model as a rescorer at <>. diff --git a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc new file mode 100644 index 0000000000000..1d040a116ad9a --- /dev/null +++ b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc @@ -0,0 +1,78 @@ +[[learning-to-rank-search-usage]] +=== Search using Learning To Rank +++++ +Search using LTR +++++ + +preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] + +NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels. +For more information, see {subscriptions}. 
+ +[discrete] +[[learning-to-rank-rescorer]] +==== Learning To Rank as a rescorer + +Once your LTR model is trained and deployed in {es}, it can be used as a <> in the <>: + +[source,console] +---- +GET my-index/_search +{ + "query": { <1> + "multi_match": { + "fields": ["title", "content"], + "query": "the quick brown fox" + } + }, + "rescore": { + "learning_to_rank": { + "model_id": "ltr-model", <2> + "params": { <3> + "query_text": "the quick brown fox" + } + }, + "window_size": 100 <4> + } +} +---- +// TEST[skip:TBD] +<1> First pass query providing documents to be rescored. +<2> The unique identifier of the trained model uploaded to {es}. +<3> Named parameters to be passed to the query templates used for feature. +<4> The number of documents that should be examined by the rescorer on each shard. + +[discrete] +[[learning-to-rank-rescorer-limitations]] +===== Known limitations + +[discrete] +[[learning-to-rank-rescorer-limitations-window-size]] +====== Rescore window size + +Scores returned by LTR models are usually not comparable with the scores issued by the first pass query and can be lower than the non-rescored score. This can cause the non-rescored result document to be ranked higher than the rescored document. To prevent this, the `window_size` parameter is mandatory for LTR rescorers and should be greater than or equal to `from + size`. + +[discrete] +[[learning-to-rank-rescorer-limitations-pagination]] +====== Pagination + +When exposing pagination to users, `window_size` should remain constant as each page is progressed by passing different `from` values. Changing the `window_size` can alter the top hits causing results to confusingly shift as the user steps through pages. + +[discrete] +[[learning-to-rank-rescorer-limitations-negative-scores]] +====== Negative scores + +Depending on how your model is trained, it’s possible that the model will return negative scores for documents. 
While negative scores are not allowed from first-stage retrieval and ranking, it is possible to use them in the LTR rescorer. + +[discrete] +[[learning-to-rank-rescorer-limitations-field-collapsing]] +====== Compatibility with field collapsing + +LTR rescorers are not compatible with the <>. + +[discrete] +[[learning-to-rank-rescorer-limitations-term-statistics]] +====== Term statistics as features + +We do not currently support term statistics as features, however future releases will introduce this capability. + diff --git a/docs/reference/search/search-your-data/learning-to-rank.asciidoc b/docs/reference/search/search-your-data/learning-to-rank.asciidoc new file mode 100644 index 0000000000000..08fad9db9c0f6 --- /dev/null +++ b/docs/reference/search/search-your-data/learning-to-rank.asciidoc @@ -0,0 +1,136 @@ +[[learning-to-rank]] +== Learning To Rank + +preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] + +NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels. +For more information, see {subscriptions}. + +Learning To Rank (LTR) uses a trained machine learning (ML) model to build a +ranking function for your search engine. Typically, the model is used as a +second stage re-ranker, to improve the relevance of search results returned by a +simpler, first stage retrieval algorithm. The LTR function takes a list of +documents and a search context and outputs ranked documents: + +[[learning-to-rank-overview-diagram]] +.Learning To Rank overview +image::images/search/learning-to-rank-overview.png[Learning To Rank overview,align="center"] + + +[discrete] +[[learning-to-rank-search-context]] +=== Search context + +In addition to the list of documents to sort, the LTR function also requires a +search context. 
Typically, this search context includes at least the search +terms provided by the user (`text_query` in the example above). +The search context can also provide additional information used in the ranking mode. +This could be information about the user doing the search (such as demographic data, geolocation, or age); about the query (such as query length); or document in the context of the query (such as score for the title field). + +[discrete] +[[learning-to-rank-judgement-list]] +=== Judgment list +The LTR model is usually trained on a judgment list, which is a set of queries and documents with a relevance grade. Judgment lists can be human or machine generated: they're commonly populated from behavioural analytics, often with human moderation. Judgment lists determine the ideal ordering of results for a given search query. The goal of LTR is to fit the model to the judgment list rankings as closely as possible for new queries and documents. + +The judgment list is the main input used to train the model. It consists of a dataset that contains pairs of queries and documents, along with their corresponding relevance labels. +The relevance judgment is typically either a binary (relevant/irrelevant) or a more +granular label, such as a grade between 0 (completely irrelevant) to 4 (highly +relevant). The example below uses a graded relevance judgment. + + +[[learning-to-rank-judgment-list-example]] +.Judgment list example +image::images/search/learning-to-rank-judgment-list.png[Judgment list example,align="center"] + +[discrete] +[[judgment-list-notes]] +==== Notes on judgment lists + +While a judgment list can be created manually by humans, there are techniques available to leverage user engagement data, such as clicks or conversions, to construct judgment lists automatically. + +The quantity and the quality of your judgment list will greatly influence the overall performance of the LTR model. 
The following aspects should be considered very carefully when building your judgment list: + +* Most search engines can be searched using different query types. For example, in a movie search engine, users search by title but also by actor or director. It's essential to maintain a balanced number of examples for each query type in your judgment list. This prevents overfitting and allows the model to generalize effectively across all query types. + +* Users often provide more positive examples than negative ones. By balancing the number of positive and negative examples, you help the model learn to distinguish between relevant and irrelevant content more accurately. + +[discrete] +[[learning-to-rank-feature-extraction]] +=== Feature extraction + +Query and document pairs alone don't provide enough information to train the ML +models used for LTR. The relevance scores in judgment lists depend on a number +of properties or _features_. These features must be extracted to determine how +the various components combine to determine document relevance. The judgment +list plus the extracted features make up the training dataset for an LTR model. + +These features fall into one of three main categories: + +* *Document features*: + These features are derived directly from document properties. + Example: product price in an eCommerce store. + +* *Query features*: + These features are computed directly from the query submitted by the user. + Example: the number of words in the query. + +* *Query-document features*: + Features used to provide information about the document in the context of the query. + Example: the BM25 score for the `title` field. 
+ +To prepare the dataset for training, the features are added to the judgment list: + +[[learning-to-rank-judgement-feature-extraction]] +.Judgment list with features +image::images/search/learning-to-rank-feature-extraction.png[Judgment list with features,align="center"] + +To do this in {es}, use templated queries to extract features when building the +training dataset and during inference at query time. Here is an example of a +templated query: + +[source,js] +---- +[ + { + "query_extractor": { + "feature_name": "title_bm25", + "query": { "match": { "title": "{{query}}" } } + } + } +] +---- +// NOTCONSOLE + +[discrete] +[[learning-to-rank-models]] +=== Models + +The heart of LTR is of course an ML model. A model is trained using the training data described above in combination with an objective. In the case of LTR, the objective is to rank result documents in an optimal way with respect to a judgment list, given some ranking metric such as https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Discounted_cumulative_gain[nDCG^] or https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision[MAP^]. The model relies solely on the features and relevance labels from the training data. + +The LTR space is evolving rapidly and many approaches and model types are being +experimented with. In practice {es} relies specifically on gradient boosted decision tree +(https://en.wikipedia.org/wiki/Gradient_boosting#Gradient_tree_boosting[GBDT^]) models for LTR inference. + +Note that {es} supports model inference but the training process itself must +happen outside of {es}, using a GBDT model. Among the most popular LTR models +used today, https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf[LambdaMART^] provides strong ranking performance with low inference +latencies. It relies on GBDT models and is therefore a perfect fit for LTR in +{es}. 
+ +https://xgboost.readthedocs.io/en/stable/[XGBoost^] is a well-known library that provides an https://xgboost.readthedocs.io/en/stable/tutorials/learning_to_rank.html[implementation^] of LambdaMART, making it a popular choice for LTR. We offer helpers in https://eland.readthedocs.io/[eland^] to facilitate the integration of a trained https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.XGBRanker[XGBRanker^] model as your LTR model in {es}. + +[TIP] +==== +Learn more about training in <>, or check out our https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/08-learning-to-rank.ipynb[interactive LTR notebook] available in the `elasticsearch-labs` repo. +==== +[discrete] +[[learning-to-rank-in-the-elastic-stack]] +=== LTR in the Elastic stack + +In the next pages of this guide you will learn to: + +* <> +* <> + +include::learning-to-rank-model-training.asciidoc[] +include::learning-to-rank-search-usage.asciidoc[] diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc index 8362094fab10c..bed204985296c 100644 --- a/docs/reference/search/search-your-data/search-your-data.asciidoc +++ b/docs/reference/search/search-your-data/search-your-data.asciidoc @@ -46,6 +46,7 @@ include::search-api.asciidoc[] include::search-application-overview.asciidoc[] include::knn-search.asciidoc[] include::semantic-search.asciidoc[] +include::learning-to-rank.asciidoc[] include::search-across-clusters.asciidoc[] include::search-with-synonyms.asciidoc[] include::behavioral-analytics/behavioral-analytics-overview.asciidoc[] diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc index 8168a1c14e1a3..258df3c8afc97 100644 --- a/docs/reference/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -579,6 +579,7 @@ POST _sql?format=json "fetch_size": 5 } ---- +// TEST[skip:waiting on 
https://github.com/elastic/elasticsearch/issues/106158] // TEST[setup:library] // TEST[s/"wait_for_completion_timeout": "2s"/"wait_for_completion_timeout": "0"/] @@ -602,6 +603,7 @@ For CSV, TSV, and TXT responses, the API returns these values in the respective "rows": [ ] } ---- +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TESTRESPONSE[s/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=/$body.id/] // TESTRESPONSE[s/"is_partial": true/"is_partial": $body.is_partial/] // TESTRESPONSE[s/"is_running": true/"is_running": $body.is_running/] @@ -628,6 +630,7 @@ complete results. "completion_status": 200 } ---- +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TESTRESPONSE[s/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=/$body.id/] // TESTRESPONSE[s/"expiration_time_in_millis": 1611690295000/"expiration_time_in_millis": $body.expiration_time_in_millis/] @@ -660,6 +663,7 @@ POST _sql?format=json "fetch_size": 5 } ---- +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TEST[setup:library] You can use the get async SQL search API's `keep_alive` parameter to later @@ -698,6 +702,7 @@ POST _sql?format=json "fetch_size": 5 } ---- +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TEST[setup:library] If `is_partial` and `is_running` are `false`, the search was synchronous and @@ -714,6 +719,7 @@ returned complete results. "cursor": ... 
} ---- +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TESTRESPONSE[s/Fnc5UllQdUVWU0NxRFNMbWxNYXplaFEaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQTo0NzA=/$body.id/] // TESTRESPONSE[s/"rows": \.\.\./"rows": $body.rows/] // TESTRESPONSE[s/"columns": \.\.\./"columns": $body.columns/] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc index e43bbd036b44e..5ca5e0b7bf139 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc @@ -9,7 +9,7 @@ PUT cohere-embeddings "content_embedding": { <1> "type": "dense_vector", <2> "dims": 1024, <3> - "element_type": "float" + "element_type": "byte" }, "content": { <4> "type": "text" <5> diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java index 9267a8e963045..21f940efda5ac 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java @@ -673,6 +673,10 @@ public static String sha256(String source) { return MessageDigests.toHexString(MessageDigests.sha256().digest(source.getBytes(StandardCharsets.UTF_8))); } + public static String sha512(String source) { + return MessageDigests.toHexString(MessageDigests.sha512().digest(source.getBytes(StandardCharsets.UTF_8))); + } + public static final int UNLIMITED_PATTERN_FACTOR = 0; public static final int DISABLED_PATTERN_FACTOR = -1; diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt index 7f2282eaa714a..13678c4216d7a 100644 --- 
a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt index a90d3525e1203..18d658d797b60 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt index b58a8e720b21b..214fdaae26394 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { 
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt index 7c0bf5b2985fe..6c569a165336b 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java index b51f0f2657278..e97bd1bb123ca 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java @@ -293,6 +293,24 @@ public void testSha256() { assertEquals("97df3588b5a3f24babc3851b372f0ba71a9dcdded43b14b9d06961bfc1707d9d", execDigest("'foobarbaz'.sha256()")); } + public void testSha512() { + assertEquals( + "f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc663832" + + "6e282c41be5e4254d8820772c5518a2c5a8c0c7f7eda19594a7eb539453e1ed7", + execDigest("'foo'.sha512()") + ); + assertEquals( + "d82c4eb5261cb9c8aa9855edd67d1bd10482f41529858d925094d173fa662aa9" + + "1ff39bc5b188615273484021dfb16fd8284cf684ccf0fc795be3aa2fc1e6c181", + execDigest("'bar'.sha512()") + ); + assertEquals( + "cb377c10b0f5a62c803625a799d9e908be45e767f5d147d4744907cb05597aa4" + + "edd329a0af147add0cf4181ed328fa1e7994265826b3ed3d7ef6f067ca99185a", + execDigest("'foobarbaz'.sha512()") + ); + } + public 
void testToEpochMilli() { assertEquals(0L, exec("ZonedDateTime.parse('1970-01-01T00:00:00Z').toEpochMilli()")); assertEquals(1602097376782L, exec("ZonedDateTime.parse('2020-10-07T19:02:56.782Z').toEpochMilli()")); diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index 59e07581499ee..88d910b61fa52 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -386,7 +386,7 @@ private static void assertStablePagination(String repoName, Collection a final GetSnapshotsResponse getSnapshotsResponse = sortedWithLimit( repoName, sort, - GetSnapshotsRequest.After.from(after, sort).asQueryParam(), + sort.encodeAfterQueryParam(after), i, order, includeIndexNames diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 9566f6f036c3f..9eebb281795b0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -1,8 +1,8 @@ --- "Help": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -22,7 +22,7 @@ --- "Help (pre 7.4.0)": - skip: - version: "7.4.0 - " + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -51,8 +51,8 @@ --- "Simple alias": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" 
- do: @@ -81,7 +81,7 @@ --- "Simple alias (pre 7.4.0)": - skip: - version: "7.4.0 - " + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -108,8 +108,8 @@ --- "Complex alias": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -149,7 +149,7 @@ --- "Complex alias (pre 7.4.0)": - skip: - version: "7.4.0 - " + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -269,8 +269,8 @@ --- "Column headers": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -307,7 +307,7 @@ --- "Column headers (pre 7.4.0)": - skip: - version: "7.4.0 - " + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -373,10 +373,10 @@ --- "Alias against closed index": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - features: ["allowed_warnings"] + test_runner_features: ["allowed_warnings"] - do: indices.create: @@ -409,10 +409,12 @@ --- "Alias against closed index (pre 7.4.0)": - skip: - version: "7.4.0 - " - features: ["allowed_warnings"] + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" + - requires: + test_runner_features: ["allowed_warnings"] + - do: indices.create: index: test_index diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml index ed519438f1b1e..2ba01c3b5711e 100644 --- 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml @@ -2,8 +2,8 @@ "Help": - skip: - version: " - 8.9.99" - reason: "node.role column added in 8.10.0" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast are added in 8.14.0" - do: cat.allocation: @@ -11,24 +11,27 @@ - match: $body: | - /^ shards .+ \n - disk.indices .+ \n - disk.used .+ \n - disk.avail .+ \n - disk.total .+ \n - disk.percent .+ \n - host .+ \n - ip .+ \n - node .+ \n - node.role .+ \n + /^ shards .+ \n + shards.undesired .+ \n + write_load.forecast .+ \n + disk.indices.forecast .+ \n + disk.indices .+ \n + disk.used .+ \n + disk.avail .+ \n + disk.total .+ \n + disk.percent .+ \n + host .+ \n + ip .+ \n + node .+ \n + node.role .+ \n $/ --- "One index": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: indices.create: @@ -42,6 +45,9 @@ /^ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+) #always should return value since we filter out non data nodes by default @@ -65,8 +71,8 @@ "Node ID": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -76,6 +82,9 @@ $body: | /^ ( \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+)? 
#no value from client nodes @@ -99,12 +108,11 @@ $/ --- - "All Nodes": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -115,6 +123,9 @@ /^ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+)? #no value from client nodes @@ -138,8 +149,8 @@ "Column headers": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -148,6 +159,9 @@ $body: | /^ shards \s+ + shards.undesired \s+ + write_load.forecast \s+ + disk.indices.forecast \s+ disk.indices \s+ disk.used \s+ disk.avail \s+ @@ -161,6 +175,9 @@ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+) #always should return value since we filter out non data nodes by default @@ -211,12 +228,11 @@ --- - "Bytes": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -226,6 +242,9 @@ $body: | /^ ( \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? 
\s+ + \d+ \s+ 0 \s+ \d+ \s+ (\d+ \s+) #always should return value since we filter out non data nodes by default @@ -240,7 +259,6 @@ $/ --- - "Node roles": - skip: @@ -259,3 +277,25 @@ \n )+ $/ + +--- +"Node forecasts": + + - skip: + version: " - 8.13.99" + reason: "write_load.forecast and disk.indices.forecast columns added in 8.14.0" + + - do: + cat.allocation: + h: [node, shards.undesired, write_load.forecast, disk.indices.forecast] + + - match: + $body: | + /^ + ( [-\w.]+ \s+ + [-\w.]+ \s+ + [-\w.]+ \s+ + [\w]+ + \n + )+ + $/ diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/80_allocation_stats.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/80_allocation_stats.yml new file mode 100644 index 0000000000000..a2e1117073cde --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/80_allocation_stats.yml @@ -0,0 +1,22 @@ +--- +"Allocation stats": + - skip: + version: " - 8.13.99" + reason: "allocation stats was added in 8.14.0" + features: [arbitrary_key] + + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: + metric: [ allocations ] + + - exists: nodes.$node_id.allocations + - exists: nodes.$node_id.allocations.shards + - exists: nodes.$node_id.allocations.undesired_shards + - exists: nodes.$node_id.allocations.forecasted_ingest_load + - exists: nodes.$node_id.allocations.forecasted_disk_usage_in_bytes + - exists: nodes.$node_id.allocations.current_disk_usage_in_bytes diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index d01064b9fb8bc..a04d1a5c8b02d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -659,7 +659,7 @@ private static void 
assertStablePagination(String[] repoNames, Collection void reg actions.register(TransportAddVotingConfigExclusionsAction.TYPE, TransportAddVotingConfigExclusionsAction.class); actions.register(TransportClearVotingConfigExclusionsAction.TYPE, TransportClearVotingConfigExclusionsAction.class); actions.register(TransportClusterAllocationExplainAction.TYPE, TransportClusterAllocationExplainAction.class); + actions.register(TransportGetAllocationStatsAction.TYPE, TransportGetAllocationStatsAction.class); actions.register(TransportGetDesiredBalanceAction.TYPE, TransportGetDesiredBalanceAction.class); actions.register(TransportDeleteDesiredBalanceAction.TYPE, TransportDeleteDesiredBalanceAction.class); actions.register(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index 7f3578ce9f16f..bfe1ff04b7b77 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -185,7 +185,7 @@ default Index getConcreteWriteIndex(IndexAbstraction ia, Metadata metadata) { */ enum OpType { /** - * Index the source. If there an existing document with the id, it will + * Index the source. If there is an existing document with the id, it will * be replaced. */ INDEX(0), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java new file mode 100644 index 0000000000000..a17a627342c4f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.allocation; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.allocation.AllocationStatsService; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Map; + +public class TransportGetAllocationStatsAction extends TransportMasterNodeReadAction< + TransportGetAllocationStatsAction.Request, + TransportGetAllocationStatsAction.Response> { + + public static final ActionType TYPE = new ActionType<>("cluster:monitor/allocation/stats"); + + private final AllocationStatsService allocationStatsService; + + @Inject + public 
TransportGetAllocationStatsAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + AllocationStatsService allocationStatsService + ) { + super( + TYPE.name(), + transportService, + clusterService, + threadPool, + actionFilters, + TransportGetAllocationStatsAction.Request::new, + indexNameExpressionResolver, + TransportGetAllocationStatsAction.Response::new, + threadPool.executor(ThreadPool.Names.MANAGEMENT) + ); + this.allocationStatsService = allocationStatsService; + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + if (clusterService.state().getMinTransportVersion().before(TransportVersions.ALLOCATION_STATS)) { + // The action is not available before ALLOCATION_STATS + listener.onResponse(new Response(Map.of())); + return; + } + super.doExecute(task, request, listener); + } + + @Override + protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { + listener.onResponse(new Response(allocationStatsService.stats())); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + public static class Request extends MasterNodeReadRequest { + + public Request(TaskId parentTaskId) { + setParentTask(parentTaskId); + } + + public Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + assert out.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS); + super.writeTo(out); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static class Response extends ActionResponse { + + private final Map nodeAllocationStats; + + public Response(Map 
nodeAllocationStats) { + this.nodeAllocationStats = nodeAllocationStats; + } + + public Response(StreamInput in) throws IOException { + super(in); + this.nodeAllocationStats = in.readImmutableMap(StreamInput::readString, NodeAllocationStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(nodeAllocationStats, StreamOutput::writeString, StreamOutput::writeWriteable); + } + + public Map getNodeAllocationStats() { + return nodeAllocationStats; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index 595e441e9b2cf..8fcb5a320bd41 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -97,6 +98,9 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent { @Nullable private final RepositoriesStats repositoriesStats; + @Nullable + private final NodeAllocationStats nodeAllocationStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -117,11 +121,12 @@ public NodeStats(StreamInput in) throws IOException { ingestStats = in.readOptionalWriteable(IngestStats::read); adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new); indexingPressureStats = in.readOptionalWriteable(IndexingPressureStats::new); - if 
(in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { - repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); - } else { - repositoriesStats = null; - } + repositoriesStats = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) + ? in.readOptionalWriteable(RepositoriesStats::new) + : null; + nodeAllocationStats = in.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS) + ? in.readOptionalWriteable(NodeAllocationStats::new) + : null; } public NodeStats( @@ -142,7 +147,8 @@ public NodeStats( @Nullable AdaptiveSelectionStats adaptiveSelectionStats, @Nullable ScriptCacheStats scriptCacheStats, @Nullable IndexingPressureStats indexingPressureStats, - @Nullable RepositoriesStats repositoriesStats + @Nullable RepositoriesStats repositoriesStats, + @Nullable NodeAllocationStats nodeAllocationStats ) { super(node); this.timestamp = timestamp; @@ -162,6 +168,31 @@ public NodeStats( this.scriptCacheStats = scriptCacheStats; this.indexingPressureStats = indexingPressureStats; this.repositoriesStats = repositoriesStats; + this.nodeAllocationStats = nodeAllocationStats; + } + + public NodeStats withNodeAllocationStats(@Nullable NodeAllocationStats nodeAllocationStats) { + return new NodeStats( + getNode(), + timestamp, + indices, + os, + process, + jvm, + threadPool, + fs, + transport, + http, + breaker, + scriptStats, + discoveryStats, + ingestStats, + adaptiveSelectionStats, + scriptCacheStats, + indexingPressureStats, + repositoriesStats, + nodeAllocationStats + ); } public long getTimestamp() { @@ -271,6 +302,11 @@ public RepositoriesStats getRepositoriesStats() { return repositoriesStats; } + @Nullable + public NodeAllocationStats getNodeAllocationStats() { + return nodeAllocationStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -297,6 +333,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) 
{ out.writeOptionalWriteable(repositoriesStats); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS)) { + out.writeOptionalWriteable(nodeAllocationStats); + } } @Override @@ -343,7 +382,11 @@ public Iterator toXContentChunked(ToXContent.Params outerP ifPresent(getIngestStats()).toXContentChunked(outerParams), singleChunk(ifPresent(getAdaptiveSelectionStats())), ifPresent(getScriptCacheStats()).toXContentChunked(outerParams), - singleChunk((builder, p) -> builder.value(ifPresent(getIndexingPressureStats()), p).value(ifPresent(getRepositoriesStats()), p)) + singleChunk( + (builder, p) -> builder.value(ifPresent(getIndexingPressureStats()), p) + .value(ifPresent(getRepositoriesStats()), p) + .value(ifPresent(getNodeAllocationStats()), p) + ) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index ab7278c629bf2..8d863653874bb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -158,6 +158,11 @@ public NodesStatsRequestBuilder setRepositoryStats(boolean repositoryStats) { return this; } + public NodesStatsRequestBuilder setAllocationStats(boolean allocationStats) { + addOrRemoveMetric(allocationStats, NodesStatsRequestParameters.Metric.ALLOCATIONS); + return this; + } + /** * Helper method for adding metrics to a request */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java index 2948af59d17fd..9e965fcccb2f3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java @@ -89,7 +89,8 @@ public enum Metric { ADAPTIVE_SELECTION("adaptive_selection"), SCRIPT_CACHE("script_cache"), INDEXING_PRESSURE("indexing_pressure"), - REPOSITORIES("repositories"); + REPOSITORIES("repositories"), + ALLOCATIONS("allocations"); private String metricName; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 1edc57b0a7df2..6ff2303997482 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -8,11 +8,15 @@ package org.elasticsearch.action.admin.cluster.node.stats; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.admin.cluster.allocation.TransportGetAllocationStatsAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -42,7 +46,9 @@ public class TransportNodesStatsAction extends TransportNodesAction< NodeStats> { public static final ActionType TYPE = new ActionType<>("cluster:monitor/nodes/stats"); + private final NodeService nodeService; + private final NodeClient client; @Inject public TransportNodesStatsAction( @@ -50,7 +56,8 @@ public TransportNodesStatsAction( ClusterService 
clusterService, TransportService transportService, NodeService nodeService, - ActionFilters actionFilters + ActionFilters actionFilters, + NodeClient client ) { super( TYPE.name(), @@ -61,6 +68,7 @@ public TransportNodesStatsAction( threadPool.executor(ThreadPool.Names.MANAGEMENT) ); this.nodeService = nodeService; + this.client = client; } @Override @@ -68,6 +76,34 @@ protected NodesStatsResponse newResponse(NodesStatsRequest request, List responses, + List failures, + ActionListener listener + ) { + Set metrics = request.getNodesStatsRequestParameters().requestedMetrics(); + if (NodesStatsRequestParameters.Metric.ALLOCATIONS.containedIn(metrics)) { + client.execute( + TransportGetAllocationStatsAction.TYPE, + new TransportGetAllocationStatsAction.Request(new TaskId(clusterService.localNode().getId(), task.getId())), + listener.delegateFailure((l, r) -> { + ActionListener.respondAndRelease(l, newResponse(request, merge(responses, r.getNodeAllocationStats()), failures)); + }) + ); + } else { + ActionListener.run(listener, l -> ActionListener.respondAndRelease(l, newResponse(request, responses, failures))); + } + } + + private static List merge(List responses, Map allocationStats) { + return responses.stream() + .map(response -> response.withNodeAllocationStats(allocationStats.get(response.getNode().getId()))) + .toList(); + } + @Override protected NodeStatsRequest newNodeRequest(NodesStatsRequest request) { return new NodeStatsRequest(request); @@ -80,10 +116,10 @@ protected NodeStats newNodeResponse(StreamInput in, DiscoveryNode node) throws I } @Override - protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest, Task task) { + protected NodeStats nodeOperation(NodeStatsRequest request, Task task) { assert task instanceof CancellableTask; - final NodesStatsRequestParameters nodesStatsRequestParameters = nodeStatsRequest.getNodesStatsRequestParameters(); + final NodesStatsRequestParameters nodesStatsRequestParameters = 
request.getNodesStatsRequestParameters(); Set metrics = nodesStatsRequestParameters.requestedMetrics(); return nodeService.stats( nodesStatsRequestParameters.indices(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 25373178e2b89..b47abc0e4dd8f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -15,19 +15,15 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.Nullable; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.Arrays; -import java.util.Base64; import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -51,12 +47,15 @@ public class GetSnapshotsRequest extends MasterNodeRequest private int size = NO_LIMIT; /** - * Numeric offset at which to start fetching snapshots. Mutually exclusive with {@link After} if not equal to {@code 0}. + * Numeric offset at which to start fetching snapshots. Mutually exclusive with {@link #after} if not equal to {@code 0}. */ private int offset = 0; + /** + * Sort key value at which to start fetching snapshots. Mutually exclusive with {@link #offset} if not {@code null}. 
+ */ @Nullable - private After after; + private SnapshotSortKey.After after; @Nullable private String fromSortValue; @@ -105,7 +104,7 @@ public GetSnapshotsRequest(StreamInput in) throws IOException { snapshots = in.readStringArray(); ignoreUnavailable = in.readBoolean(); verbose = in.readBoolean(); - after = in.readOptionalWriteable(After::new); + after = in.readOptionalWriteable(SnapshotSortKey.After::new); sort = in.readEnum(SnapshotSortKey.class); size = in.readVInt(); order = SortOrder.readFromStream(in); @@ -283,7 +282,8 @@ public boolean includeIndexNames() { return includeIndexNames; } - public After after() { + @Nullable + public SnapshotSortKey.After after() { return after; } @@ -291,7 +291,7 @@ public SnapshotSortKey sort() { return sort; } - public GetSnapshotsRequest after(@Nullable After after) { + public GetSnapshotsRequest after(@Nullable SnapshotSortKey.After after) { this.after = after; return this; } @@ -350,73 +350,6 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers); } - public static final class After implements Writeable { - - private final String value; - - private final String repoName; - - private final String snapshotName; - - After(StreamInput in) throws IOException { - this(in.readString(), in.readString(), in.readString()); - } - - public static After fromQueryParam(String param) { - final String[] parts = new String(Base64.getUrlDecoder().decode(param), StandardCharsets.UTF_8).split(","); - if (parts.length != 3) { - throw new IllegalArgumentException("invalid ?after parameter [" + param + "]"); - } - return new After(parts[0], parts[1], parts[2]); - } - - @Nullable - public static After from(@Nullable SnapshotInfo snapshotInfo, SnapshotSortKey sortBy) { - if (snapshotInfo == null) { - return null; - } - final String afterValue = switch (sortBy) { - case START_TIME -> String.valueOf(snapshotInfo.startTime()); - case 
NAME -> snapshotInfo.snapshotId().getName(); - case DURATION -> String.valueOf(snapshotInfo.endTime() - snapshotInfo.startTime()); - case INDICES -> String.valueOf(snapshotInfo.indices().size()); - case SHARDS -> String.valueOf(snapshotInfo.totalShards()); - case FAILED_SHARDS -> String.valueOf(snapshotInfo.failedShards()); - case REPOSITORY -> snapshotInfo.repository(); - }; - return new After(afterValue, snapshotInfo.repository(), snapshotInfo.snapshotId().getName()); - } - - public After(String value, String repoName, String snapshotName) { - this.value = value; - this.repoName = repoName; - this.snapshotName = snapshotName; - } - - public String value() { - return value; - } - - public String snapshotName() { - return snapshotName; - } - - public String repoName() { - return repoName; - } - - public String asQueryParam() { - return Base64.getUrlEncoder().encodeToString((value + "," + repoName + "," + snapshotName).getBytes(StandardCharsets.UTF_8)); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(value); - out.writeString(repoName); - out.writeString(snapshotName); - } - } - @Override public String getDescription() { final StringBuilder stringBuilder = new StringBuilder("repositories["); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index 25e8a433bf243..68877f6144693 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -109,10 +109,10 @@ public GetSnapshotsRequestBuilder setVerbose(boolean verbose) { } public GetSnapshotsRequestBuilder setAfter(String after) { - return setAfter(after == null ? 
null : GetSnapshotsRequest.After.fromQueryParam(after)); + return setAfter(after == null ? null : SnapshotSortKey.decodeAfterQueryParam(after)); } - public GetSnapshotsRequestBuilder setAfter(@Nullable GetSnapshotsRequest.After after) { + public GetSnapshotsRequestBuilder setAfter(@Nullable SnapshotSortKey.After after) { request.after(after); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java index 599f41e8615da..14735d13ae68e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java @@ -8,9 +8,20 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; +import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.snapshots.SnapshotInfo; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Comparator; +import java.util.function.Predicate; +import java.util.function.ToLongFunction; /** * Sort key for snapshots e.g. returned from the get-snapshots API. All values break ties using {@link SnapshotInfo#snapshotId} (i.e. by @@ -20,43 +31,135 @@ public enum SnapshotSortKey { /** * Sort by snapshot start time. 
*/ - START_TIME("start_time", Comparator.comparingLong(SnapshotInfo::startTime)), + START_TIME("start_time", Comparator.comparingLong(SnapshotInfo::startTime)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Long.toString(snapshotInfo.startTime()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(SnapshotInfo::startTime, sortOrder); + } + }, /** * Sort by snapshot name. */ - NAME("name", Comparator.comparing(sni -> sni.snapshotId().getName())), + NAME("name", Comparator.comparing(sni -> sni.snapshotId().getName())) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return snapshotInfo.snapshotId().getName(); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + // TODO: cover via pre-flight predicate + final String snapshotName = after.snapshotName(); + final String repoName = after.repoName(); + return sortOrder == SortOrder.ASC + ? (info -> compareName(snapshotName, repoName, info) < 0) + : (info -> compareName(snapshotName, repoName, info) > 0); + } + }, /** * Sort by snapshot duration (end time minus start time). */ - DURATION("duration", Comparator.comparingLong(sni -> sni.endTime() - sni.startTime())), + DURATION("duration", Comparator.comparingLong(sni -> sni.endTime() - sni.startTime())) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Long.toString(snapshotInfo.endTime() - snapshotInfo.startTime()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(info -> info.endTime() - info.startTime(), sortOrder); + } + }, + /** * Sort by number of indices in the snapshot. 
*/ - INDICES("index_count", Comparator.comparingInt(sni -> sni.indices().size())), + INDICES("index_count", Comparator.comparingInt(sni -> sni.indices().size())) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Integer.toString(snapshotInfo.indices().size()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + // TODO: cover via pre-flight predicate + return after.longValuePredicate(info -> info.indices().size(), sortOrder); + } + }, /** * Sort by number of shards in the snapshot. */ - SHARDS("shard_count", Comparator.comparingInt(SnapshotInfo::totalShards)), + SHARDS("shard_count", Comparator.comparingInt(SnapshotInfo::totalShards)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Integer.toString(snapshotInfo.totalShards()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(SnapshotInfo::totalShards, sortOrder); + } + }, /** * Sort by number of failed shards in the snapshot. */ - FAILED_SHARDS("failed_shard_count", Comparator.comparingInt(SnapshotInfo::failedShards)), + FAILED_SHARDS("failed_shard_count", Comparator.comparingInt(SnapshotInfo::failedShards)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Integer.toString(snapshotInfo.failedShards()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(SnapshotInfo::failedShards, sortOrder); + } + }, /** * Sort by repository name. 
*/ - REPOSITORY("repository", Comparator.comparing(SnapshotInfo::repository)); + REPOSITORY("repository", Comparator.comparing(SnapshotInfo::repository)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return snapshotInfo.repository(); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + // TODO: cover via pre-flight predicate + final String snapshotName = after.snapshotName(); + final String repoName = after.repoName(); + return sortOrder == SortOrder.ASC + ? (info -> compareRepositoryName(snapshotName, repoName, info) < 0) + : (info -> compareRepositoryName(snapshotName, repoName, info) > 0); + } + + private static int compareRepositoryName(String name, String repoName, SnapshotInfo info) { + final int res = repoName.compareTo(info.repository()); + if (res != 0) { + return res; + } + return name.compareTo(info.snapshotId().getName()); + } + }; private final String name; - private final Comparator snapshotInfoComparator; + private final Comparator ascendingSnapshotInfoComparator; + private final Comparator descendingSnapshotInfoComparator; SnapshotSortKey(String name, Comparator snapshotInfoComparator) { this.name = name; - this.snapshotInfoComparator = snapshotInfoComparator.thenComparing(SnapshotInfo::snapshotId); + this.ascendingSnapshotInfoComparator = snapshotInfoComparator.thenComparing(SnapshotInfo::snapshotId); + this.descendingSnapshotInfoComparator = ascendingSnapshotInfoComparator.reversed(); } @Override @@ -64,8 +167,67 @@ public String toString() { return name; } - public final Comparator getSnapshotInfoComparator() { - return snapshotInfoComparator; + /** + * @return a {@link Comparator} which sorts {@link SnapshotInfo} instances according to this sort key. 
+ */ + public final Comparator getSnapshotInfoComparator(SortOrder sortOrder) { + return switch (sortOrder) { + case ASC -> ascendingSnapshotInfoComparator; + case DESC -> descendingSnapshotInfoComparator; + }; + } + + /** + * @return an {@link After} which can be included in a {@link GetSnapshotsRequest} (e.g. to be sent to a remote node) and ultimately + * converted into a predicate to filter out {@link SnapshotInfo} items which were returned on earlier pages of results. See also + * {@link #encodeAfterQueryParam} and {@link #getAfterPredicate}. + */ + public static After decodeAfterQueryParam(String param) { + final String[] parts = new String(Base64.getUrlDecoder().decode(param), StandardCharsets.UTF_8).split(","); + if (parts.length != 3) { + throw new IllegalArgumentException("invalid ?after parameter [" + param + "]"); + } + return new After(parts[0], parts[1], parts[2]); + } + + /** + * @return an encoded representation of the value of the sort key for the given {@link SnapshotInfo}, including the values of the + * snapshot name and repo name for tiebreaking purposes, which can be returned to the user so they can pass it back to the + * {@code ?after} param of a subsequent call to the get-snapshots API in order to retrieve the next page of results. + */ + public final String encodeAfterQueryParam(SnapshotInfo snapshotInfo) { + final var rawValue = getSortKeyValue(snapshotInfo) + "," + snapshotInfo.repository() + "," + snapshotInfo.snapshotId().getName(); + return Base64.getUrlEncoder().encodeToString(rawValue.getBytes(StandardCharsets.UTF_8)); + } + + /** + * @return a string representation of the value of the sort key for the given {@link SnapshotInfo}, which should be the last item in the + * response, which is combined with the snapshot and repository names, encoded, and returned to the user so they can pass it back to + * the {@code ?after} param of a subsequent call to the get-snapshots API in order to retrieve the next page of results. 
+ */ + protected abstract String getSortKeyValue(SnapshotInfo snapshotInfo); + + /** + * @return a predicate to filter out {@link SnapshotInfo} items that match the user's query but which sort earlier than the given + * {@link After} value (i.e. they were returned on earlier pages of results). If {@code after} is {@code null} then the returned + * predicate matches all snapshots. + */ + public final Predicate getAfterPredicate(@Nullable After after, SortOrder sortOrder) { + return after == null ? Predicates.always() : innerGetAfterPredicate(after, sortOrder); + } + + /** + * @return a predicate to filter out {@link SnapshotInfo} items that match the user's query but which sort earlier than the given + * {@link After} value (i.e. they were returned on earlier pages of results). The {@code after} parameter is not {@code null}. + */ + protected abstract Predicate innerGetAfterPredicate(After after, SortOrder sortOrder); + + private static int compareName(String name, String repoName, SnapshotInfo info) { + final int res = name.compareTo(info.snapshotId().getName()); + if (res != 0) { + return res; + } + return repoName.compareTo(info.repository()); } public static SnapshotSortKey of(String name) { @@ -80,4 +242,29 @@ public static SnapshotSortKey of(String name) { default -> throw new IllegalArgumentException("unknown sort key [" + name + "]"); }; } + + public record After(String value, String repoName, String snapshotName) implements Writeable { + + After(StreamInput in) throws IOException { + this(in.readString(), in.readString(), in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + out.writeString(repoName); + out.writeString(snapshotName); + } + + Predicate longValuePredicate(ToLongFunction extractor, SortOrder sortOrder) { + final var after = Long.parseLong(value); + return sortOrder == SortOrder.ASC ? 
info -> { + final long val = extractor.applyAsLong(info); + return after < val || (after == val && compareName(snapshotName, repoName, info) < 0); + } : info -> { + final long val = extractor.applyAsLong(info); + return after > val || (after == val && compareName(snapshotName, repoName, info) > 0); + }; + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index cf779445fcd6a..28586c7a6410b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -21,12 +21,18 @@ import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.util.concurrent.AbstractThrottledTaskRunner; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ListenableFuture; +import org.elasticsearch.common.util.concurrent.ThrottledIterator; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Predicates; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; @@ -41,13 +47,13 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.CancellableTask; import 
org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -56,6 +62,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiPredicate; +import java.util.function.BooleanSupplier; import java.util.function.Predicate; import java.util.function.ToLongFunction; import java.util.stream.Stream; @@ -65,6 +72,8 @@ */ public class TransportGetSnapshotsAction extends TransportMasterNodeAction { + private static final Logger logger = LogManager.getLogger(TransportGetSnapshotsAction.class); + private final RepositoriesService repositoriesService; @Inject @@ -154,7 +163,7 @@ private class GetSnapshotsOperation { private final String fromSortValue; private final int offset; @Nullable - private final GetSnapshotsRequest.After after; + private final SnapshotSortKey.After after; private final int size; // current state @@ -164,6 +173,9 @@ private class GetSnapshotsOperation { private final boolean verbose; private final boolean indices; + // snapshot info throttling + private final GetSnapshotInfoExecutor getSnapshotInfoExecutor; + // results private final Map failuresByRepository = ConcurrentCollections.newConcurrentMap(); private final Queue> allSnapshotInfos = ConcurrentCollections.newQueue(); @@ -181,7 +193,7 @@ private class GetSnapshotsOperation { SortOrder order, String fromSortValue, int offset, - GetSnapshotsRequest.After after, + SnapshotSortKey.After after, int size, SnapshotsInProgress snapshotsInProgress, boolean verbose, @@ -203,6 +215,11 @@ private class GetSnapshotsOperation { this.verbose = verbose; this.indices = indices; + this.getSnapshotInfoExecutor = new GetSnapshotInfoExecutor( + 
threadPool.info(ThreadPool.Names.SNAPSHOT_META).getMax(), + cancellableTask::isCancelled + ); + for (final var missingRepo : resolvedRepositories.missing()) { failuresByRepository.put(missingRepo, new RepositoryMissingException(missingRepo)); } @@ -223,9 +240,7 @@ void getMultipleReposSnapshotInfo(ActionListener listener) return new GetSnapshotsResponse( snapshotInfos, failuresByRepository, - finalRemaining > 0 - ? GetSnapshotsRequest.After.from(snapshotInfos.get(snapshotInfos.size() - 1), sortBy).asQueryParam() - : null, + finalRemaining > 0 ? sortBy.encodeAfterQueryParam(snapshotInfos.get(snapshotInfos.size() - 1)) : null, totalCount.get(), finalRemaining ); @@ -436,11 +451,34 @@ private void snapshots(String repositoryName, Collection snapshotIds // only need to synchronize accesses related to reading SnapshotInfo from the repo final List syncSnapshots = Collections.synchronizedList(snapshots); - repository.getSnapshotInfo(snapshotIdsToIterate, ignoreUnavailable == false, cancellableTask::isCancelled, snapshotInfo -> { - if (predicates.test(snapshotInfo)) { - syncSnapshots.add(snapshotInfo.maybeWithoutIndices(indices)); - } - }, listeners.acquire()); + ThrottledIterator.run( + Iterators.failFast(snapshotIdsToIterate.iterator(), () -> cancellableTask.isCancelled() || listeners.isFailing()), + (ref, snapshotId) -> { + final var refListener = ActionListener.runBefore(listeners.acquire(), ref::close); + getSnapshotInfoExecutor.getSnapshotInfo(repository, snapshotId, new ActionListener<>() { + @Override + public void onResponse(SnapshotInfo snapshotInfo) { + if (predicates.test(snapshotInfo)) { + syncSnapshots.add(snapshotInfo.maybeWithoutIndices(indices)); + } + refListener.onResponse(null); + } + + @Override + public void onFailure(Exception e) { + if (ignoreUnavailable) { + logger.warn(Strings.format("failed to fetch snapshot info for [%s:%s]", repository, snapshotId), e); + refListener.onResponse(null); + } else { + refListener.onFailure(e); + } + } + }); + }, + 
getSnapshotInfoExecutor.getMaxRunningTasks(), + () -> {}, + () -> {} + ); } } @@ -489,7 +527,9 @@ private SnapshotsInRepo sortSnapshotsWithNoOffsetOrLimit(List snap } private SnapshotsInRepo sortSnapshots(Stream snapshotInfoStream, int totalCount, int offset, int size) { - final var resultsStream = snapshotInfoStream.filter(buildAfterPredicate()).sorted(buildComparator()).skip(offset); + final var resultsStream = snapshotInfoStream.filter(sortBy.getAfterPredicate(after, order)) + .sorted(sortBy.getSnapshotInfoComparator(order)) + .skip(offset); if (size == GetSnapshotsRequest.NO_LIMIT) { return new SnapshotsInRepo(resultsStream.toList(), totalCount, 0); } else { @@ -507,86 +547,6 @@ private SnapshotsInRepo sortSnapshots(Stream snapshotInfoStream, i return new SnapshotsInRepo(results, totalCount, remaining); } } - - private Comparator buildComparator() { - final var comparator = sortBy.getSnapshotInfoComparator(); - return order == SortOrder.DESC ? comparator.reversed() : comparator; - } - - private Predicate buildAfterPredicate() { - if (after == null) { - return Predicates.always(); - } - assert offset == 0 : "can't combine after and offset but saw [" + after + "] and offset [" + offset + "]"; - - final String snapshotName = after.snapshotName(); - final String repoName = after.repoName(); - final String value = after.value(); - return switch (sortBy) { - case START_TIME -> filterByLongOffset(SnapshotInfo::startTime, Long.parseLong(value), snapshotName, repoName, order); - case NAME -> - // TODO: cover via pre-flight predicate - order == SortOrder.ASC - ? 
(info -> compareName(snapshotName, repoName, info) < 0) - : (info -> compareName(snapshotName, repoName, info) > 0); - case DURATION -> filterByLongOffset( - info -> info.endTime() - info.startTime(), - Long.parseLong(value), - snapshotName, - repoName, - order - ); - case INDICES -> - // TODO: cover via pre-flight predicate - filterByLongOffset(info -> info.indices().size(), Integer.parseInt(value), snapshotName, repoName, order); - case SHARDS -> filterByLongOffset(SnapshotInfo::totalShards, Integer.parseInt(value), snapshotName, repoName, order); - case FAILED_SHARDS -> filterByLongOffset( - SnapshotInfo::failedShards, - Integer.parseInt(value), - snapshotName, - repoName, - order - ); - case REPOSITORY -> - // TODO: cover via pre-flight predicate - order == SortOrder.ASC - ? (info -> compareRepositoryName(snapshotName, repoName, info) < 0) - : (info -> compareRepositoryName(snapshotName, repoName, info) > 0); - }; - } - - private static Predicate filterByLongOffset( - ToLongFunction extractor, - long after, - String snapshotName, - String repoName, - SortOrder order - ) { - return order == SortOrder.ASC ? 
info -> { - final long val = extractor.applyAsLong(info); - return after < val || (after == val && compareName(snapshotName, repoName, info) < 0); - } : info -> { - final long val = extractor.applyAsLong(info); - return after > val || (after == val && compareName(snapshotName, repoName, info) > 0); - }; - } - - private static int compareRepositoryName(String name, String repoName, SnapshotInfo info) { - final int res = repoName.compareTo(info.repository()); - if (res != 0) { - return res; - } - return name.compareTo(info.snapshotId().getName()); - } - - private static int compareName(String name, String repoName, SnapshotInfo info) { - final int res = name.compareTo(info.snapshotId().getName()); - if (res != 0) { - return res; - } - return repoName.compareTo(info.repository()); - } - } /** @@ -803,4 +763,34 @@ private static int indexCount(SnapshotId snapshotId, RepositoryData repositoryDa private record SnapshotsInRepo(List snapshotInfos, int totalCount, int remaining) { private static final SnapshotsInRepo EMPTY = new SnapshotsInRepo(List.of(), 0, 0); } + + /** + * Throttling executor for retrieving {@link SnapshotInfo} instances from the repository without spamming the SNAPSHOT_META threadpool + * and starving other users of access to it. Similar to {@link Repository#getSnapshotInfo} but allows for finer-grained control over + * which snapshots are retrieved. 
+ */ + private static class GetSnapshotInfoExecutor extends AbstractThrottledTaskRunner> { + private final int maxRunningTasks; + private final BooleanSupplier isCancelledSupplier; + + GetSnapshotInfoExecutor(int maxRunningTasks, BooleanSupplier isCancelledSupplier) { + super(GetSnapshotsAction.NAME, maxRunningTasks, EsExecutors.DIRECT_EXECUTOR_SERVICE, ConcurrentCollections.newBlockingQueue()); + this.maxRunningTasks = maxRunningTasks; + this.isCancelledSupplier = isCancelledSupplier; + } + + int getMaxRunningTasks() { + return maxRunningTasks; + } + + void getSnapshotInfo(Repository repository, SnapshotId snapshotId, ActionListener listener) { + enqueueTask(listener.delegateFailure((l, ref) -> { + if (isCancelledSupplier.getAsBoolean()) { + l.onFailure(new TaskCancelledException("task cancelled")); + } else { + repository.getSnapshotInfo(snapshotId, ActionListener.releaseAfter(l, ref)); + } + })); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 3e661c2efe72f..a2445e95a572f 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -291,7 +291,7 @@ public void onTimeout(TimeValue timeout) { } private void forkAndExecute(Task task, BulkRequest bulkRequest, String executorName, ActionListener releasingListener) { - threadPool.executor(Names.WRITE).execute(new ActionRunnable<>(releasingListener) { + threadPool.executor(executorName).execute(new ActionRunnable<>(releasingListener) { @Override protected void doRun() { doInternalExecute(task, bulkRequest, executorName, releasingListener); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java index 4fa670b28872b..040c50b2b74e2 100644 --- 
a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.ReferenceDocs; +import java.lang.invoke.MethodHandles; import java.nio.file.Path; import java.util.Locale; @@ -33,10 +34,10 @@ private Natives() {} try { // load one of the main JNA classes to see if the classes are available. this does not ensure that all native // libraries are available, only the ones necessary by JNA to function - Class.forName("com.sun.jna.Native"); + MethodHandles.publicLookup().ensureInitialized(com.sun.jna.Native.class); v = true; - } catch (ClassNotFoundException e) { - logger.warn("JNA not found. native methods will be disabled.", e); + } catch (IllegalAccessException e) { + throw new AssertionError(e); } catch (UnsatisfiedLinkError e) { logger.warn( String.format( diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 5f682804a5b88..809e069b0028b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; +import org.elasticsearch.cluster.routing.allocation.AllocationStatsService; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -118,6 +119,7 @@ public class ClusterModule extends AbstractModule { final Collection deciderList; final ShardsAllocator shardsAllocator; private final ShardRoutingRoleStrategy 
shardRoutingRoleStrategy; + private final AllocationStatsService allocationStatsService; public ClusterModule( Settings settings, @@ -154,6 +156,7 @@ public ClusterModule( shardRoutingRoleStrategy ); this.metadataDeleteIndexService = new MetadataDeleteIndexService(settings, clusterService, allocationService); + this.allocationStatsService = new AllocationStatsService(clusterService, clusterInfoService, shardsAllocator, writeLoadForecaster); } static ShardRoutingRoleStrategy getShardRoutingRoleStrategy(List clusterPlugins) { @@ -440,6 +443,7 @@ protected void configure() { bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); bind(ShardRoutingRoleStrategy.class).toInstance(shardRoutingRoleStrategy); + bind(AllocationStatsService.class).toInstance(allocationStatsService); } public void setExistingShardsAllocators(GatewayAllocator gatewayAllocator) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java index ae53fa19da655..e9659bde065d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java @@ -8,6 +8,8 @@ package org.elasticsearch.cluster.coordination.stateless; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.PreVoteCollector; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -16,6 +18,8 @@ import java.util.concurrent.atomic.AtomicBoolean; public class AtomicRegisterPreVoteCollector extends PreVoteCollector { + private static final Logger logger = LogManager.getLogger(AtomicRegisterPreVoteCollector.class); + private final 
StoreHeartbeatService heartbeatService; private final Runnable startElection; @@ -27,11 +31,11 @@ public AtomicRegisterPreVoteCollector(StoreHeartbeatService heartbeatService, Ru @Override public Releasable start(ClusterState clusterState, Iterable broadcastNodes) { final var shouldRun = new AtomicBoolean(true); - heartbeatService.runIfNoRecentLeader(() -> { + heartbeatService.checkLeaderHeartbeatAndRun(() -> { if (shouldRun.getAndSet(false)) { startElection.run(); } - }); + }, heartbeat -> logger.info("skipping election since there is a recent heartbeat[{}] from the leader", heartbeat)); return () -> shouldRun.set(false); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java index 0ea515012a190..d21add7e6954f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java @@ -95,15 +95,15 @@ protected long absoluteTimeInMillis() { return threadPool.absoluteTimeInMillis(); } - void runIfNoRecentLeader(Runnable runnable) { + void checkLeaderHeartbeatAndRun(Runnable noRecentLeaderRunnable, Consumer recentLeaderHeartbeatConsumer) { heartbeatStore.readLatestHeartbeat(new ActionListener<>() { @Override public void onResponse(Heartbeat heartBeat) { if (heartBeat == null || maxTimeSinceLastHeartbeat.millis() <= heartBeat.timeSinceLastHeartbeatInMillis(absoluteTimeInMillis())) { - runnable.run(); + noRecentLeaderRunnable.run(); } else { - logger.trace("runIfNoRecentLeader: found recent leader [{}]", heartBeat); + recentLeaderHeartbeatConsumer.accept(heartBeat); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index 
20b28edef5ca2..fd67a8ac7e230 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -25,10 +25,12 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -52,6 +54,8 @@ public class MetadataCreateDataStreamService { private static final Logger logger = LogManager.getLogger(MetadataCreateDataStreamService.class); + public static final String FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME = "data_streams.failure_store.refresh_interval"; + private final ThreadPool threadPool; private final ClusterService clusterService; private final MetadataCreateIndexService metadataCreateIndexService; @@ -98,6 +102,7 @@ public void createDataStream(CreateDataStreamClusterStateUpdateRequest request, public ClusterState execute(ClusterState currentState) throws Exception { ClusterState clusterState = createDataStream( metadataCreateIndexService, + clusterService.getSettings(), currentState, isDslOnlyMode, request, @@ -124,7 +129,7 @@ public ClusterState createDataStream( ClusterState current, ActionListener rerouteListener ) throws Exception { - return createDataStream(metadataCreateIndexService, current, isDslOnlyMode, request, rerouteListener); + return createDataStream(metadataCreateIndexService, clusterService.getSettings(), current, isDslOnlyMode, request, rerouteListener); } public static final class 
CreateDataStreamClusterStateUpdateRequest extends ClusterStateUpdateRequest< @@ -184,12 +189,22 @@ long getStartTime() { static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ClusterState currentState, boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, ActionListener rerouteListener ) throws Exception { - return createDataStream(metadataCreateIndexService, currentState, isDslOnlyMode, request, List.of(), null, rerouteListener); + return createDataStream( + metadataCreateIndexService, + settings, + currentState, + isDslOnlyMode, + request, + List.of(), + null, + rerouteListener + ); } /** @@ -204,6 +219,7 @@ static ClusterState createDataStream( */ static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ClusterState currentState, boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, @@ -260,6 +276,7 @@ static ClusterState createDataStream( String failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, request.getStartTime()); currentState = createFailureStoreIndex( metadataCreateIndexService, + settings, currentState, request, dataStreamName, @@ -384,6 +401,7 @@ private static ClusterState createBackingIndex( private static ClusterState createFailureStoreIndex( MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ClusterState currentState, CreateDataStreamClusterStateUpdateRequest request, String dataStreamName, @@ -394,6 +412,16 @@ private static ClusterState createFailureStoreIndex( return currentState; } + var indexSettings = MetadataRolloverService.HIDDEN_INDEX_SETTINGS; + // Optionally set a custom refresh interval for the failure store index. 
+ var refreshInterval = getFailureStoreRefreshInterval(settings); + if (refreshInterval != null) { + indexSettings = Settings.builder() + .put(indexSettings) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval) + .build(); + } + CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest( "initialize_data_stream", failureStoreIndexName, @@ -402,7 +430,7 @@ private static ClusterState createFailureStoreIndex( .nameResolvedInstant(request.getStartTime()) .performReroute(false) .setMatchingTemplate(template) - .settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS); + .settings(indexSettings); try { currentState = metadataCreateIndexService.applyCreateIndexRequest( @@ -451,4 +479,7 @@ public static void validateTimestampFieldMapping(MappingLookup mappingLookup) th fieldMapper.validate(mappingLookup); } + public static TimeValue getFailureStoreRefreshInterval(Settings settings) { + return settings.getAsTime(FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME, null); + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index f7fa34d76498a..c40c5a09e99ee 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -113,7 +113,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { } catch (IOException e) { throw new IllegalStateException(e); } - }, request, metadataCreateIndexService, delegate.reroute()); + }, request, metadataCreateIndexService, clusterService.getSettings(), delegate.reroute()); writeIndexRef.set(clusterState.metadata().dataStreams().get(request.aliasName).getWriteIndex().getName()); return clusterState; } @@ -132,6 +132,7 @@ static ClusterState migrateToDataStream( 
Function mapperSupplier, MigrateToDataStreamClusterStateUpdateRequest request, MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ActionListener listener ) throws Exception { validateRequest(currentState, request); @@ -158,6 +159,7 @@ static ClusterState migrateToDataStream( CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(request.aliasName); return createDataStream( metadataCreateIndexService, + settings, currentState, isDslOnlyMode, req, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java new file mode 100644 index 0000000000000..dbafd916b2a42 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.util.Maps; + +import java.util.Map; + +public class AllocationStatsService { + + private final ClusterService clusterService; + private final ClusterInfoService clusterInfoService; + private final DesiredBalanceShardsAllocator desiredBalanceShardsAllocator; + private final WriteLoadForecaster writeLoadForecaster; + + public AllocationStatsService( + ClusterService clusterService, + ClusterInfoService clusterInfoService, + ShardsAllocator shardsAllocator, + WriteLoadForecaster writeLoadForecaster + ) { + this.clusterService = clusterService; + this.clusterInfoService = clusterInfoService; + this.desiredBalanceShardsAllocator = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator ? allocator : null; + this.writeLoadForecaster = writeLoadForecaster; + } + + public Map stats() { + var state = clusterService.state(); + var info = clusterInfoService.getClusterInfo(); + var desiredBalance = desiredBalanceShardsAllocator != null ? 
desiredBalanceShardsAllocator.getDesiredBalance() : null; + + var stats = Maps.newMapWithExpectedSize(state.getRoutingNodes().size()); + for (RoutingNode node : state.getRoutingNodes()) { + int shards = 0; + int undesiredShards = 0; + double forecastedWriteLoad = 0.0; + long forecastedDiskUsage = 0; + long currentDiskUsage = 0; + for (ShardRouting shardRouting : node) { + if (shardRouting.relocating()) { + continue; + } + shards++; + IndexMetadata indexMetadata = state.metadata().getIndexSafe(shardRouting.index()); + if (isDesiredAllocation(desiredBalance, shardRouting) == false) { + undesiredShards++; + } + long shardSize = info.getShardSize(shardRouting.shardId(), shardRouting.primary(), 0); + forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); + forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize); + currentDiskUsage += shardSize; + + } + stats.put( + node.nodeId(), + new NodeAllocationStats( + shards, + desiredBalanceShardsAllocator != null ? undesiredShards : -1, + forecastedWriteLoad, + forecastedDiskUsage, + currentDiskUsage + ) + ); + } + + return stats; + } + + private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) { + if (desiredBalance == null) { + return true; + } + var assignment = desiredBalance.getAssignment(shardRouting.shardId()); + if (assignment == null) { + return false; + } + return assignment.nodeIds().contains(shardRouting.currentNodeId()); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStats.java new file mode 100644 index 0000000000000..57484d6da53c7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStats.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +public record NodeAllocationStats( + int shards, + int undesiredShards, + double forecastedIngestLoad, + long forecastedDiskUsage, + long currentDiskUsage +) implements Writeable, ToXContentFragment { + + public NodeAllocationStats(StreamInput in) throws IOException { + this(in.readVInt(), in.readVInt(), in.readDouble(), in.readVLong(), in.readVLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(shards); + out.writeVInt(undesiredShards); + out.writeDouble(forecastedIngestLoad); + out.writeVLong(forecastedDiskUsage); + out.writeVLong(currentDiskUsage); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject("allocations") + .field("shards", shards) + .field("undesired_shards", undesiredShards) + .field("forecasted_ingest_load", forecastedIngestLoad) + .humanReadableField("forecasted_disk_usage_in_bytes", "forecasted_disk_usage", ByteSizeValue.ofBytes(forecastedDiskUsage)) + .humanReadableField("current_disk_usage_in_bytes", "current_disk_usage", ByteSizeValue.ofBytes(currentDiskUsage)) + .endObject(); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java 
b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index 4b5cef4bbbd45..ea8eadd66acaa 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -16,6 +16,7 @@ import java.util.NoSuchElementException; import java.util.Objects; import java.util.function.BiPredicate; +import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.IntFunction; @@ -225,6 +226,38 @@ public U next() { } } + + /** + * Returns an iterator over the same items as the provided {@code input} except that it stops yielding items (i.e. starts returning + * {@code false} from {@link Iterator#hasNext()}) on failure. + */ + public static Iterator failFast(Iterator input, BooleanSupplier isFailingSupplier) { + if (isFailingSupplier.getAsBoolean()) { + return Collections.emptyIterator(); + } else { + return new FailFastIterator<>(input, isFailingSupplier); + } + } + + private static class FailFastIterator implements Iterator { + private final Iterator delegate; + private final BooleanSupplier isFailingSupplier; + + FailFastIterator(Iterator delegate, BooleanSupplier isFailingSupplier) { + this.delegate = delegate; + this.isFailingSupplier = isFailingSupplier; + } + + @Override + public boolean hasNext() { + return isFailingSupplier.getAsBoolean() == false && delegate.hasNext(); + } + + @Override + public T next() { + return delegate.next(); + } + } + public static boolean equals(Iterator iterator1, Iterator iterator2, BiPredicate itemComparer) { if (iterator1 == null) { return iterator2 == null; diff --git a/server/src/main/java/org/elasticsearch/common/inject/Binder.java b/server/src/main/java/org/elasticsearch/common/inject/Binder.java index d1ff5ff4b0d93..97aa924d32cb1 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Binder.java +++ 
b/server/src/main/java/org/elasticsearch/common/inject/Binder.java @@ -52,11 +52,7 @@ * * Specifies that a request for a {@code Service} instance with no binding * annotations should be treated as if it were a request for a - * {@code ServiceImpl} instance. This overrides the function of any - * {@link ImplementedBy @ImplementedBy} or {@link ProvidedBy @ProvidedBy} - * annotations found on {@code Service}, since Guice will have already - * "moved on" to {@code ServiceImpl} before it reaches the point when it starts - * looking for these annotations. + * {@code ServiceImpl} instance. * *
  *     bind(Service.class).toProvider(ServiceProvider.class);
diff --git a/server/src/main/java/org/elasticsearch/common/inject/Binding.java b/server/src/main/java/org/elasticsearch/common/inject/Binding.java index 9f519e3daca0a..9bc446a867aa7 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Binding.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Binding.java @@ -31,9 +31,7 @@ *
  *     bind(Service.class).annotatedWith(Red.class).to(ServiceImpl.class);
  *     bindConstant().annotatedWith(ServerHost.class).to(args[0]);
- *
  • Implicitly by the Injector by following a type's {@link ImplementedBy - * pointer} {@link ProvidedBy annotations} or by using its {@link Inject annotated} or - * default constructor.
  • + *
  • Implicitly by the Injector by using its {@link Inject annotated} or default constructor.
  • *
  • By converting a bound instance to a different type.
  • *
  • For {@link Provider providers}, by delegating to the binding for the provided type.
  • * @@ -77,6 +75,6 @@ public interface Binding extends Element { * @param visitor to call back on * @since 2.0 */ - V acceptTargetVisitor(BindingTargetVisitor visitor); + void acceptTargetVisitor(BindingTargetVisitor visitor); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java b/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java index 99a9d6fab9c1d..0865bf47090af 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java +++ b/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.inject.internal.LinkedBindingImpl; import org.elasticsearch.common.inject.internal.LinkedProviderBindingImpl; import org.elasticsearch.common.inject.internal.ProviderInstanceBindingImpl; -import org.elasticsearch.common.inject.internal.ProviderMethod; import org.elasticsearch.common.inject.internal.Scoping; import org.elasticsearch.common.inject.internal.UntargettedBindingImpl; import org.elasticsearch.common.inject.spi.BindingTargetVisitor; @@ -62,12 +61,7 @@ public Boolean visit(Binding command) { final Object source = command.getSource(); if (Void.class.equals(command.getKey().getRawType())) { - if (command instanceof ProviderInstanceBinding - && ((ProviderInstanceBinding) command).getProviderInstance() instanceof ProviderMethod) { - errors.voidProviderMethod(); - } else { - errors.missingConstantValues(); - } + errors.missingConstantValues(); return true; } diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java deleted file mode 100644 index 9a0cd367e1650..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -/** - * Creates {@link ConstructionProxy} instances. - * - * @author crazybob@google.com (Bob Lee) - */ -interface ConstructionProxyFactory { - - /** - * Gets a construction proxy for the given constructor. - */ - ConstructionProxy create(); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java index 0c690f7ed9fa1..153c9627d736e 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java @@ -54,11 +54,11 @@ public void initialize(InjectorImpl injector, Errors errors) throws ErrorsExcept } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { + public void acceptTargetVisitor(BindingTargetVisitor visitor) { if (factory.constructorInjector == null) { throw new IllegalStateException("not initialized"); } - return visitor.visit(); + visitor.visit(); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java index 7b9f4be9c5a99..d38a75e0720d7 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java +++ 
b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java @@ -80,7 +80,6 @@ Object construct(Errors errors, InternalContext context, Class expectedType) constructionContext.setCurrentReference(t); membersInjector.injectMembers(t, errors, context); - membersInjector.notifyListeners(t, errors); return t; } catch (InvocationTargetException userException) { diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java index 29ccae98c7d27..97a495f97cfbd 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java @@ -21,6 +21,9 @@ import org.elasticsearch.common.inject.internal.FailableCache; import org.elasticsearch.common.inject.spi.InjectionPoint; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; + /** * Constructor injectors by type. * @@ -65,10 +68,28 @@ private ConstructorInjector createConstructor(TypeLiteral type, Errors ); MembersInjectorImpl membersInjector = injector.membersInjectorStore.get(type, errors); - ConstructionProxyFactory factory = new DefaultConstructionProxyFactory<>(injectionPoint); - errors.throwIfNewErrors(numErrorsBefore); - return new ConstructorInjector<>(factory.create(), constructorParameterInjectors, membersInjector); + @SuppressWarnings("unchecked") // the injection point is for a constructor of T + final Constructor constructor = (Constructor) injectionPoint.getMember(); + return new ConstructorInjector<>(new ConstructionProxy<>() { + @Override + public T newInstance(Object... 
arguments) throws InvocationTargetException { + try { + return constructor.newInstance(arguments); + } catch (InstantiationException e) { + throw new AssertionError(e); // shouldn't happen, we know this is a concrete type + } catch (IllegalAccessException e) { + // a security manager is blocking us, we're hosed + throw new AssertionError("Wrong access modifiers on " + constructor, e); + } + } + + @Override + public InjectionPoint getInjectionPoint() { + return injectionPoint; + } + + }, constructorParameterInjectors, membersInjector); } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java b/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java deleted file mode 100644 index cc713893abd69..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import org.elasticsearch.common.inject.spi.InjectionPoint; - -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; - -/** - * Produces construction proxies that invoke the class constructor. 
- * - * @author crazybob@google.com (Bob Lee) - */ -class DefaultConstructionProxyFactory implements ConstructionProxyFactory { - - private final InjectionPoint injectionPoint; - - /** - * @param injectionPoint an injection point whose member is a constructor of {@code T}. - */ - DefaultConstructionProxyFactory(InjectionPoint injectionPoint) { - this.injectionPoint = injectionPoint; - } - - @Override - public ConstructionProxy create() { - @SuppressWarnings("unchecked") // the injection point is for a constructor of T - final Constructor constructor = (Constructor) injectionPoint.getMember(); - - return new ConstructionProxy<>() { - @Override - public T newInstance(Object... arguments) throws InvocationTargetException { - try { - return constructor.newInstance(arguments); - } catch (InstantiationException e) { - throw new AssertionError(e); // shouldn't happen, we know this is a concrete type - } catch (IllegalAccessException e) { - // a security manager is blocking us, we're hosed - throw new AssertionError("Wrong access modifiers on " + constructor, e); - } - } - - @Override - public InjectionPoint getInjectionPoint() { - return injectionPoint; - } - - }; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/Exposed.java b/server/src/main/java/org/elasticsearch/common/inject/Exposed.java deleted file mode 100644 index 4f557212da883..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/Exposed.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * Accompanies a {@literal @}{@link org.elasticsearch.common.inject.Provides Provides} method annotation in a - * private module to indicate that the provided binding is exposed. - * - * @author jessewilson@google.com (Jesse Wilson) - * @since 2.0 - */ -@Target(ElementType.METHOD) -@Retention(RUNTIME) -@Documented -public @interface Exposed { -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java b/server/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java deleted file mode 100644 index 652be0f3ed30c..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.TYPE; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * A pointer to the default implementation of a type. 
- * - * @author crazybob@google.com (Bob Lee) - */ -@Retention(RUNTIME) -@Target(TYPE) -public @interface ImplementedBy { - - /** - * The implementation type. - */ - Class value(); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/Inject.java b/server/src/main/java/org/elasticsearch/common/inject/Inject.java index 0a30b7b97a2da..e56c4c21ad39e 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Inject.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Inject.java @@ -21,7 +21,6 @@ import java.lang.annotation.Target; import static java.lang.annotation.ElementType.CONSTRUCTOR; -import static java.lang.annotation.ElementType.FIELD; import static java.lang.annotation.ElementType.METHOD; import static java.lang.annotation.RetentionPolicy.RUNTIME; @@ -45,7 +44,7 @@ * * @author crazybob@google.com (Bob Lee) */ -@Target({ METHOD, CONSTRUCTOR, FIELD }) +@Target({ METHOD, CONSTRUCTOR }) @Retention(RUNTIME) @Documented public @interface Inject { diff --git a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java index 8d51894bf9907..8614fd99da088 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java @@ -17,15 +17,12 @@ package org.elasticsearch.common.inject; import org.elasticsearch.common.Classes; -import org.elasticsearch.common.inject.internal.Annotations; import org.elasticsearch.common.inject.internal.BindingImpl; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.InstanceBindingImpl; import org.elasticsearch.common.inject.internal.InternalContext; import org.elasticsearch.common.inject.internal.InternalFactory; -import org.elasticsearch.common.inject.internal.LinkedBindingImpl; -import 
org.elasticsearch.common.inject.internal.LinkedProviderBindingImpl; import org.elasticsearch.common.inject.internal.MatcherAndConverter; import org.elasticsearch.common.inject.internal.Scoping; import org.elasticsearch.common.inject.internal.SourceProvider; @@ -39,7 +36,6 @@ import java.lang.annotation.Annotation; import java.lang.reflect.GenericArrayType; -import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Modifier; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; @@ -195,8 +191,8 @@ static InternalFactory> createInternalFactory(Binding provide } @Override - public V acceptTargetVisitor(BindingTargetVisitor, V> visitor) { - return visitor.visit(); + public void acceptTargetVisitor(BindingTargetVisitor, V> visitor) { + visitor.visit(); } @Override @@ -270,8 +266,8 @@ public Provider getProvider() { } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(); } @Override @@ -322,20 +318,6 @@ BindingImpl createUnitializedBinding(Key key, Scoping scoping, Object return binding; } - // Handle @ImplementedBy - ImplementedBy implementedBy = rawType.getAnnotation(ImplementedBy.class); - if (implementedBy != null) { - Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors); - return createImplementedByBinding(key, scoping, implementedBy, errors); - } - - // Handle @ProvidedBy. - ProvidedBy providedBy = rawType.getAnnotation(ProvidedBy.class); - if (providedBy != null) { - Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors); - return createProvidedByBinding(key, scoping, providedBy, errors); - } - // We can't inject abstract classes. // TODO: Method interceptors could actually enable us to implement // abstract types. Should we remove this restriction? 
@@ -385,80 +367,6 @@ private BindingImpl> createTypeLiteralBinding(Key(this, key, SourceProvider.UNKNOWN_SOURCE, factory, emptySet(), value); } - /** - * Creates a binding for a type annotated with @ProvidedBy. - */ - BindingImpl createProvidedByBinding(Key key, Scoping scoping, ProvidedBy providedBy, Errors errors) throws ErrorsException { - final Class rawType = key.getTypeLiteral().getRawType(); - final Class> providerType = providedBy.value(); - - // Make sure it's not the same type. TODO: Can we check for deeper loops? - if (providerType == rawType) { - throw errors.recursiveProviderType().toException(); - } - - // Assume the provider provides an appropriate type. We double check at runtime. - @SuppressWarnings("unchecked") - final Key> providerKey = (Key>) Key.get(providerType); - final BindingImpl> providerBinding = getBindingOrThrow(providerKey, errors); - - InternalFactory internalFactory = (errors1, context, dependency) -> { - errors1 = errors1.withSource(providerKey); - Provider provider = providerBinding.getInternalFactory().get(errors1, context, dependency); - try { - Object o = provider.get(); - if (o != null && rawType.isInstance(o) == false) { - throw errors1.subtypeNotProvided(providerType, rawType).toException(); - } - @SuppressWarnings("unchecked") // protected by isInstance() check above - T t = (T) o; - return t; - } catch (RuntimeException e) { - throw errors1.errorInProvider(e).toException(); - } - }; - - return new LinkedProviderBindingImpl<>( - this, - key, - rawType /* source */, - Scopes.scope(this, internalFactory, scoping), - scoping, - providerKey - ); - } - - /** - * Creates a binding for a type annotated with @ImplementedBy. - */ - BindingImpl createImplementedByBinding(Key key, Scoping scoping, ImplementedBy implementedBy, Errors errors) - throws ErrorsException { - Class rawType = key.getTypeLiteral().getRawType(); - Class implementationType = implementedBy.value(); - - // Make sure it's not the same type. 
TODO: Can we check for deeper cycles? - if (implementationType == rawType) { - throw errors.recursiveImplementationType().toException(); - } - - // Make sure implementationType extends type. - if (rawType.isAssignableFrom(implementationType) == false) { - throw errors.notASubtype(implementationType, rawType).toException(); - } - - @SuppressWarnings("unchecked") // After the preceding check, this cast is safe. - Class subclass = (Class) implementationType; - - // Look up the target binding. - final Key targetKey = Key.get(subclass); - final BindingImpl targetBinding = getBindingOrThrow(targetKey, errors); - - InternalFactory internalFactory = (errors1, context, dependency) -> targetBinding.getInternalFactory() - .get(errors1.withSource(targetKey), context, dependency); - - return new LinkedBindingImpl<>(this, key, rawType /* source */, Scopes.scope(this, internalFactory, scoping), scoping, targetKey); - } - /** * Attempts to create a just-in-time binding for {@code key} in the root injector, falling back to * other ancestor injectors until this injector is tried. @@ -584,13 +492,6 @@ SingleParameterInjector createParameterInjector(final Dependency depen return new SingleParameterInjector<>(dependency, factory); } - /** - * Invokes a method. - */ - interface MethodInvoker { - Object invoke(Object target, Object... 
parameters) throws IllegalAccessException, InvocationTargetException; - } - /** * Cached constructor injectors for each type */ diff --git a/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java b/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java index 0a4464a373e18..ffaee1648ab5a 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java +++ b/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java @@ -25,17 +25,4 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 */ -public interface MembersInjector { - - /** - * Injects dependencies into the fields and methods of {@code instance}. Ignores the presence or - * absence of an injectable constructor. - *

    - * Whenever Guice creates an instance, it performs this injection automatically (after first - * performing constructor injection), so if you're able to let Guice create all your objects for - * you, you'll never need to use this method. - * - * @param instance to inject members on. May be {@code null}. - */ - void injectMembers(T instance); -} +public interface MembersInjector {} diff --git a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java index b32cddf9be4bc..8c190ef301651 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.InternalContext; -import org.elasticsearch.common.inject.spi.InjectionListener; import java.util.List; @@ -31,28 +30,12 @@ class MembersInjectorImpl implements MembersInjector { private final TypeLiteral typeLiteral; private final InjectorImpl injector; - private final List memberInjectors; - private final List> userMembersInjectors; - private final List> injectionListeners; + private final List memberInjectors; - MembersInjectorImpl(InjectorImpl injector, TypeLiteral typeLiteral, List memberInjectors) { + MembersInjectorImpl(InjectorImpl injector, TypeLiteral typeLiteral, List memberInjectors) { this.injector = injector; this.typeLiteral = typeLiteral; this.memberInjectors = memberInjectors; - this.userMembersInjectors = List.of(); - this.injectionListeners = List.of(); - } - - @Override - public void injectMembers(T instance) { - Errors errors = new Errors(typeLiteral); - try { - injectAndNotify(instance, errors); - } catch (ErrorsException e) { - errors.merge(e.getErrors()); - } - - 
errors.throwProvisionExceptionIfErrorsExist(); } void injectAndNotify(final T instance, final Errors errors) throws ErrorsException { @@ -64,20 +47,6 @@ void injectAndNotify(final T instance, final Errors errors) throws ErrorsExcepti injectMembers(instance, errors, context); return null; }); - - notifyListeners(instance, errors); - } - - void notifyListeners(T instance, Errors errors) throws ErrorsException { - int numErrorsBefore = errors.size(); - for (InjectionListener injectionListener : injectionListeners) { - try { - injectionListener.afterInjection(instance); - } catch (RuntimeException e) { - errors.errorNotifyingInjectionListener(injectionListener, typeLiteral, e); - } - } - errors.throwIfNewErrors(numErrorsBefore); } void injectMembers(T t, Errors errors, InternalContext context) { @@ -85,16 +54,6 @@ void injectMembers(T t, Errors errors, InternalContext context) { for (int i = 0, size = memberInjectors.size(); i < size; i++) { memberInjectors.get(i).inject(errors, context, t); } - - // optimization: use manual for/each to save allocating an iterator here - for (int i = 0, size = userMembersInjectors.size(); i < size; i++) { - MembersInjector userMembersInjector = userMembersInjectors.get(i); - try { - userMembersInjector.injectMembers(t); - } catch (RuntimeException e) { - errors.errorInUserInjector(userMembersInjector, typeLiteral, e); - } - } } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java index 9352c84db28f6..925739af25742 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java +++ b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.inject.internal.FailableCache; import org.elasticsearch.common.inject.spi.InjectionPoint; -import java.lang.reflect.Field; import java.util.ArrayList; import 
java.util.Collections; import java.util.List; @@ -62,12 +61,12 @@ private MembersInjectorImpl createWithListeners(TypeLiteral type, Erro Set injectionPoints; try { - injectionPoints = InjectionPoint.forInstanceMethodsAndFields(type); + injectionPoints = InjectionPoint.forInstanceMethods(type); } catch (ConfigurationException e) { errors.merge(e.getErrorMessages()); injectionPoints = e.getPartialValue(); } - List injectors = getInjectors(injectionPoints, errors); + List injectors = getInjectors(injectionPoints, errors); errors.throwIfNewErrors(numErrorsBefore); return new MembersInjectorImpl<>(injector, type, injectors); @@ -76,14 +75,12 @@ private MembersInjectorImpl createWithListeners(TypeLiteral type, Erro /** * Returns the injectors for the specified injection points. */ - List getInjectors(Set injectionPoints, Errors errors) { - List injectors = new ArrayList<>(); + List getInjectors(Set injectionPoints, Errors errors) { + List injectors = new ArrayList<>(); for (InjectionPoint injectionPoint : injectionPoints) { try { Errors errorsForMember = injectionPoint.isOptional() ? new Errors(injectionPoint) : errors.withSource(injectionPoint); - SingleMemberInjector injector = injectionPoint.getMember() instanceof Field - ? new SingleFieldInjector(this.injector, injectionPoint, errorsForMember) - : new SingleMethodInjector(this.injector, injectionPoint, errorsForMember); + SingleMethodInjector injector = new SingleMethodInjector(this.injector, injectionPoint, errorsForMember); injectors.add(injector); } catch (ErrorsException ignoredForNow) { // ignored for now diff --git a/server/src/main/java/org/elasticsearch/common/inject/Module.java b/server/src/main/java/org/elasticsearch/common/inject/Module.java index f3a43d80f31ec..38eddcdb200b7 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Module.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Module.java @@ -24,11 +24,6 @@ *

    * Your Module classes can use a more streamlined syntax by extending * {@link AbstractModule} rather than implementing this interface directly. - *

    - * In addition to the bindings configured via {@link #configure}, bindings - * will be created for all methods annotated with {@literal @}{@link Provides}. - * Use scope and binding annotations on these methods to configure the - * bindings. */ public interface Module { @@ -36,8 +31,7 @@ public interface Module { * Contributes bindings and other configurations for this module to {@code binder}. *

    * Do not invoke this method directly to install submodules. Instead use - * {@link Binder#install(Module)}, which ensures that {@link Provides provider methods} are - * discovered. + * {@link Binder#install(Module)}. */ void configure(Binder binder); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java b/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java index f1da98316465a..fd80e6271b2cf 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java @@ -24,11 +24,6 @@ */ public interface PrivateBinder extends Binder { - /** - * Makes the binding for {@code key} available to the enclosing environment - */ - void expose(Key key); - @Override PrivateBinder withSource(Object source); diff --git a/server/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java b/server/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java deleted file mode 100644 index 945de83cf9116..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.TYPE; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * A pointer to the default provider type for a type. - * - * @author crazybob@google.com (Bob Lee) - */ -@Retention(RUNTIME) -@Target(TYPE) -public @interface ProvidedBy { - - /** - * The implementation type. - */ - Class> value(); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/Provides.java b/server/src/main/java/org/elasticsearch/common/inject/Provides.java deleted file mode 100644 index 587005f883574..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/Provides.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (C) 2007 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Documented; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.METHOD; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * Annotates methods of a {@link Module} to create a provider method binding. The method's return - * type is bound to its returned value. Guice will pass dependencies to the method as parameters. 
- * - * @author crazybob@google.com (Bob Lee) - * @since 2.0 - */ -@Documented -@Target(METHOD) -@Retention(RUNTIME) -public @interface Provides { -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java b/server/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java deleted file mode 100644 index 7e8bfed724d59..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import org.elasticsearch.common.inject.internal.Errors; -import org.elasticsearch.common.inject.internal.ErrorsException; -import org.elasticsearch.common.inject.internal.InternalContext; -import org.elasticsearch.common.inject.internal.InternalFactory; -import org.elasticsearch.common.inject.spi.Dependency; -import org.elasticsearch.common.inject.spi.InjectionPoint; - -import java.lang.reflect.Field; - -/** - * Sets an injectable field. 
- */ -class SingleFieldInjector implements SingleMemberInjector { - final Field field; - final InjectionPoint injectionPoint; - final Dependency dependency; - final InternalFactory factory; - - SingleFieldInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors) throws ErrorsException { - this.injectionPoint = injectionPoint; - this.field = (Field) injectionPoint.getMember(); - this.dependency = injectionPoint.getDependencies().get(0); - factory = injector.getInternalFactory(dependency.getKey(), errors); - } - - @Override - public void inject(Errors errors, InternalContext context, Object o) { - errors = errors.withSource(dependency); - - context.setDependency(dependency); - try { - Object value = factory.get(errors, context, dependency); - field.set(o, value); - } catch (ErrorsException e) { - errors.withSource(injectionPoint).merge(e.getErrors()); - } catch (IllegalAccessException e) { - throw new AssertionError(e); // a security manager is blocking us, we're hosed - } finally { - context.setDependency(null); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java b/server/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java deleted file mode 100644 index a4e25f9fd000b..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import org.elasticsearch.common.inject.internal.Errors; -import org.elasticsearch.common.inject.internal.InternalContext; - -/** - * Injects a field or method of a given object. - */ -interface SingleMemberInjector { - void inject(Errors errors, InternalContext context, Object o); - -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java b/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java index f6d9a2eb2c396..d36bc1e623a99 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java +++ b/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import org.elasticsearch.common.inject.InjectorImpl.MethodInvoker; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.InternalContext; @@ -28,19 +27,17 @@ /** * Invokes an injectable method. 
*/ -class SingleMethodInjector implements SingleMemberInjector { - final MethodInvoker methodInvoker; +class SingleMethodInjector { + final Method method; final SingleParameterInjector[] parameterInjectors; final InjectionPoint injectionPoint; SingleMethodInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors) throws ErrorsException { this.injectionPoint = injectionPoint; - final Method method = (Method) injectionPoint.getMember(); - methodInvoker = method::invoke; + method = (Method) injectionPoint.getMember(); parameterInjectors = injector.getParametersInjectors(injectionPoint.getDependencies(), errors); } - @Override public void inject(Errors errors, InternalContext context, Object o) { Object[] parameters; try { @@ -51,7 +48,7 @@ public void inject(Errors errors, InternalContext context, Object o) { } try { - methodInvoker.invoke(o, parameters); + method.invoke(o, parameters); } catch (IllegalAccessException e) { throw new AssertionError(e); // a security manager is blocking us, we're hosed } catch (InvocationTargetException userException) { diff --git a/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java b/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java index 72bf444d2dd3b..d39c4e44d2ff9 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java +++ b/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.inject.util.Types; import java.lang.reflect.Constructor; -import java.lang.reflect.Field; import java.lang.reflect.GenericArrayType; import java.lang.reflect.Member; import java.lang.reflect.Method; @@ -249,19 +248,6 @@ public TypeLiteral getSupertype(Class supertype) { return resolve(MoreTypes.getGenericSupertype(type, rawType, supertype)); } - /** - * Returns the resolved generic type of {@code field}. - * - * @param field a field defined by this or any superclass. 
- * @since 2.0 - */ - public TypeLiteral getFieldType(Field field) { - if (field.getDeclaringClass().isAssignableFrom(rawType) == false) { - throw new IllegalArgumentException(field + " is not defined by a supertype of " + type); - } - return resolve(field.getGenericType()); - } - /** * Returns the resolved generic parameter types of {@code methodOrConstructor}. * @@ -291,17 +277,4 @@ public List> getParameterTypes(Member methodOrConstructor) { return resolveAll(genericParameterTypes); } - /** - * Returns the resolved generic return type of {@code method}. - * - * @param method a method defined by this or any supertype. - * @since 2.0 - */ - public TypeLiteral getReturnType(Method method) { - if (method.getDeclaringClass().isAssignableFrom(rawType) == false) { - throw new IllegalArgumentException(method + " is not defined by a supertype of " + type); - } - - return resolve(method.getGenericReturnType()); - } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java index 88b7fd86370c6..dad91c3fb8878 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java @@ -16,8 +16,6 @@ package org.elasticsearch.common.inject.binder; -import java.lang.annotation.Annotation; - /** * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}. * @@ -25,11 +23,6 @@ */ public interface ScopedBindingBuilder { - /** - * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}. - */ - void in(Class scopeAnnotation); - /** * Instructs the {@link org.elasticsearch.common.inject.Injector} to eagerly initialize this * singleton-scoped binding upon creation. 
Useful for application diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java index 083c7296fe883..60b6a74dec997 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java @@ -21,9 +21,7 @@ import org.elasticsearch.common.inject.spi.Element; import org.elasticsearch.common.inject.spi.InstanceBinding; -import java.lang.annotation.Annotation; import java.util.List; -import java.util.Objects; /** * Bind a value or constant. @@ -61,12 +59,6 @@ protected BindingImpl setBinding(BindingImpl binding) { return binding; } - public void in(final Class scopeAnnotation) { - Objects.requireNonNull(scopeAnnotation, "scopeAnnotation"); - checkNotScoped(); - setBinding(getBinding().withScoping(Scoping.forAnnotation(scopeAnnotation))); - } - public void asEagerSingleton() { checkNotScoped(); setBinding(getBinding().withScoping(Scoping.EAGER_SINGLETON)); diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java index 3837741bc3119..fd40879025c65 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java @@ -60,7 +60,7 @@ public void toInstance(T instance) { Set injectionPoints; if (instance != null) { try { - injectionPoints = InjectionPoint.forInstanceMethodsAndFields(instance.getClass()); + injectionPoints = InjectionPoint.forInstanceMethods(instance.getClass()); } catch (ConfigurationException e) { for (Message message : e.getErrorMessages()) { binder.addError(message); @@ -84,7 +84,7 @@ public BindingBuilder toProvider(Provider provider) { // lookup the injection 
points, adding any errors to the binder's errors list Set injectionPoints; try { - injectionPoints = InjectionPoint.forInstanceMethodsAndFields(provider.getClass()); + injectionPoints = InjectionPoint.forInstanceMethods(provider.getClass()); } catch (ConfigurationException e) { for (Message message : e.getErrorMessages()) { binder.addError(message); diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java index 03a584d5c508b..ea4b530f48b9b 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java @@ -21,13 +21,10 @@ import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Key; -import org.elasticsearch.common.inject.MembersInjector; -import org.elasticsearch.common.inject.Provider; import org.elasticsearch.common.inject.ProvisionException; import org.elasticsearch.common.inject.Scope; import org.elasticsearch.common.inject.TypeLiteral; import org.elasticsearch.common.inject.spi.Dependency; -import org.elasticsearch.common.inject.spi.InjectionListener; import org.elasticsearch.common.inject.spi.InjectionPoint; import org.elasticsearch.common.inject.spi.Message; @@ -199,22 +196,6 @@ public Errors bindingToProvider() { return addMessage("Binding to Provider is not allowed."); } - public Errors subtypeNotProvided(Class> providerType, Class type) { - return addMessage("%s doesn't provide instances of %s.", providerType, type); - } - - public Errors notASubtype(Class implementationType, Class type) { - return addMessage("%s doesn't extend %s.", implementationType, type); - } - - public Errors recursiveImplementationType() { - return addMessage("@ImplementedBy points to the same class it annotates."); - } - - public Errors recursiveProviderType() { - return 
addMessage("@ProvidedBy points to the same class it annotates."); - } - public Errors missingRuntimeRetention(Object source) { return addMessage("Please annotate with @Retention(RUNTIME).%n" + " Bound at %s.", convert(source)); } @@ -268,10 +249,6 @@ public Errors duplicateScopes(Scope existing, Class annota return addMessage("Scope %s is already bound to %s. Cannot bind %s.", existing, annotationType, scope); } - public Errors voidProviderMethod() { - return addMessage("Provider methods must return a value. Do not return void."); - } - public Errors missingConstantValues() { return addMessage("Missing constant value. Please call to(...)."); } @@ -315,14 +292,6 @@ public Errors errorInProvider(RuntimeException runtimeException) { return errorInUserCode(runtimeException, "Error in custom provider, %s", runtimeException); } - public Errors errorInUserInjector(MembersInjector listener, TypeLiteral type, RuntimeException cause) { - return errorInUserCode(cause, "Error injecting %s using %s.%n" + " Reason: %s", type, listener, cause); - } - - public Errors errorNotifyingInjectionListener(InjectionListener listener, TypeLiteral type, RuntimeException cause) { - return errorInUserCode(cause, "Error notifying InjectionListener %s of %s.%n" + " Reason: %s", listener, type, cause); - } - public static Collection getMessagesFromThrowable(Throwable throwable) { if (throwable instanceof ProvisionException) { return ((ProvisionException) throwable).getErrorMessages(); @@ -381,14 +350,6 @@ public void throwConfigurationExceptionIfErrorsExist() { throw new ConfigurationException(getMessages()); } - public void throwProvisionExceptionIfErrorsExist() { - if (hasErrors() == false) { - return; - } - - throw new ProvisionException(getMessages()); - } - private Message merge(Message message) { List sources = new ArrayList<>(); sources.addAll(getSources()); diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java 
b/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java index 07c9dd0e4cf25..f5b36cf33b800 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java @@ -59,8 +59,8 @@ public Provider getProvider() { } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java index 56e1a92c25018..135726f80e25b 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java @@ -43,8 +43,8 @@ public LinkedBindingImpl(Object source, Key key, Scoping scoping, Key V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java index a27692a68882b..0bfd2ef273a74 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java @@ -44,8 +44,8 @@ public LinkedProviderBindingImpl( } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java index 676c0717896d5..792c18920a6fa 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java @@ -57,8 +57,8 @@ public ProviderInstanceBindingImpl( } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java deleted file mode 100644 index 861f9ad77128e..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.elasticsearch.common.inject.internal; - -import org.elasticsearch.common.inject.Binder; -import org.elasticsearch.common.inject.Exposed; -import org.elasticsearch.common.inject.Key; -import org.elasticsearch.common.inject.PrivateBinder; -import org.elasticsearch.common.inject.Provider; -import org.elasticsearch.common.inject.spi.ProviderWithDependencies; - -import java.lang.annotation.Annotation; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.List; - -/** - * A provider that invokes a method and returns its result. - * - * @author jessewilson@google.com (Jesse Wilson) - */ -public class ProviderMethod implements ProviderWithDependencies { - private final Key key; - private final Class scopeAnnotation; - private final Object instance; - private final Method method; - private final List> parameterProviders; - private final boolean exposed; - - /** - * @param method the method to invoke. Its return type must be the same type as {@code key}. - */ - ProviderMethod( - Key key, - Method method, - Object instance, - List> parameterProviders, - Class scopeAnnotation - ) { - this.key = key; - this.scopeAnnotation = scopeAnnotation; - this.instance = instance; - this.method = method; - this.parameterProviders = parameterProviders; - this.exposed = method.getAnnotation(Exposed.class) != null; - } - - public void configure(Binder binder) { - binder = binder.withSource(method); - - if (scopeAnnotation != null) { - binder.bind(key).toProvider(this).in(scopeAnnotation); - } else { - binder.bind(key).toProvider(this); - } - - if (exposed) { - // the cast is safe 'cause the only binder we have implements PrivateBinder. 
If there's a - // misplaced @Exposed, calling this will add an error to the binder's error queue - ((PrivateBinder) binder).expose(key); - } - } - - @Override - public T get() { - Object[] parameters = new Object[parameterProviders.size()]; - for (int i = 0; i < parameters.length; i++) { - parameters[i] = parameterProviders.get(i).get(); - } - - try { - // We know this cast is safe because T is the method's return type. - @SuppressWarnings({ "unchecked" }) - T result = (T) method.invoke(instance, parameters); - return result; - } catch (IllegalAccessException e) { - throw new AssertionError(e); - } catch (InvocationTargetException e) { - throw new RuntimeException(e); - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java deleted file mode 100644 index 6a1d7aabed962..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.elasticsearch.common.inject.internal; - -import org.elasticsearch.common.inject.Binder; -import org.elasticsearch.common.inject.Key; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.inject.Provider; -import org.elasticsearch.common.inject.Provides; -import org.elasticsearch.common.inject.TypeLiteral; -import org.elasticsearch.common.inject.spi.Message; -import org.elasticsearch.common.inject.util.Modules; - -import java.lang.annotation.Annotation; -import java.lang.reflect.Member; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -/** - * Creates bindings to methods annotated with {@literal @}{@link Provides}. Use the scope and - * binding annotations on the provider method to configure the binding. - * - * @author crazybob@google.com (Bob Lee) - * @author jessewilson@google.com (Jesse Wilson) - */ -public final class ProviderMethodsModule implements Module { - private final Object delegate; - private final TypeLiteral typeLiteral; - - private ProviderMethodsModule(Object delegate) { - this.delegate = Objects.requireNonNull(delegate, "delegate"); - this.typeLiteral = TypeLiteral.get(this.delegate.getClass()); - } - - /** - * Returns a module which creates bindings for provider methods from the given module. - */ - public static Module forModule(Module module) { - return forObject(module); - } - - /** - * Returns a module which creates bindings for provider methods from the given object. 
- * This is useful notably for GIN - */ - public static Module forObject(Object object) { - // avoid infinite recursion, since installing a module always installs itself - if (object instanceof ProviderMethodsModule) { - return Modules.EMPTY_MODULE; - } - - return new ProviderMethodsModule(object); - } - - @Override - public synchronized void configure(Binder binder) { - for (ProviderMethod providerMethod : getProviderMethods(binder)) { - providerMethod.configure(binder); - } - } - - public List> getProviderMethods(Binder binder) { - List> result = new ArrayList<>(); - for (Class c = delegate.getClass(); c != Object.class; c = c.getSuperclass()) { - for (Method method : c.getMethods()) { - if (method.getAnnotation(Provides.class) != null) { - result.add(createProviderMethod(binder, method)); - } - } - } - return result; - } - - ProviderMethod createProviderMethod(Binder binder, final Method method) { - binder = binder.withSource(method); - Errors errors = new Errors(method); - - // prepare the parameter providers - List> parameterProviders = new ArrayList<>(); - List> parameterTypes = typeLiteral.getParameterTypes(method); - Annotation[][] parameterAnnotations = method.getParameterAnnotations(); - for (int i = 0; i < parameterTypes.size(); i++) { - Key key = getKey(errors, parameterTypes.get(i), method, parameterAnnotations[i]); - parameterProviders.add(binder.getProvider(key)); - } - - @SuppressWarnings("unchecked") // Define T as the method's return type. 
- TypeLiteral returnType = (TypeLiteral) typeLiteral.getReturnType(method); - - Key key = getKey(errors, returnType, method, method.getAnnotations()); - Class scopeAnnotation = Annotations.findScopeAnnotation(errors, method.getAnnotations()); - - for (Message message : errors.getMessages()) { - binder.addError(message); - } - - return new ProviderMethod<>(key, method, delegate, parameterProviders, scopeAnnotation); - } - - static Key getKey(Errors errors, TypeLiteral type, Member member, Annotation[] annotations) { - Annotation bindingAnnotation = Annotations.findBindingAnnotation(errors, member, annotations); - return bindingAnnotation == null ? Key.get(type) : Key.get(type, bindingAnnotation); - } - - @Override - public boolean equals(Object o) { - return o instanceof ProviderMethodsModule && ((ProviderMethodsModule) o).delegate == delegate; - } - - @Override - public int hashCode() { - return delegate.hashCode(); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java index e5a916d4be62e..c5595d570563f 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java @@ -32,8 +32,8 @@ public UntargettedBindingImpl(Object source, Key key, Scoping scoping) { } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java b/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java index 47e5d7d0753c4..22f86d6991e84 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java +++ 
b/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.inject.internal.AbstractBindingBuilder; import org.elasticsearch.common.inject.internal.BindingBuilder; import org.elasticsearch.common.inject.internal.Errors; -import org.elasticsearch.common.inject.internal.ProviderMethodsModule; import org.elasticsearch.common.inject.internal.SourceProvider; import java.lang.annotation.Annotation; @@ -135,7 +134,6 @@ public void install(Module module) { addError(e); } } - binder.install(ProviderMethodsModule.forModule(module)); } } @@ -192,12 +190,6 @@ public RecordingBinder skipSources(Class... classesToSkip) { return new RecordingBinder(this, null, newSourceProvider); } - @Override - public void expose(Key key) { - addError("Cannot expose %s on a standard binder. " + "Exposed bindings are only applicable to private binders.", key); - - } - private static final Logger logger = LogManager.getLogger(Elements.class); protected Object getSource() { diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java deleted file mode 100644 index 1f5b969559020..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2009 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.elasticsearch.common.inject.spi; - -/** - * Listens for injections into instances of type {@code I}. Useful for performing further - * injections, post-injection initialization, and more. - * - * @author crazybob@google.com (Bob Lee) - * @author jessewilson@google.com (Jesse Wilson) - * @since 2.0 - */ -public interface InjectionListener { - - /** - * Invoked by Guice after it injects the fields and methods of instance. - * - * @param injectee instance that Guice injected dependencies into - */ - void afterInjection(I injectee); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java index 4e20b26d83284..945dfca96072e 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java @@ -29,7 +29,7 @@ import java.lang.annotation.Annotation; import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Constructor; -import java.lang.reflect.Field; +import java.lang.reflect.Executable; import java.lang.reflect.Member; import java.lang.reflect.Method; import java.lang.reflect.Modifier; @@ -57,7 +57,7 @@ public final class InjectionPoint { private final boolean optional; - private final Member member; + private final Executable member; private final List> dependencies; InjectionPoint(TypeLiteral type, Method method) { @@ -75,26 +75,6 @@ public final class InjectionPoint { this.dependencies = forMember(constructor, type, constructor.getParameterAnnotations()); } - InjectionPoint(TypeLiteral type, Field field) { - this.member = field; - - Inject inject = field.getAnnotation(Inject.class); - this.optional = inject.optional(); - - Annotation[] annotations = field.getAnnotations(); - - Errors errors = new Errors(field); - Key key = null; - try { - key = Annotations.getKey(type.getFieldType(field), field, annotations, errors); - } 
catch (ErrorsException e) { - errors.merge(e.getErrors()); - } - errors.throwConfigurationExceptionIfErrorsExist(); - - this.dependencies = Collections.singletonList(newDependency(key, Nullability.allowsNull(annotations), -1)); - } - private List> forMember(Member member, TypeLiteral type, Annotation[][] parameterAnnotations) { Errors errors = new Errors(member); Iterator annotationsIterator = Arrays.asList(parameterAnnotations).iterator(); @@ -125,7 +105,7 @@ private Dependency newDependency(Key key, boolean allowsNull, int para /** * Returns the injected constructor, field, or method. */ - public Member getMember() { + public Executable getMember() { return member; } @@ -143,8 +123,7 @@ public List> getDependencies() { /** * Returns true if this injection point shall be skipped if the injector cannot resolve bindings * for all required dependencies. Both explicit bindings (as specified in a module), and implicit - * bindings ({@literal @}{@link org.elasticsearch.common.inject.ImplementedBy ImplementedBy}, default - * constructors etc.) may be used to satisfy optional injection points. + * bindings by default constructors etc.) may be used to satisfy optional injection points. */ public boolean isOptional() { return optional; @@ -230,13 +209,12 @@ public static InjectionPoint forConstructorOf(TypeLiteral type) { * ConfigurationException#getPartialValue() partial value} is a {@code Set} * of the valid injection points. */ - public static Set forInstanceMethodsAndFields(TypeLiteral type) { + public static Set forInstanceMethods(TypeLiteral type) { Set result = new HashSet<>(); Errors errors = new Errors(); // TODO (crazybob): Filter out overridden members. 
- addInjectionPoints(type, Factory.FIELDS, false, result, errors); - addInjectionPoints(type, Factory.METHODS, false, result, errors); + addInjectionPoints(type, false, result, errors); result = unmodifiableSet(result); if (errors.hasErrors()) { @@ -246,7 +224,7 @@ public static Set forInstanceMethodsAndFields(TypeLiteral typ } /** - * Returns all instance method and field injection points on {@code type}. + * Returns all instance method injection points on {@code type}. * * @return a possibly empty set of injection points. The set has a specified iteration order. All * fields are returned and then all methods. Within the fields, supertype fields are returned @@ -256,8 +234,8 @@ public static Set forInstanceMethodsAndFields(TypeLiteral typ * ConfigurationException#getPartialValue() partial value} is a {@code Set} * of the valid injection points. */ - public static Set forInstanceMethodsAndFields(Class type) { - return forInstanceMethodsAndFields(TypeLiteral.get(type)); + public static Set forInstanceMethods(Class type) { + return forInstanceMethods(TypeLiteral.get(type)); } private static void checkForMisplacedBindingAnnotations(Member member, Errors errors) { @@ -274,18 +252,16 @@ private static void checkForMisplacedBindingAnnotations(Member member, Errors er // name. In Scala, fields always get accessor methods (that we need to ignore). See bug 242. if (member instanceof Method) { try { - if (member.getDeclaringClass().getField(member.getName()) != null) { - return; - } + member.getDeclaringClass().getField(member.getName()); + return; } catch (NoSuchFieldException ignore) {} } errors.misplacedBindingAnnotation(member, misplacedBindingAnnotation); } - private static void addInjectionPoints( + private static void addInjectionPoints( TypeLiteral type, - Factory factory, boolean statics, Collection injectionPoints, Errors errors @@ -296,20 +272,19 @@ private static void addInjectionPoints( // Add injectors for superclass first. 
TypeLiteral superType = type.getSupertype(type.getRawType().getSuperclass()); - addInjectionPoints(superType, factory, statics, injectionPoints, errors); + addInjectionPoints(superType, statics, injectionPoints, errors); // Add injectors for all members next - addInjectorsForMembers(type, factory, statics, injectionPoints, errors); + addInjectorsForMembers(type, statics, injectionPoints, errors); } - private static void addInjectorsForMembers( + private static void addInjectorsForMembers( TypeLiteral typeLiteral, - Factory factory, boolean statics, Collection injectionPoints, Errors errors ) { - for (M member : factory.getMembers(getRawType(typeLiteral.getType()))) { + for (Method member : getRawType(typeLiteral.getType()).getMethods()) { if (isStatic(member) != statics) { continue; } @@ -320,7 +295,8 @@ private static void addInjectorsForMembers } try { - injectionPoints.add(factory.create(typeLiteral, member, errors)); + checkForMisplacedBindingAnnotations(member, errors); + injectionPoints.add(new InjectionPoint(typeLiteral, member)); } catch (ConfigurationException ignorable) { if (inject.optional() == false) { errors.merge(ignorable.getErrorMessages()); @@ -333,34 +309,4 @@ private static boolean isStatic(Member member) { return Modifier.isStatic(member.getModifiers()); } - private interface Factory { - Factory FIELDS = new Factory<>() { - @Override - public Field[] getMembers(Class type) { - return type.getFields(); - } - - @Override - public InjectionPoint create(TypeLiteral typeLiteral, Field member, Errors errors) { - return new InjectionPoint(typeLiteral, member); - } - }; - - Factory METHODS = new Factory<>() { - @Override - public Method[] getMembers(Class type) { - return type.getMethods(); - } - - @Override - public InjectionPoint create(TypeLiteral typeLiteral, Method member, Errors errors) { - checkForMisplacedBindingAnnotations(member, errors); - return new InjectionPoint(typeLiteral, member); - } - }; - - M[] getMembers(Class type); - - 
InjectionPoint create(TypeLiteral typeLiteral, M member, Errors errors); - } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java b/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java deleted file mode 100644 index 1162bef25e682..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject.util; - -import org.elasticsearch.common.inject.Module; - -/** - * Static utility methods for creating and working with instances of {@link Module}. 
- * - * @author jessewilson@google.com (Jesse Wilson) - * @since 2.0 - */ -public final class Modules { - private Modules() {} - - public static final Module EMPTY_MODULE = binder -> {}; - -} diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index e1f5879ae9569..e19ad87932e7f 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -92,10 +92,7 @@ public static BuildVersion current() { } // only exists for NodeMetadata#toXContent - // TODO[wrb]: make this abstract once all downstream classes override it - protected int id() { - return -1; - } + public abstract int id(); private static class CurrentExtensionHolder { private static final BuildExtension BUILD_EXTENSION = findExtension(); diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index 6cec751a1cad1..8271b836269a7 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -23,15 +23,14 @@ * give users simple rules in terms of public-facing release versions for Elasticsearch * compatibility when upgrading nodes and prevents downgrades in place.

    */ -// TODO[wrb]: make package-private once default implementations are removed in BuildExtension -public final class DefaultBuildVersion extends BuildVersion { +final class DefaultBuildVersion extends BuildVersion { public static BuildVersion CURRENT = new DefaultBuildVersion(Version.CURRENT.id()); private final int versionId; private final Version version; - public DefaultBuildVersion(int versionId) { + DefaultBuildVersion(int versionId) { assert versionId >= 0 : "Release version IDs must be non-negative integers"; this.versionId = versionId; this.version = Version.fromId(versionId); diff --git a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java index cc02495b39520..b1b9a568e3083 100644 --- a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java +++ b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java @@ -10,7 +10,6 @@ import org.elasticsearch.Build; import org.elasticsearch.env.BuildVersion; -import org.elasticsearch.env.DefaultBuildVersion; /** * Allows plugging in current build info. @@ -29,13 +28,13 @@ default boolean hasReleaseVersioning() { return true; } - // TODO[wrb]: Remove default implementation once downstream BuildExtensions are updated - default BuildVersion currentBuildVersion() { - return DefaultBuildVersion.CURRENT; - } + /** + * Returns the {@link BuildVersion} for the running Elasticsearch code. + */ + BuildVersion currentBuildVersion(); - // TODO[wrb]: Remove default implementation once downstream BuildExtensions are updated - default BuildVersion fromVersionId(int versionId) { - return new DefaultBuildVersion(versionId); - } + /** + * Returns the {@link BuildVersion} for a given version identifier. 
+ */ + BuildVersion fromVersionId(int versionId); } diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 4b9e5dc83c538..87384b50d7ffd 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -195,7 +195,8 @@ public NodeStats stats( adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null, scriptCache ? scriptService.cacheStats() : null, indexingPressure ? this.indexingPressure.stats() : null, - repositoriesStats ? this.repositoriesService.getRepositoriesThrottlingStats() : null + repositoriesStats ? this.repositoriesService.getRepositoriesThrottlingStats() : null, + null ); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index 7a66c6d7c435a..7482ae7683b4a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -68,7 +68,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getSnapshotsRequest.offset(offset); final String afterString = request.param("after"); if (afterString != null) { - getSnapshotsRequest.after(GetSnapshotsRequest.After.fromQueryParam(afterString)); + getSnapshotsRequest.after(SnapshotSortKey.decodeAfterQueryParam(afterString)); } final String fromSortValue = request.param("from_sort_value"); if (fromSortValue != null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index 068c809554631..570fb0ebc7c77 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.common.unit.ByteSizeValue; @@ -37,6 +38,8 @@ @ServerlessScope(Scope.INTERNAL) public class RestAllocationAction extends AbstractCatAction { + private static final String UNASSIGNED = "UNASSIGNED"; + @Override public List routes() { return List.of(new Route(GET, "/_cat/allocation"), new Route(GET, "/_cat/allocation/{nodes}")); @@ -67,9 +70,10 @@ public void processResponse(final ClusterStateResponse state) { statsRequest.setIncludeShardsStats(false); statsRequest.clear() .addMetric(NodesStatsRequestParameters.Metric.FS.metricName()) + .addMetric(NodesStatsRequestParameters.Metric.ALLOCATIONS.metricName()) .indices(new CommonStatsFlags(CommonStatsFlags.Flag.Store)); - client.admin().cluster().nodesStats(statsRequest, new RestResponseListener(channel) { + client.admin().cluster().nodesStats(statsRequest, new RestResponseListener<>(channel) { @Override public RestResponse buildResponse(NodesStatsResponse stats) throws Exception { Table tab = buildTable(request, state, stats); @@ -86,6 +90,9 @@ protected Table getTableWithHeader(final RestRequest request) { final Table table = new Table(); table.startHeaders(); table.addCell("shards", "alias:s;text-align:right;desc:number of shards on node"); + table.addCell("shards.undesired", "text-align:right;desc:number of shards that are scheduled to be moved elsewhere in the cluster"); + table.addCell("write_load.forecast", "alias:wlf,writeLoadForecast;text-align:right;desc:sum of index write load 
forecasts"); + table.addCell("disk.indices.forecast", "alias:dif,diskIndicesForecast;text-align:right;desc:sum of shard size forecasts"); table.addCell("disk.indices", "alias:di,diskIndices;text-align:right;desc:disk used by ES indices"); table.addCell("disk.used", "alias:du,diskUsed;text-align:right;desc:disk used (total, not just ES)"); table.addCell("disk.avail", "alias:da,diskAvail;text-align:right;desc:disk available"); @@ -100,22 +107,17 @@ protected Table getTableWithHeader(final RestRequest request) { } private Table buildTable(RestRequest request, final ClusterStateResponse state, final NodesStatsResponse stats) { - final Map allocs = new HashMap<>(); + final Map shardCounts = new HashMap<>(); for (ShardRouting shard : state.getState().routingTable().allShardsIterator()) { - String nodeId = "UNASSIGNED"; - if (shard.assignedToNode()) { - nodeId = shard.currentNodeId(); - } - allocs.merge(nodeId, 1, Integer::sum); + String nodeId = shard.assignedToNode() ? shard.currentNodeId() : UNASSIGNED; + shardCounts.merge(nodeId, 1, Integer::sum); } Table table = getTableWithHeader(request); for (NodeStats nodeStats : stats.getNodes()) { DiscoveryNode node = nodeStats.getNode(); - int shardCount = allocs.getOrDefault(node.getId(), 0); - ByteSizeValue total = nodeStats.getFs().getTotal().getTotal(); ByteSizeValue avail = nodeStats.getFs().getTotal().getAvailable(); // if we don't know how much we use (non data nodes), it means 0 @@ -127,9 +129,13 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, diskPercent = (short) (used * 100 / (used + avail.getBytes())); } } + NodeAllocationStats nodeAllocationStats = nodeStats.getNodeAllocationStats(); table.startRow(); - table.addCell(shardCount); + table.addCell(shardCounts.getOrDefault(node.getId(), 0)); + table.addCell(nodeAllocationStats != null ? nodeAllocationStats.undesiredShards() : null); + table.addCell(nodeAllocationStats != null ? 
nodeAllocationStats.forecastedIngestLoad() : null); + table.addCell(nodeAllocationStats != null ? ByteSizeValue.ofBytes(nodeAllocationStats.forecastedDiskUsage()) : null); table.addCell(nodeStats.getIndices().getStore().size()); table.addCell(used < 0 ? null : ByteSizeValue.ofBytes(used)); table.addCell(avail.getBytes() < 0 ? null : avail); @@ -142,10 +148,12 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, table.endRow(); } - final String UNASSIGNED = "UNASSIGNED"; - if (allocs.containsKey(UNASSIGNED)) { + if (shardCounts.containsKey(UNASSIGNED)) { table.startRow(); - table.addCell(allocs.get(UNASSIGNED)); + table.addCell(shardCounts.get(UNASSIGNED)); + table.addCell(null); + table.addCell(null); + table.addCell(null); table.addCell(null); table.addCell(null); table.addCell(null); @@ -160,5 +168,4 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, return table; } - } diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 9b70776551ba6..e5e0f9ee926f3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -57,7 +57,6 @@ import org.elasticsearch.search.suggest.completion.CompletionStats; import java.util.List; -import java.util.Locale; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -375,14 +374,14 @@ Table buildTable( ByteSizeValue diskTotal = null; ByteSizeValue diskUsed = null; ByteSizeValue diskAvailable = null; - String diskUsedPercent = null; + RestTable.FormattedDouble diskUsedPercent = null; if (fsInfo != null) { diskTotal = fsInfo.getTotal().getTotal(); diskAvailable = fsInfo.getTotal().getAvailable(); diskUsed = ByteSizeValue.ofBytes(diskTotal.getBytes() - diskAvailable.getBytes()); double 
diskUsedRatio = diskTotal.getBytes() == 0 ? 1.0 : (double) diskUsed.getBytes() / diskTotal.getBytes(); - diskUsedPercent = String.format(Locale.ROOT, "%.2f", 100.0 * diskUsedRatio); + diskUsedPercent = RestTable.FormattedDouble.format2DecimalPlaces(100.0 * diskUsedRatio); } table.addCell(diskTotal); table.addCell(diskUsed); @@ -408,17 +407,17 @@ Table buildTable( table.addCell( hasLoadAverage == false || osStats.getCpu().getLoadAverage()[0] == -1 ? null - : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0]) + : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[0]) ); table.addCell( hasLoadAverage == false || osStats.getCpu().getLoadAverage()[1] == -1 ? null - : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1]) + : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[1]) ); table.addCell( hasLoadAverage == false || osStats.getCpu().getLoadAverage()[2] == -1 ? null - : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2]) + : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[2]) ); table.addCell(jvmStats == null ? 
null : jvmStats.getUptime()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java index 6845fec4db6fe..cfe5d6d2aef39 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java @@ -496,4 +496,24 @@ public boolean isReversed() { return reverse; } } + + /** + * A formatted number, such that it sorts according to its numeric value but captures a specific string representation too + */ + record FormattedDouble(String displayValue, double numericValue) implements Comparable { + + static FormattedDouble format2DecimalPlaces(double numericValue) { + return new FormattedDouble(Strings.format("%.2f", numericValue), numericValue); + } + + @Override + public int compareTo(FormattedDouble other) { + return Double.compare(numericValue, other.numericValue); + } + + @Override + public String toString() { + return displayValue; + } + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 0290bfb9c236f..e4b821fba7634 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.cluster.service.ClusterApplierRecordingService; import org.elasticsearch.cluster.service.ClusterApplierRecordingService.Stats.Recording; import org.elasticsearch.cluster.service.ClusterStateUpdateStats; @@ -1043,6 +1044,13 @@ public static 
NodeStats createNodeStats() { RepositoriesStats repositoriesStats = new RepositoriesStats( Map.of("test-repository", new RepositoriesStats.ThrottlingStats(100, 200)) ); + NodeAllocationStats nodeAllocationStats = new NodeAllocationStats( + randomIntBetween(0, 10000), + randomIntBetween(0, 1000), + randomDoubleBetween(0, 8, true), + randomNonNegativeLong(), + randomNonNegativeLong() + ); return new NodeStats( node, @@ -1062,7 +1070,8 @@ public static NodeStats createNodeStats() { adaptiveSelectionStats, scriptCacheStats, indexingPressureStats, - repositoriesStats + repositoriesStats, + nodeAllocationStats ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java index 14db6bdf84264..810d297602e8a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java @@ -50,7 +50,7 @@ public void testValidateParameters() { } { final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false) - .after(new GetSnapshotsRequest.After("foo", "repo", "bar")); + .after(new SnapshotSortKey.After("foo", "repo", "bar")); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after with verbose=false")); } @@ -61,14 +61,14 @@ public void testValidateParameters() { } { final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").after( - new GetSnapshotsRequest.After("foo", "repo", "bar") + new SnapshotSortKey.After("foo", "repo", "bar") ).offset(randomIntBetween(1, 500)); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after and offset simultaneously")); } { final GetSnapshotsRequest 
request = new GetSnapshotsRequest("repo", "snapshot").fromSortValue("foo") - .after(new GetSnapshotsRequest.After("foo", "repo", "bar")); + .after(new SnapshotSortKey.After("foo", "repo", "bar")); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after and from_sort_value simultaneously")); } diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 5e122c4050b6c..8334c535cea43 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -183,6 +183,7 @@ public void testLeastAndMostAvailableDiskSpace() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -211,6 +212,7 @@ public void testLeastAndMostAvailableDiskSpace() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -241,6 +243,7 @@ public void testLeastAndMostAvailableDiskSpace() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -275,6 +278,7 @@ public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -304,6 +308,7 @@ public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -334,6 +339,7 @@ public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { null, null, null, + null, null ); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java index ddb1ccbbd4f9a..f0b6d62ef9767 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java @@ -8,12 +8,16 @@ package org.elasticsearch.cluster.coordination.stateless; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -65,7 +69,7 @@ protected long absoluteTimeInMillis() { // Either there's no heartbeat or is stale if (randomBoolean()) { PlainActionFuture.get(f -> heartbeatStore.writeHeartbeat(new Heartbeat(1, fakeClock.get()), f)); - fakeClock.set(maxTimeSinceLastHeartbeat.millis() + 1); + fakeClock.set(maxTimeSinceLastHeartbeat.millis() + randomLongBetween(0, 1000)); } var startElection = new AtomicBoolean(); @@ -76,6 +80,55 @@ protected long absoluteTimeInMillis() { assertThat(startElection.get(), is(true)); } + public void testLogSkippedElectionIfRecentLeaderHeartbeat() throws Exception { + final var currentTermProvider = new AtomicLong(1); + final var heartbeatFrequency = TimeValue.timeValueSeconds(randomIntBetween(15, 30)); + final var maxTimeSinceLastHeartbeat = TimeValue.timeValueSeconds(2 * heartbeatFrequency.seconds()); + DiscoveryNodeUtils.create("master"); + final var logger = LogManager.getLogger(AtomicRegisterPreVoteCollector.class); + final var appender = new MockLogAppender(); + appender.start(); + try { + Loggers.addAppender(logger, appender); + appender.addExpectation( + new 
MockLogAppender.SeenEventExpectation( + "log emitted when skipping election", + AtomicRegisterPreVoteCollector.class.getCanonicalName(), + Level.INFO, + "skipping election since there is a recent heartbeat*" + ) + ); + final var fakeClock = new AtomicLong(); + final var heartbeatStore = new InMemoryHeartbeatStore(); + final var heartbeatService = new StoreHeartbeatService( + heartbeatStore, + threadPool, + heartbeatFrequency, + maxTimeSinceLastHeartbeat, + listener -> listener.onResponse(OptionalLong.of(currentTermProvider.get())) + ) { + @Override + protected long absoluteTimeInMillis() { + return fakeClock.get(); + } + }; + + PlainActionFuture.get(f -> heartbeatStore.writeHeartbeat(new Heartbeat(1, fakeClock.get()), f)); + fakeClock.addAndGet(randomLongBetween(0L, maxTimeSinceLastHeartbeat.millis() - 1)); + + var startElection = new AtomicBoolean(); + var preVoteCollector = new AtomicRegisterPreVoteCollector(heartbeatService, () -> startElection.set(true)); + + preVoteCollector.start(ClusterState.EMPTY_STATE, Collections.emptyList()); + + assertThat(startElection.get(), is(false)); + appender.assertAllExpectationsMatched(); + } finally { + Loggers.removeAppender(logger, appender); + appender.stop(); + } + } + public void testElectionDoesNotRunWhenThereIsALeader() throws Exception { final var currentTermProvider = new AtomicLong(1); final var heartbeatFrequency = TimeValue.timeValueSeconds(randomIntBetween(15, 30)); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java index 1df613a500f83..bad8385acfbf3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java @@ -233,7 +233,7 @@ protected long absoluteTimeInMillis() { assertThat(heartbeat, 
is(nullValue())); AtomicBoolean noRecentLeaderFound = new AtomicBoolean(); - heartbeatService.runIfNoRecentLeader(() -> noRecentLeaderFound.set(true)); + heartbeatService.checkLeaderHeartbeatAndRun(() -> noRecentLeaderFound.set(true), hb -> {}); assertThat(noRecentLeaderFound.get(), is(true)); } @@ -242,7 +242,7 @@ protected long absoluteTimeInMillis() { PlainActionFuture.get(f -> heartbeatStore.writeHeartbeat(new Heartbeat(1, fakeClock.get()), f)); AtomicBoolean noRecentLeaderFound = new AtomicBoolean(); - heartbeatService.runIfNoRecentLeader(() -> noRecentLeaderFound.set(true)); + heartbeatService.checkLeaderHeartbeatAndRun(() -> noRecentLeaderFound.set(true), hb -> {}); assertThat(noRecentLeaderFound.get(), is(false)); } @@ -252,7 +252,7 @@ protected long absoluteTimeInMillis() { fakeClock.set(maxTimeSinceLastHeartbeat.millis() + 1); AtomicBoolean noRecentLeaderFound = new AtomicBoolean(); - heartbeatService.runIfNoRecentLeader(() -> noRecentLeaderFound.set(true)); + heartbeatService.checkLeaderHeartbeatAndRun(() -> noRecentLeaderFound.set(true), hb -> {}); assertThat(noRecentLeaderFound.get(), is(true)); } @@ -273,7 +273,7 @@ protected long absoluteTimeInMillis() { ) ); try (var ignored = mockAppender.capturing(StoreHeartbeatService.class)) { - heartbeatService.runIfNoRecentLeader(() -> fail("should not be called")); + heartbeatService.checkLeaderHeartbeatAndRun(() -> fail("should not be called"), hb -> {}); mockAppender.assertAllExpectationsMatched(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java index 8c3d36464784e..491ba868dfd9b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java @@ -59,18 +59,16 @@ 
protected ClusterState.Custom mutateInstance(ClusterState.Custom instance) { var maxRetention = metadata.getMaxRetention(); switch (randomInt(1)) { case 0 -> { - if (defaultRetention == null) { - defaultRetention = TimeValue.timeValueDays(randomIntBetween(1, 1000)); - } else { - defaultRetention = randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1, 1000)); - } + defaultRetention = randomValueOtherThan( + defaultRetention, + () -> randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1, 1000)) + ); } case 1 -> { - if (maxRetention == null) { - maxRetention = TimeValue.timeValueDays(randomIntBetween(1000, 2000)); - } else { - maxRetention = randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1000, 2000)); - } + maxRetention = randomValueOtherThan( + maxRetention, + () -> randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1001, 2000)) + ); } } return new DataStreamGlobalRetention(defaultRetention, maxRetention); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 141434842a4bc..a07cd8e60411a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -1842,7 +1842,6 @@ public void testWriteFailureIndex() { assertThat(failureStoreDataStream.getFailureStoreWriteIndex(), is(writeFailureIndex)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106123") public void testIsFailureIndex() { boolean hidden = randomBoolean(); boolean system = hidden && randomBoolean(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java index e11f8c0cbe108..ea79bc8f13765 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.ExecutorNames; import org.elasticsearch.indices.SystemDataStreamDescriptor; @@ -59,6 +60,7 @@ public void testCreateDataStream() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, true, req, @@ -98,6 +100,7 @@ public void testCreateDataStreamWithAliasFromTemplate() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -174,6 +177,7 @@ public void testCreateDataStreamWithAliasFromComponentTemplate() throws Exceptio CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -226,6 +230,7 @@ public void testCreateDataStreamWithFailureStore() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -246,6 +251,40 @@ public void 
testCreateDataStreamWithFailureStore() throws Exception { assertThat(newState.metadata().index(failureStoreIndexName).isSystem(), is(false)); } + public void testCreateDataStreamWithFailureStoreWithRefreshRate() throws Exception { + final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); + var timeValue = randomTimeValue(); + var settings = Settings.builder() + .put(MetadataCreateDataStreamService.FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME, timeValue) + .build(); + final String dataStreamName = "my-data-stream"; + ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true)) + .build(); + ClusterState cs = ClusterState.builder(new ClusterName("_name")) + .metadata(Metadata.builder().put("template", template).build()) + .build(); + CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); + ClusterState newState = MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + settings, + cs, + randomBoolean(), + req, + ActionListener.noop() + ); + var backingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, req.getStartTime()); + var failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, req.getStartTime()); + assertThat(newState.metadata().dataStreams().size(), equalTo(1)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); + assertThat(newState.metadata().index(backingIndexName), notNullValue()); + assertThat(newState.metadata().index(failureStoreIndexName), notNullValue()); + assertThat( + newState.metadata().index(failureStoreIndexName).getSettings().get(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey()), + equalTo(timeValue) + ); + } + public void testCreateSystemDataStream() throws Exception { final 
MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); final String dataStreamName = ".system-data-stream"; @@ -259,6 +298,7 @@ public void testCreateSystemDataStream() throws Exception { ); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -291,6 +331,7 @@ public void testCreateDuplicateDataStream() throws Exception { ResourceAlreadyExistsException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -309,6 +350,7 @@ public void testCreateDataStreamWithInvalidName() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -327,6 +369,7 @@ public void testCreateDataStreamWithUppercaseCharacters() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -345,6 +388,7 @@ public void testCreateDataStreamStartingWithPeriod() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -363,6 +407,7 @@ public void testCreateDataStreamNoTemplate() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -384,6 +429,7 @@ public void testCreateDataStreamNoValidTemplate() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -408,6 +454,7 @@ public static ClusterState createDataStream(final String dataStreamName) throws 
CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); return MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java index 128601ff21250..cefbd31db1ee6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java @@ -297,6 +297,7 @@ public void testCreateDataStreamWithSuppliedWriteIndex() throws Exception { TimeValue.ZERO ), getMetadataCreateIndexService(), + Settings.EMPTY, ActionListener.noop() ); IndexAbstraction ds = newState.metadata().getIndicesLookup().get(dataStreamName); @@ -355,6 +356,7 @@ public void testCreateDataStreamHidesBackingIndicesAndRemovesAlias() throws Exce TimeValue.ZERO ), getMetadataCreateIndexService(), + Settings.EMPTY, ActionListener.noop() ); IndexAbstraction ds = newState.metadata().getIndicesLookup().get(dataStreamName); @@ -415,6 +417,7 @@ public void testCreateDataStreamWithoutSuppliedWriteIndex() { TimeValue.ZERO ), getMetadataCreateIndexService(), + Settings.EMPTY, ActionListener.noop() ) ); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java new file mode 100644 index 0000000000000..d99d4c1b54527 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java @@ -0,0 +1,205 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardAssignment; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.test.ClusterServiceUtils; + +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; +import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.hasEntry; + +public class 
AllocationStatsServiceTests extends ESAllocationTestCase { + + public void testShardStats() { + + var ingestLoadForecast = randomDoubleBetween(0, 10, true); + var shardSizeForecast = randomNonNegativeLong(); + var currentShardSize = randomNonNegativeLong(); + + var indexMetadata = IndexMetadata.builder("my-index") + .settings(indexSettings(IndexVersion.current(), 1, 0)) + .indexWriteLoadForecast(ingestLoadForecast) + .shardSizeInBytesForecast(shardSizeForecast) + .build(); + var shardId = new ShardId(indexMetadata.getIndex(), 0); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("node-1"))) + .metadata(Metadata.builder().put(indexMetadata, false)) + .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(indexMetadata.getIndex()) + .addShard(newShardRouting(shardId, "node-1", true, ShardRoutingState.STARTED)) + .build() + ) + ) + .build(); + + var clusterInfo = new ClusterInfo( + Map.of(), + Map.of(), + Map.of(ClusterInfo.shardIdentifierFromRouting(shardId, true), currentShardSize), + Map.of(), + Map.of(), + Map.of() + ); + + var queue = new DeterministicTaskQueue(); + try (var clusterService = ClusterServiceUtils.createClusterService(state, queue.getThreadPool())) { + var service = new AllocationStatsService(clusterService, () -> clusterInfo, createShardAllocator(), TEST_WRITE_LOAD_FORECASTER); + assertThat( + service.stats(), + allOf( + aMapWithSize(1), + hasEntry( + "node-1", + new NodeAllocationStats(1, -1, ingestLoadForecast, Math.max(shardSizeForecast, currentShardSize), currentShardSize) + ) + ) + ); + } + } + + public void testRelocatingShardIsOnlyCountedOnceOnTargetNode() { + + var indexMetadata = IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 1, 0)).build(); + var state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("node-1")).add(newNode("node-2"))) + .metadata(Metadata.builder().put(indexMetadata, false)) 
+ .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(indexMetadata.getIndex()) + .addShard( + shardRoutingBuilder(new ShardId(indexMetadata.getIndex(), 0), "node-1", true, ShardRoutingState.RELOCATING) + .withRelocatingNodeId("node-2") + .build() + ) + .build() + ) + ) + .build(); + + var queue = new DeterministicTaskQueue(); + try (var clusterService = ClusterServiceUtils.createClusterService(state, queue.getThreadPool())) { + var service = new AllocationStatsService( + clusterService, + EmptyClusterInfoService.INSTANCE, + createShardAllocator(), + TEST_WRITE_LOAD_FORECASTER + ); + assertThat( + service.stats(), + allOf( + aMapWithSize(2), + hasEntry("node-1", new NodeAllocationStats(0, -1, 0, 0, 0)), + hasEntry("node-2", new NodeAllocationStats(1, -1, 0, 0, 0)) + ) + ); + } + } + + public void testUndesiredShardCount() { + + var indexMetadata = IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 2, 0)).build(); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("node-1")).add(newNode("node-2")).add(newNode("node-3"))) + .metadata(Metadata.builder().put(indexMetadata, false)) + .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(indexMetadata.getIndex()) + .addShard(newShardRouting(new ShardId(indexMetadata.getIndex(), 0), "node-1", true, ShardRoutingState.STARTED)) + .addShard(newShardRouting(new ShardId(indexMetadata.getIndex(), 1), "node-3", true, ShardRoutingState.STARTED)) + .build() + ) + ) + .build(); + + var queue = new DeterministicTaskQueue(); + var threadPool = queue.getThreadPool(); + try (var clusterService = ClusterServiceUtils.createClusterService(state, threadPool)) { + var service = new AllocationStatsService( + clusterService, + EmptyClusterInfoService.INSTANCE, + new DesiredBalanceShardsAllocator( + ClusterSettings.createBuiltInClusterSettings(), + createShardAllocator(), + threadPool, + clusterService, + 
(innerState, strategy) -> innerState, + TelemetryProvider.NOOP + ) { + @Override + public DesiredBalance getDesiredBalance() { + return new DesiredBalance( + 1, + Map.ofEntries( + Map.entry(new ShardId(indexMetadata.getIndex(), 0), new ShardAssignment(Set.of("node-1"), 1, 0, 0)), + Map.entry(new ShardId(indexMetadata.getIndex(), 1), new ShardAssignment(Set.of("node-2"), 1, 0, 0)) + ) + ); + } + }, + TEST_WRITE_LOAD_FORECASTER + ); + assertThat( + service.stats(), + allOf( + aMapWithSize(3), + hasEntry("node-1", new NodeAllocationStats(1, 0, 0, 0, 0)), + hasEntry("node-2", new NodeAllocationStats(0, 0, 0, 0, 0)), + hasEntry("node-3", new NodeAllocationStats(1, 1, 0, 0, 0)) // [my-index][1] should be allocated to [node-2] + ) + ); + } + } + + private ShardsAllocator createShardAllocator() { + return new ShardsAllocator() { + @Override + public void allocate(RoutingAllocation allocation) { + + } + + @Override + public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { + return null; + } + }; + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsTests.java new file mode 100644 index 0000000000000..ad371ed239795 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsTests.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+public class NodeAllocationStatsTests extends AbstractWireSerializingTestCase<NodeAllocationStats> {
+
+    @Override
+    protected Writeable.Reader<NodeAllocationStats> instanceReader() {
+        return NodeAllocationStats::new;
+    }
+
+    @Override
+    protected NodeAllocationStats createTestInstance() {
+        return new NodeAllocationStats(
+            randomIntBetween(0, 10000),
+            randomIntBetween(0, 1000),
+            randomDoubleBetween(0, 8, true),
+            randomNonNegativeLong(),
+            randomNonNegativeLong()
+        );
+    }
+
+    @Override
+    protected NodeAllocationStats mutateInstance(NodeAllocationStats instance) throws IOException {
+        return switch (randomInt(4)) {
+            case 0 -> new NodeAllocationStats(
+                randomValueOtherThan(instance.shards(), () -> randomIntBetween(0, 10000)),
+                instance.undesiredShards(),
+                instance.forecastedIngestLoad(),
+                instance.forecastedDiskUsage(),
+                instance.currentDiskUsage()
+            );
+            case 1 -> new NodeAllocationStats(
+                instance.shards(),
+                randomValueOtherThan(instance.undesiredShards(), () -> randomIntBetween(0, 1000)),
+                instance.forecastedIngestLoad(),
+                instance.forecastedDiskUsage(),
+                instance.currentDiskUsage()
+            );
+            case 2 -> new NodeAllocationStats(
+                instance.shards(),
+                instance.undesiredShards(),
+                randomValueOtherThan(instance.forecastedIngestLoad(), () -> randomDoubleBetween(0, 8, true)),
+                instance.forecastedDiskUsage(),
+                instance.currentDiskUsage()
+            );
+            case 3 -> new NodeAllocationStats(
+                instance.shards(),
+                instance.undesiredShards(),
+                instance.forecastedIngestLoad(),
+                randomValueOtherThan(instance.forecastedDiskUsage(), ESTestCase::randomNonNegativeLong),
+                instance.currentDiskUsage()
+            );
+            case 4 -> new NodeAllocationStats(
+                instance.shards(),
+                instance.undesiredShards(),
+                instance.forecastedIngestLoad(),
+                instance.forecastedDiskUsage(),
+                randomValueOtherThan(instance.currentDiskUsage(), ESTestCase::randomNonNegativeLong)
+            );
+            default -> throw new RuntimeException("unreachable");
+        };
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java
index eb1d5838c734b..351efa59f2381 100644
--- a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java
@@ -18,6 +18,7 @@
 import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BiPredicate;
 import java.util.function.ToIntFunction;
@@ -216,6 +217,31 @@ public void testMap() {
         assertEquals(array.length, index.get());
     }
 
+    public void testFailFast() {
+        final var array = randomIntegerArray();
+        assertEmptyIterator(Iterators.failFast(Iterators.forArray(array), () -> true));
+
+        final var index = new AtomicInteger();
+        Iterators.failFast(Iterators.forArray(array), () -> false).forEachRemaining(i -> assertEquals(array[index.getAndIncrement()], i));
+        assertEquals(array.length, index.get());
+
+        final var isFailing = new AtomicBoolean();
+        index.set(0);
+        Iterators.failFast(Iterators.concat(Iterators.forArray(array), new Iterator<>() {
+            @Override
+            public boolean hasNext() {
+                isFailing.set(true);
+                return true;
+            }
+
+            @Override
+            public Integer next() {
+                return 0;
+            }
+        }), isFailing::get).forEachRemaining(i -> assertEquals(array[index.getAndIncrement()], i));
+        assertEquals(array.length, index.get());
+    }
+
     public void testEquals() {
         final BiPredicate<Object, Object> notCalled = (a, b) -> { throw new AssertionError("not called"); };
diff --git a/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java
b/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java index 7089e5a19bc63..dd2ef861e85c3 100644 --- a/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java @@ -325,6 +325,7 @@ private NodeStats nodeStats(FsInfo fs) { null, null, null, + null, null ); } diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java index 2f5293d7a44a8..7ddd63db73986 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java @@ -9,17 +9,24 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.monitor.fs.FsInfo; +import org.elasticsearch.monitor.os.OsStats; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.junit.Before; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; +import java.util.Map; import static java.util.Collections.emptySet; import static org.mockito.Mockito.mock; @@ -48,4 +55,70 @@ public void testBuildTableDoesNotThrowGivenNullNodeInfoAndStats() { action.buildTable(false, new FakeRestRequest(), clusterStateResponse, nodesInfoResponse, 
nodesStatsResponse); } + + public void testFormattedNumericSort() { + final var clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("node-1")).add(DiscoveryNodeUtils.create("node-2"))) + .build(); + + final var nowMillis = System.currentTimeMillis(); + final var rowOrder = RestTable.getRowOrder( + action.buildTable( + false, + new FakeRestRequest(), + new ClusterStateResponse(clusterState.getClusterName(), clusterState, false), + new NodesInfoResponse(clusterState.getClusterName(), List.of(), List.of()), + new NodesStatsResponse( + clusterState.getClusterName(), + List.of( + // sorting 10 vs 9 in all relevant columns, since these sort incorrectly as strings + getTrickySortingNodeStats(nowMillis, clusterState.nodes().get("node-1"), 10), + getTrickySortingNodeStats(nowMillis, clusterState.nodes().get("node-2"), 9) + ), + Collections.emptyList() + ) + ), + new FakeRestRequest.Builder(xContentRegistry()).withParams( + Map.of("s", randomFrom("load_1m", "load_5m", "load_15m", "disk.used_percent")) + ).build() + ); + + final var nodesList = new ArrayList(); + for (final var node : clusterState.nodes()) { + nodesList.add(node); + } + + assertEquals("node-2", nodesList.get(rowOrder.get(0)).getId()); + assertEquals("node-1", nodesList.get(rowOrder.get(1)).getId()); + } + + private static NodeStats getTrickySortingNodeStats(long nowMillis, DiscoveryNode node, int sortValue) { + return new NodeStats( + node, + nowMillis, + null, + new OsStats( + nowMillis, + new OsStats.Cpu((short) sortValue, new double[] { sortValue, sortValue, sortValue }), + new OsStats.Mem(0, 0, 0), + new OsStats.Swap(0, 0), + null + ), + null, + null, + null, + new FsInfo(nowMillis, null, new FsInfo.Path[] { new FsInfo.Path("/foo", "/foo", 100, 100 - sortValue, 100 - sortValue) }), + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + } } diff --git 
a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java index 7a8c67177aade..1ec180fdaad77 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java @@ -259,6 +259,26 @@ public void testMultiSort() { assertEquals(Arrays.asList(1, 0, 2), rowOrder); } + public void testFormattedDouble() { + Table table = new Table(); + table.startHeaders(); + table.addCell("number"); + table.endHeaders(); + List comparisonList = Arrays.asList(10, 9, 11); + for (int i = 0; i < comparisonList.size(); i++) { + table.startRow(); + table.addCell(RestTable.FormattedDouble.format2DecimalPlaces(comparisonList.get(i))); + table.endRow(); + } + restRequest.params().put("s", "number"); + List rowOrder = RestTable.getRowOrder(table, restRequest); + assertEquals(Arrays.asList(1, 0, 2), rowOrder); + + restRequest.params().put("s", "number:desc"); + rowOrder = RestTable.getRowOrder(table, restRequest); + assertEquals(Arrays.asList(2, 0, 1), rowOrder); + } + public void testPlainTextChunking() throws Exception { final var cells = randomArray(8, 8, String[]::new, () -> randomAlphaOfLengthBetween(1, 5)); final var expectedRow = String.join(" ", cells) + "\n"; diff --git a/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java index f3ef110ad4ce8..f0473ae344a79 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java @@ -121,6 +121,7 @@ private NodeStats randomNodeStatsWithOnlyHttpStats(int i) { null, null, null, + null, null ); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java 
b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index 1004ea5b50119..e07c27b22c926 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -89,7 +89,8 @@ List adjustNodesStats(List nodesStats) { nodeStats.getAdaptiveSelectionStats(), nodeStats.getScriptCacheStats(), nodeStats.getIndexingPressureStats(), - nodeStats.getRepositoriesStats() + nodeStats.getRepositoriesStats(), + nodeStats.getNodeAllocationStats() ); }).collect(Collectors.toList()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index a841e9b4304b3..307daddd17c37 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -355,16 +355,21 @@ public void initClient() throws IOException { assert nodesVersions != null; } - protected TestFeatureService createTestFeatureService( + protected List createAdditionalFeatureSpecifications() { + return List.of(); + } + + protected final TestFeatureService createTestFeatureService( Map> clusterStateFeatures, Set semanticNodeVersions ) { // Historical features information is unavailable when using legacy test plugins boolean hasHistoricalFeaturesInformation = System.getProperty("tests.features.metadata.path") != null; - final List featureSpecifications; + final List featureSpecifications = new ArrayList<>(createAdditionalFeatureSpecifications()); + featureSpecifications.add(new RestTestLegacyFeatures()); if (hasHistoricalFeaturesInformation) { - featureSpecifications = List.of(new RestTestLegacyFeatures(), new ESRestTestCaseHistoricalFeatures()); + featureSpecifications.add(new ESRestTestCaseHistoricalFeatures()); } else { logger.warn( "This 
test is running on the legacy test framework; historical features from production code will not be available. " @@ -372,7 +377,6 @@ protected TestFeatureService createTestFeatureService( + "If this is a legacy feature used only in tests, you can add it to a test-only FeatureSpecification such as {}.", RestTestLegacyFeatures.class.getCanonicalName() ); - featureSpecifications = List.of(new RestTestLegacyFeatures()); } return new ESRestTestFeatureService( @@ -1105,7 +1109,7 @@ protected static void wipeAllIndices(boolean preserveSecurityIndices) throws IOE try { // remove all indices except some history indices which can pop up after deleting all data streams but shouldn't interfere final List indexPatterns = new ArrayList<>( - List.of("*", "-.ds-ilm-history-*", "-.ds-.slm-history-*", ".ds-.watcher-history-*") + List.of("*", "-.ds-ilm-history-*", "-.ds-.slm-history-*", "-.ds-.watcher-history-*") ); if (preserveSecurityIndices) { indexPatterns.add("-.security-*"); diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 7d8d1175385a1..804f4eae4042d 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.test.ClasspathUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.TestFeatureService; @@ -194,6 +195,11 @@ public void initAndResetContext() throws Exception { restTestExecutionContext.clear(); } + @Override + protected List 
createAdditionalFeatureSpecifications() { + return List.of(new YamlTestLegacyFeatures()); + } + /** * Create the test execution context. Can be overwritten in sub-implementations of the test if the context needs to be modified. */ diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java new file mode 100644 index 0000000000000..0c27cea49f955 --- /dev/null +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.rest.yaml; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +/** + * This class groups historical features that have been removed from the production codebase, but are still used by YAML test + * to support BwC. Rather than leaving them in the main src we group them here, so it's clear they are not used in production code anymore. 
+ */ +public class YamlTestLegacyFeatures implements FeatureSpecification { + + private static final NodeFeature CAT_ALIASES_SHOW_WRITE_INDEX = new NodeFeature("cat_aliases_show_write_index"); + + @Override + public Map getHistoricalFeatures() { + return Map.ofEntries(Map.entry(CAT_ALIASES_SHOW_WRITE_INDEX, Version.V_7_4_0)); + } +} diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java index 3f147c94c5ec2..9658db911f6df 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java @@ -443,6 +443,7 @@ private static NodeStats statsForNode(DiscoveryNode node, long memory) { null, null, null, + null, null ); } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json index e2d17c8327704..3f2e0ca21bdbd 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json @@ -7,10 +7,14 @@ }, "composed_of": [ "profiling-events", + "profiling-events@custom", "profiling-ilm", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-events@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for profiling-events", diff --git 
a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json index 57fd114c57e27..088589f7df769 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-executables", + "profiling-executables@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-executables@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for .profiling-executables", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json index 526d8090b0ac6..4d750726b8028 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json @@ -5,10 +5,14 @@ "data_stream": {}, "composed_of": [ "profiling-hosts", + "profiling-hosts@custom", "profiling-ilm", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-hosts@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for profiling-hosts", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json 
b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json index d09de006d025d..74516d7cb826c 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json @@ -5,10 +5,14 @@ "data_stream": {}, "composed_of": [ "profiling-metrics", + "profiling-metrics@custom", "profiling-ilm", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-metrics@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for profiling-metrics", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json index 694ae6ba92a57..0cbd868c2eade 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-stackframes", + "profiling-stackframes@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-stackframes@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for .profiling-stackframes", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json index c4c920a76c375..d280906873ffa 100644 --- 
a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-stacktraces", + "profiling-stacktraces@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-stacktraces@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for .profiling-stacktraces", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json index a7bae1adbb548..dd5eca49b9daa 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-symbols", + "profiling-symbols@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-symbols@custom", + "profiling-ilm@custom" + ], "template": { "settings": { "index": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json index 999bf7721b897..04c382e558591 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json +++ 
b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json @@ -3,7 +3,11 @@ ".profiling-symbols-private*" ], "composed_of": [ - "profiling-symbols" + "profiling-symbols", + "profiling-symbols@custom" + ], + "ignore_missing_component_templates": [ + "profiling-symbols@custom" ], "priority": 100, "_meta": { diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java deleted file mode 100644 index dc915738f6d13..0000000000000 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java +++ /dev/null @@ -1,443 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.downsample; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.downsample.DownsampleAction; -import org.elasticsearch.action.downsample.DownsampleConfig; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.VersionConflictEngineException; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; - -import java.io.IOException; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Locale; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; 
-import java.util.function.Consumer; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomInterval; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 4) -public class DownsampleClusterDisruptionIT extends ESIntegTestCase { - private static final Logger logger = LogManager.getLogger(DownsampleClusterDisruptionIT.class); - private static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); - private static final TimeValue TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); - public static final String FIELD_TIMESTAMP = "@timestamp"; - public static final String FIELD_DIMENSION_1 = "dimension_kw"; - public static final String FIELD_DIMENSION_2 = "dimension_long"; - public static final String FIELD_METRIC_COUNTER = "counter"; - public static final int DOC_COUNT = 10_000; - - @Override - protected Collection> nodePlugins() { - return List.of(LocalStateCompositeXPackPlugin.class, Downsample.class, AggregateMetricMapperPlugin.class); - } - - interface DisruptionListener { - void disruptionStart(); - - void disruptionEnd(); - } - - private class Disruptor implements Runnable { - final InternalTestCluster cluster; - private final String sourceIndex; - private final DisruptionListener listener; - private final String clientNode; - private final Consumer disruption; - - private Disruptor( - final InternalTestCluster cluster, - final String sourceIndex, - final DisruptionListener listener, - final String clientNode, - final Consumer disruption - ) { - this.cluster = cluster; - this.sourceIndex = sourceIndex; - this.listener = listener; - this.clientNode = clientNode; - this.disruption = disruption; - } - - @Override - public 
void run() { - listener.disruptionStart(); - try { - final String candidateNode = cluster.client(clientNode) - .admin() - .cluster() - .prepareSearchShards(sourceIndex) - .get() - .getNodes()[0].getName(); - logger.info("Candidate node [" + candidateNode + "]"); - disruption.accept(candidateNode); - ensureGreen(TimeValue.timeValueSeconds(60), sourceIndex); - ensureStableCluster(cluster.numDataAndMasterNodes(), clientNode); - - } catch (Exception e) { - logger.error("Ignoring Error while injecting disruption [" + e.getMessage() + "]"); - } finally { - listener.disruptionEnd(); - } - } - } - - public void setup(final String sourceIndex, int numOfShards, int numOfReplicas, long startTime) throws IOException { - final Settings.Builder settings = indexSettings(numOfShards, numOfReplicas).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)) - .put( - IndexSettings.TIME_SERIES_START_TIME.getKey(), - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(Instant.ofEpochMilli(startTime).toEpochMilli()) - ) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z"); - - if (randomBoolean()) { - settings.put(IndexMetadata.SETTING_INDEX_HIDDEN, randomBoolean()); - } - - final XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties"); - mapping.startObject(FIELD_TIMESTAMP).field("type", "date").endObject(); - - mapping.startObject(FIELD_DIMENSION_1).field("type", "keyword").field("time_series_dimension", true).endObject(); - mapping.startObject(FIELD_DIMENSION_2).field("type", "long").field("time_series_dimension", true).endObject(); - - mapping.startObject(FIELD_METRIC_COUNTER) - .field("type", "double") /* numeric label indexed as a metric */ - .field("time_series_metric", "counter") - .endObject(); - - mapping.endObject().endObject().endObject(); - 
assertAcked(indicesAdmin().prepareCreate(sourceIndex).setSettings(settings.build()).setMapping(mapping).get()); - } - - public void testDownsampleIndexWithDataNodeRestart() throws Exception { - final InternalTestCluster cluster = internalCluster(); - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String targetIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (node) -> { - try { - cluster.restartNode(node, new InternalTestCluster.RestartCallback() 
{ - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); - } - - public void testDownsampleIndexWithRollingRestart() throws Exception { - final InternalTestCluster cluster = internalCluster(); - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String targetIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new 
CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.rollingRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - - startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); - } - - /** - * Starts a downsample operation. - * - * @param sourceIndex the index to read data from - * @param targetIndex the index to write downsampled data to - * @param config the downsample configuration including the downsample granularity - * @param disruptionStart a latch to synchronize on the disruption starting - * @param disruptionEnd a latch to synchronize on the disruption ending - * @throws InterruptedException if the thread is interrupted while waiting - */ - private void startDownsampleTaskDuringDisruption( - final String sourceIndex, - final String targetIndex, - final DownsampleConfig config, - final CountDownLatch disruptionStart, - final CountDownLatch disruptionEnd - ) throws Exception { - disruptionStart.await(); - assertBusy(() -> { - try { - downsample(sourceIndex, targetIndex, config); - } catch (Exception e) { - throw new AssertionError(e); - } - }, 120, TimeUnit.SECONDS); - disruptionEnd.await(); - } - - public void testDownsampleIndexWithFullClusterRestart() throws Exception { - final InternalTestCluster cluster = internalCluster(); - final List masterNodes = cluster.startMasterOnlyNodes(1); - 
cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String downsampleIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.fullRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - - startDownsampleTaskDuringDisruption(sourceIndex, downsampleIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> 
getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, downsampleIndex, indexedDocs); - } - - private void assertTargetIndex(final InternalTestCluster cluster, final String sourceIndex, final String targetIndex, int indexedDocs) { - final GetIndexResponse getIndexResponse = cluster.client() - .admin() - .indices() - .getIndex(new GetIndexRequest().indices(targetIndex)) - .actionGet(); - assertEquals(1, getIndexResponse.indices().length); - assertResponse( - cluster.client() - .prepareSearch(sourceIndex) - .setQuery(new MatchAllQueryBuilder()) - .setSize(Math.min(DOC_COUNT, indexedDocs)) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE), - sourceIndexSearch -> { - assertEquals(indexedDocs, sourceIndexSearch.getHits().getHits().length); - } - ); - assertResponse( - cluster.client() - .prepareSearch(targetIndex) - .setQuery(new MatchAllQueryBuilder()) - .setSize(Math.min(DOC_COUNT, indexedDocs)) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE), - targetIndexSearch -> { - assertTrue(targetIndexSearch.getHits().getHits().length > 0); - } - ); - } - - private int bulkIndex(final String indexName, final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier, int docCount) - throws IOException { - BulkRequestBuilder bulkRequestBuilder = internalCluster().client().prepareBulk(); - bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - for (int i = 0; i < docCount; i++) { - IndexRequest indexRequest = new IndexRequest(indexName).opType(DocWriteRequest.OpType.CREATE); - XContentBuilder source = sourceSupplier.get(); - indexRequest.source(source); - bulkRequestBuilder.add(indexRequest); - } - BulkResponse bulkResponse = bulkRequestBuilder.get(); - int duplicates = 0; - for (BulkItemResponse response : bulkResponse.getItems()) { - if (response.isFailed()) { - if (response.getFailure().getCause() instanceof VersionConflictEngineException) { - // A 
duplicate event was created by random generator. We should not fail for this - // reason. - logger.debug("We tried to insert a duplicate: [{}]", response.getFailureMessage()); - duplicates++; - } else { - fail("Failed to index data: " + bulkResponse.buildFailureMessage()); - } - } - } - int docsIndexed = docCount - duplicates; - logger.info("Indexed [{}] documents. Dropped [{}] duplicates.", docsIndexed, duplicates); - return docsIndexed; - } - - private void prepareSourceIndex(String sourceIndex) { - // Set the source index to read-only state - assertAcked( - indicesAdmin().prepareUpdateSettings(sourceIndex) - .setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build()) - ); - } - - private void downsample(final String sourceIndex, final String downsampleIndex, final DownsampleConfig config) { - assertAcked( - internalCluster().client() - .execute(DownsampleAction.INSTANCE, new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config)) - .actionGet(TIMEOUT) - ); - } - - private String randomDateForInterval(final DateHistogramInterval interval, final long startTime) { - long endTime = startTime + 10 * interval.estimateMillis(); - return randomDateForRange(startTime, endTime); - } - - private String randomDateForRange(long start, long end) { - return DATE_FORMATTER.formatMillis(randomLongBetween(start, end)); - } -} diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java index f283e3b59bb63..55a81cd7aaace 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java @@ -36,10 +36,15 @@ import static org.elasticsearch.compute.gen.Methods.buildFromFactory; import static 
org.elasticsearch.compute.gen.Methods.getMethod; import static org.elasticsearch.compute.gen.Types.BLOCK; +import static org.elasticsearch.compute.gen.Types.BOOLEAN_BLOCK; import static org.elasticsearch.compute.gen.Types.BYTES_REF; +import static org.elasticsearch.compute.gen.Types.BYTES_REF_BLOCK; +import static org.elasticsearch.compute.gen.Types.DOUBLE_BLOCK; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.EXPRESSION_EVALUATOR; import static org.elasticsearch.compute.gen.Types.EXPRESSION_EVALUATOR_FACTORY; +import static org.elasticsearch.compute.gen.Types.INT_BLOCK; +import static org.elasticsearch.compute.gen.Types.LONG_BLOCK; import static org.elasticsearch.compute.gen.Types.PAGE; import static org.elasticsearch.compute.gen.Types.RELEASABLE; import static org.elasticsearch.compute.gen.Types.RELEASABLES; @@ -53,6 +58,7 @@ public class EvaluatorImplementer { private final TypeElement declarationType; private final ProcessFunction processFunction; private final ClassName implementation; + private final boolean processOutputsMultivalued; public EvaluatorImplementer( Elements elements, @@ -68,6 +74,7 @@ public EvaluatorImplementer( elements.getPackageOf(declarationType).toString(), declarationType.getSimpleName() + extraName + "Evaluator" ); + this.processOutputsMultivalued = this.processFunction.hasBlockType && (this.processFunction.builderArg != null); } public JavaFile sourceFile() { @@ -94,10 +101,17 @@ private TypeSpec type() { builder.addMethod(ctor()); builder.addMethod(eval()); - if (processFunction.args.stream().anyMatch(x -> x instanceof FixedProcessFunctionArg == false)) { - builder.addMethod(realEval(true)); + + if (processOutputsMultivalued) { + if (processFunction.args.stream().anyMatch(x -> x instanceof FixedProcessFunctionArg == false)) { + builder.addMethod(realEval(true)); + } + } else { + if (processFunction.args.stream().anyMatch(x -> x instanceof FixedProcessFunctionArg == 
false)) { + builder.addMethod(realEval(true)); + } + builder.addMethod(realEval(false)); } - builder.addMethod(realEval(false)); builder.addMethod(toStringMethod()); builder.addMethod(close()); return builder.build(); @@ -117,17 +131,21 @@ private MethodSpec ctor() { private MethodSpec eval() { MethodSpec.Builder builder = MethodSpec.methodBuilder("eval").addAnnotation(Override.class); builder.addModifiers(Modifier.PUBLIC).returns(BLOCK).addParameter(PAGE, "page"); - processFunction.args.stream().forEach(a -> a.evalToBlock(builder)); String invokeBlockEval = invokeRealEval(true); - processFunction.args.stream().forEach(a -> a.resolveVectors(builder, invokeBlockEval)); - builder.addStatement(invokeRealEval(false)); + if (processOutputsMultivalued) { + builder.addStatement(invokeBlockEval); + } else { + processFunction.args.stream().forEach(a -> a.resolveVectors(builder, invokeBlockEval)); + builder.addStatement(invokeRealEval(false)); + } processFunction.args.stream().forEach(a -> a.closeEvalToBlock(builder)); return builder.build(); } private String invokeRealEval(boolean blockStyle) { StringBuilder builder = new StringBuilder("return eval(page.getPositionCount()"); + String params = processFunction.args.stream() .map(a -> a.paramName(blockStyle)) .filter(a -> a != null) @@ -154,6 +172,7 @@ private MethodSpec realEval(boolean blockStyle) { builder.addParameter(a.dataType(blockStyle), a.paramName(blockStyle)); } }); + TypeName builderType = builderType(resultDataType); builder.beginControlFlow( "try($T result = driverContext.blockFactory().$L(positionCount))", @@ -166,13 +185,36 @@ private MethodSpec realEval(boolean blockStyle) { builder.beginControlFlow("position: for (int p = 0; p < positionCount; p++)"); { if (blockStyle) { - processFunction.args.stream().forEach(a -> a.skipNull(builder)); + if (processOutputsMultivalued == false) { + processFunction.args.stream().forEach(a -> a.skipNull(builder)); + } else { + builder.addStatement("boolean allBlocksAreNulls = 
true"); + // allow block type inputs to be null + processFunction.args.stream().forEach(a -> { + if (a instanceof StandardProcessFunctionArg as) { + as.skipNull(builder); + } else if (a instanceof BlockProcessFunctionArg ab) { + builder.beginControlFlow("if (!$N.isNull(p))", ab.paramName(blockStyle)); + { + builder.addStatement("allBlocksAreNulls = false"); + } + builder.endControlFlow(); + } + }); + + builder.beginControlFlow("if (allBlocksAreNulls)"); + { + builder.addStatement("result.appendNull()"); + builder.addStatement("continue position"); + } + builder.endControlFlow(); + } } processFunction.args.stream().forEach(a -> a.unpackValues(builder, blockStyle)); StringBuilder pattern = new StringBuilder(); List args = new ArrayList<>(); - pattern.append("$T.$N("); + pattern.append(processOutputsMultivalued ? "$T.$N(result, p, " : "$T.$N("); args.add(declarationType); args.add(processFunction.function.getSimpleName()); processFunction.args.stream().forEach(a -> { @@ -189,11 +231,12 @@ private MethodSpec realEval(boolean blockStyle) { } else { builtPattern = pattern.toString(); } - if (processFunction.warnExceptions.isEmpty() == false) { builder.beginControlFlow("try"); } + builder.addStatement(builtPattern, args.toArray()); + if (processFunction.warnExceptions.isEmpty() == false) { String catchPattern = "catch (" + processFunction.warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) @@ -403,7 +446,7 @@ private record StandardProcessFunctionArg(TypeName type, String name) implements @Override public TypeName dataType(boolean blockStyle) { if (blockStyle) { - return blockType(type); + return isBlockType() ? type : blockType(type); } return vectorType(type); } @@ -442,7 +485,7 @@ public String factoryInvocation(MethodSpec.Builder factoryMethodBuilder) { @Override public void evalToBlock(MethodSpec.Builder builder) { - TypeName blockType = blockType(type); + TypeName blockType = isBlockType() ? 
type : blockType(type); builder.beginControlFlow("try ($T $LBlock = ($T) $L.eval(page))", blockType, name, blockType, name); } @@ -474,6 +517,10 @@ public void unpackValues(MethodSpec.Builder builder, boolean blockStyle) { // nothing to do } + private boolean isBlockType() { + return EvaluatorImplementer.isBlockType(type); + } + @Override public void buildInvocation(StringBuilder pattern, List args, boolean blockStyle) { if (type.equals(BYTES_REF)) { @@ -488,14 +535,21 @@ public void buildInvocation(StringBuilder pattern, List args, boolean bl return; } if (blockStyle) { - pattern.append("$L.$L($L.getFirstValueIndex(p))"); + if (isBlockType()) { + pattern.append("$L"); + } else { + pattern.append("$L.$L($L.getFirstValueIndex(p))"); + } } else { pattern.append("$L.$L(p)"); } args.add(paramName(blockStyle)); - args.add(getMethod(type)); - if (blockStyle) { - args.add(paramName(true)); + String method = isBlockType() ? null : getMethod(type); + if (method != null) { + args.add(method); + if (blockStyle) { + args.add(paramName(true)); + } } } @@ -824,12 +878,101 @@ public String closeInvocation() { } } + private record BlockProcessFunctionArg(TypeName type, String name) implements ProcessFunctionArg { + @Override + public TypeName dataType(boolean blockStyle) { + return type; + } + + @Override + public String paramName(boolean blockStyle) { + return name + (blockStyle ? 
"Block" : "Vector"); + } + + @Override + public void declareField(TypeSpec.Builder builder) { + builder.addField(EXPRESSION_EVALUATOR, name, Modifier.PRIVATE, Modifier.FINAL); + } + + @Override + public void declareFactoryField(TypeSpec.Builder builder) { + builder.addField(EXPRESSION_EVALUATOR_FACTORY, name, Modifier.PRIVATE, Modifier.FINAL); + } + + @Override + public void implementCtor(MethodSpec.Builder builder) { + builder.addParameter(EXPRESSION_EVALUATOR, name); + builder.addStatement("this.$L = $L", name, name); + } + + @Override + public void implementFactoryCtor(MethodSpec.Builder builder) { + builder.addParameter(EXPRESSION_EVALUATOR_FACTORY, name); + builder.addStatement("this.$L = $L", name, name); + } + + @Override + public String factoryInvocation(MethodSpec.Builder factoryMethodBuilder) { + return name + ".get(context)"; + } + + @Override + public void evalToBlock(MethodSpec.Builder builder) { + builder.beginControlFlow("try ($T $LBlock = ($T) $L.eval(page))", type, name, type, name); + } + + @Override + public void closeEvalToBlock(MethodSpec.Builder builder) { + builder.endControlFlow(); + } + + @Override + public void resolveVectors(MethodSpec.Builder builder, String invokeBlockEval) { + // nothing to do + } + + @Override + public void createScratch(MethodSpec.Builder builder) { + // nothing to do + } + + @Override + public void skipNull(MethodSpec.Builder builder) { + EvaluatorImplementer.skipNull(builder, paramName(true)); + } + + @Override + public void unpackValues(MethodSpec.Builder builder, boolean blockStyle) { + // nothing to do + } + + @Override + public void buildInvocation(StringBuilder pattern, List args, boolean blockStyle) { + pattern.append("$L"); + args.add(paramName(blockStyle)); + } + + @Override + public void buildToStringInvocation(StringBuilder pattern, List args, String prefix) { + pattern.append(" + $S + $L"); + args.add(prefix + name + "="); + args.add(name); + } + + @Override + public String closeInvocation() { + return 
name; + } + } + private static class ProcessFunction { private final ExecutableElement function; private final List args; private final BuilderProcessFunctionArg builderArg; private final List warnExceptions; + private boolean hasBlockType; + private ProcessFunction( Elements elements, javax.lang.model.util.Types types, @@ -839,6 +982,7 @@ private ProcessFunction( this.function = function; args = new ArrayList<>(); BuilderProcessFunctionArg builderArg = null; + hasBlockType = false; for (VariableElement v : function.getParameters()) { TypeName type = TypeName.get(v.asType()); String name = v.getSimpleName().toString(); @@ -871,6 +1015,14 @@ private ProcessFunction( args.add(new ArrayProcessFunctionArg(TypeName.get(componentType), name)); continue; } + if (isBlockType(type)) { + if (builderArg != null && args.size() == 2 && hasBlockType == false) { + args.clear(); + hasBlockType = true; + } + args.add(new BlockProcessFunctionArg(type, name)); + continue; + } args.add(new StandardProcessFunctionArg(type, name)); } this.builderArg = builderArg; @@ -885,4 +1037,12 @@ private ClassName resultDataType(boolean blockStyle) { return useBlockStyle ? 
blockType(TypeName.get(function.getReturnType())) : vectorType(TypeName.get(function.getReturnType())); } } + + static boolean isBlockType(TypeName type) { + return type.equals(INT_BLOCK) + || type.equals(LONG_BLOCK) + || type.equals(DOUBLE_BLOCK) + || type.equals(BOOLEAN_BLOCK) + || type.equals(BYTES_REF_BLOCK); + } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec index 3d9f9aa6e1c27..bda103080adc0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec @@ -232,3 +232,26 @@ emp_no:integer |languages:integer |byte2bool:boolean |short2bool:boolean 10020 |null |null |null 10030 |3 |true |true ; + +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +row a = [true, false, false, true] +| eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3); + +a:boolean | a1:boolean | a2:boolean +[true, false, false, true] | false | [false, true] +; + +mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(is_rehired, 0) +| keep emp_no, is_rehired, a1 +| sort emp_no +| limit 5; + +emp_no:integer | is_rehired:boolean | a1:boolean +10001 | [false,true] | false +10002 | [false,false] | false +10003 | null | null +10004 | true | true +10005 | [false,false,false,true] | false +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index f56266f868d44..0138ec1a70989 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -224,6 +224,21 @@ row a = [1.1, 2.1, 2.1] | eval da = mv_dedupe(a); [1.1, 2.1, 2.1] | [1.1, 2.1] ; +mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change, 0, 1) +| keep 
emp_no, salary_change, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change:double | a1:double +10001 | 1.19 | 1.19 +10002 | [-7.23,11.17] | [-7.23,11.17] +10003 | [12.82,14.68] | [12.82,14.68] +10004 | [-0.35,1.13,3.65,13.48] | [-0.35, 1.13] +10005 | [-2.14,13.07] | [-2.14,13.07] +; + autoBucket FROM employees | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index baf6da2cd0bde..63bc452bf5bd5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -384,6 +384,151 @@ row a = [1, 2, 2, 3] | eval da = mv_dedupe(a); [1, 2, 2, 3] | [1, 2, 3] ; +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +// tag::mv_slice_positive[] +row a = [1, 2, 2, 3] +| eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3) +// end::mv_slice_positive[] +; +// tag::mv_slice_positive-result[] +a:integer | a1:integer | a2:integer +[1, 2, 2, 3] | 2 | [2, 3] +// end::mv_slice_positive-result[] +; + +mvSliceNegativeOffset#[skip:-8.13.99, reason:newly added in 8.14] +// tag::mv_slice_negative[] +row a = [1, 2, 2, 3] +| eval a1 = mv_slice(a, -2), a2 = mv_slice(a, -3, -1) +// end::mv_slice_negative[] +; +// tag::mv_slice_negative-result[] +a:integer | a1:integer | a2:integer +[1, 2, 2, 3] | 2 | [2, 2, 3] +// end::mv_slice_negative-result[] +; + +mvSliceSingle#[skip:-8.13.99, reason:newly added in 8.14] +row a = 1 +| eval a1 = mv_slice(a, 0); + +a:integer | a1:integer +1 | 1 +; + +mvSliceOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +row a = [1, 2, 2, 3] +| eval a1 = mv_slice(a, 4), a2 = mv_slice(a, 2, 6), a3 = mv_slice(a, 4, 6); + +a:integer | a1:integer | a2:integer | a3:integer +[1, 2, 2, 3] | null | [2, 3] | null +; + +mvSliceEmpInt#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval 
a1 = mv_slice(salary_change.int, 0, 1) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | 1 +10002 | [-7, 11] | [-7, 11] +10003 | [12, 14] | [12, 14] +10004 | [0, 1, 3, 13] | [0, 1] +10005 | [-2, 13] | [-2, 13] +; + +mvSliceEmpIntSingle#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 1) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | 11 +10003 | [12, 14] | 14 +10004 | [0, 1, 3, 13] | 1 +10005 | [-2, 13] | 13 +; + +mvSliceEmpIntEndOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 1, 4) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | 11 +10003 | [12, 14] | 14 +10004 | [0, 1, 3, 13] | [1, 3, 13] +10005 | [-2, 13] | 13 +; + +mvSliceEmpIntOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 2, 4) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | null +10003 | [12, 14] | null +10004 | [0, 1, 3, 13] | [3, 13] +10005 | [-2, 13] | null +; + +mvSliceEmpIntStartOutOfBoundNegative#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, -5, -2) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | -7 +10003 | [12, 14] | 12 +10004 | [0, 1, 3, 13] | [0, 1, 3] +10005 | [-2, 13] | -2 +; + +mvSliceEmpIntOutOfBoundNegative#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, -5, -3) +| keep emp_no, 
salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | null +10003 | [12, 14] | null +10004 | [0, 1, 3, 13] | [0, 1] +10005 | [-2, 13] | null +; + +mvSliceEmpLong#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.long, 0, 1) +| keep emp_no, salary_change.long, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.long:long | a1:long +10001 | 1 | 1 +10002 | [-7, 11] | [-7, 11] +10003 | [12, 14] | [12, 14] +10004 | [0, 1, 3, 13] | [0, 1] +10005 | [-2, 13] | [-2, 13] +; + autoBucket // tag::auto_bucket[] FROM employees diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index 0b2ce54d5fd22..54256b3420c82 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -277,3 +277,47 @@ lo0 |fe81::cae2:65ff:fece:feb9 eth0 |127.0.0.3 eth0 |fe80::cae2:65ff:fece:fec1 ; + +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +from hosts +| where host == "epsilon" +| eval a1 = mv_slice(ip1, 0, 1) +| keep host, ip1, a1 +| sort host, ip1 +| limit 5; + +host:keyword | ip1:ip | a1:ip +epsilon | [127.0.0.1, 127.0.0.2, 127.0.0.3] | [127.0.0.1, 127.0.0.2] +epsilon | fe80::cae2:65ff:fece:fec1 | fe80::cae2:65ff:fece:fec1 +epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] +; + +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +from hosts +| where host == "epsilon" +| eval a1 = mv_slice(ip1, 0, 1) +| keep host, ip1, a1 +| sort host, ip1 +| limit 5; + +host:keyword | ip1:ip | a1:ip +epsilon | [127.0.0.1, 127.0.0.2, 127.0.0.3] | [127.0.0.1, 127.0.0.2] +epsilon | fe80::cae2:65ff:fece:fec1 | fe80::cae2:65ff:fece:fec1 +epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | 
[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] +; + +mvZip#[skip:-8.13.99, reason:newly added in 8.14] +from hosts +| eval zip = mv_zip(to_string(description), to_string(ip0), "@@") +| keep host, description, ip0, zip +| sort host desc, ip0 +| limit 5 +; + +host:keyword | description:text | ip0:ip | zip:keyword +gamma | gamma k8s server | fe80::cae2:65ff:fece:feb9 | gamma k8s server@@fe80::cae2:65ff:fece:feb9 +gamma | gamma k8s server | fe80::cae2:65ff:fece:feb9 | gamma k8s server@@fe80::cae2:65ff:fece:feb9 +epsilon | epsilon gw instance | [fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece:fec0, fe80::cae2:65ff:fece:fec1] | [epsilon gw instance@@fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece:fec0, fe80::cae2:65ff:fece:fec1] +epsilon | [epsilon host, epsilon2 host] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [epsilon host@@fe81::cae2:65ff:fece:feb9, epsilon2 host@@fe82::cae2:65ff:fece:fec0] +epsilon | null | null | null +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 3f2d87c6d7a08..d38dce49020c4 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -54,7 +54,9 @@ mv_last |"boolean|cartesian_point|cartesian_shape|date|double|g mv_max |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the maximum value." 
| false | false | false mv_median |"double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "Converts a multivalued field into a single valued field containing the median value." | false | false | false mv_min |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the minimum value." | false | false | false +mv_slice |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_slice(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, start:integer, ?end:integer)" |[v, start, end] | "[boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, integer, integer]" | "[A multivalued field, start index, end index (included)]" |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" | "Returns a subset of the multivalued field using the start and end index values." | [false, false, true] | false | false mv_sum |"double|integer|long|unsigned_long mv_sum(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "Converts a multivalued field into a single valued field containing the sum of all of the values." 
| false | false | false +mv_zip |"keyword mv_zip(mvLeft:keyword|text, mvRight:keyword|text, ?delim:keyword|text)" |[mvLeft, mvRight, delim] | ["keyword|text", "keyword|text", "keyword|text"] | [A multivalued field, A multivalued field, delimiter] | "keyword" | "Combines the values from two multivalued fields with a delimiter that joins them together." | [false, false, true] | false | false now |date now() | null |null | null |date | "Returns current date and time." | null | false | false percentile |"double|integer|long percentile(field:double|integer|long, percentile:double|integer|long)" |[field, percentile] |["double|integer|long, double|integer|long"] |["", ""] |"double|integer|long" | "The value at which a certain percentage of observed values occur." | [false, false] | false | true pi |double pi() | null | null | null |double | "The ratio of a circle’s circumference to its diameter." | null | false | false @@ -153,7 +155,9 @@ double e() "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" "double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" +"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_slice(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, start:integer, ?end:integer)" "double|integer|long|unsigned_long mv_sum(v:double|integer|long|unsigned_long)" +"keyword mv_zip(mvLeft:keyword|text, mvRight:keyword|text, ?delim:keyword|text)" date now() "double|integer|long percentile(field:double|integer|long, percentile:double|integer|long)" double pi() @@ -224,5 +228,5 @@ countFunctions#[skip:-8.13.99] show functions | stats a = count(*), b = count(*), c = 
count(*) | mv_expand c; a:long | b:long | c:long -92 | 92 | 92 +94 | 94 | 94 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index bdbcfb3cb49e9..e6c73f9054c51 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -696,6 +696,50 @@ ROW a=[10, 9, 8] // end::mv_concat-to_string-result[] ; +mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.keyword, 0, 1) +| keep emp_no, salary_change.keyword, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.keyword:keyword | a1:keyword +10001 | 1.19 | 1.19 +10002 | [-7.23,11.17] | [-7.23,11.17] +10003 | [12.82,14.68] | [12.82,14.68] +10004 | [-0.35,1.13,13.48,3.65] | [-0.35,1.13] +10005 | [-2.14,13.07] | [-2.14,13.07] +; + +mvZip#[skip:-8.13.99, reason:newly added in 8.14] +// tag::mv_zip[] +ROW a = ["x", "y", "z"], b = ["1", "2"] +| EVAL c = mv_zip(a, b, "-") +| KEEP a, b, c +// end::mv_zip[] +; + +// tag::mv_zip-result[] +a:keyword | b:keyword | c:keyword +[x, y, z] | [1 ,2] | [x-1, y-2, z] +// end::mv_zip-result[] +; + +mvZipEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval full_name = mv_zip(first_name, last_name, " "), full_name_2 = mv_zip(last_name, first_name), jobs = mv_zip(job_positions, salary_change.keyword, "#") +| keep emp_no, full_name, full_name_2, job_positions, salary_change.keyword, jobs +| sort emp_no +| limit 5; + +emp_no:integer | full_name:keyword | full_name_2:keyword | job_positions:keyword | salary_change.keyword:keyword | jobs:keyword +10001 | Georgi Facello | Facello,Georgi | [Accountant, Senior Python Developer] | 1.19 | [Accountant#1.19, Senior Python Developer] +10002 | Bezalel Simmel | Simmel,Bezalel | Senior Team Lead | [-7.23,11.17] | [Senior Team Lead#-7.23, 11.17] +10003 | Parto Bamford | 
Bamford,Parto | null | [12.82, 14.68] | [12.82, 14.68] +10004 | Chirstian Koblick | Koblick,Chirstian | [Head Human Resources, Reporting Analyst, Support Engineer, Tech Lead] | [-0.35, 1.13, 13.48, 3.65] | [Head Human Resources#-0.35, Reporting Analyst#1.13, Support Engineer#13.48, Tech Lead#3.65] +10005 | Kyoichi Maliniak | Maliniak,Kyoichi | null | [-2.14,13.07] | [-2.14,13.07] +; + showTextFields from hosts | where host == "beta" | keep host, host_group, description; ignoreOrder:true diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBooleanEvaluator.java new file mode 100644 index 0000000000000..6c4174bd9cca9 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBooleanEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. + */ +public final class MvSliceBooleanEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceBooleanEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BooleanBlock fieldBlock = (BooleanBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public BooleanBlock eval(int positionCount, BooleanBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(BooleanBlock.Builder 
result = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceBooleanEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + 
this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceBooleanEvaluator get(DriverContext context) { + return new MvSliceBooleanEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceBooleanEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBytesRefEvaluator.java new file mode 100644 index 0000000000000..4a4a169e45aee --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBytesRefEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. 
+ * This class is generated. Do not edit it. + */ +public final class MvSliceBytesRefEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceBytesRefEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock fieldBlock = (BytesRefBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value 
function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceBytesRefEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceBytesRefEvaluator get(DriverContext context) { + return new MvSliceBytesRefEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceBytesRefEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceDoubleEvaluator.java new file mode 100644 index 0000000000000..3e4a83cec68b7 --- /dev/null +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceDoubleEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. 
+ */ +public final class MvSliceDoubleEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (DoubleBlock fieldBlock = (DoubleBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public DoubleBlock eval(int positionCount, DoubleBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); 
+ continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceDoubleEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceDoubleEvaluator get(DriverContext context) { + return new MvSliceDoubleEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceDoubleEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceIntEvaluator.java new file mode 100644 index 0000000000000..fc54dfb1f8336 --- /dev/null +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceIntEvaluator.java @@ -0,0 +1,139 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. 
+ */ +public final class MvSliceIntEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceIntEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (IntBlock fieldBlock = (IntBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public IntBlock eval(int positionCount, IntBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } 
+ if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceIntEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceIntEvaluator get(DriverContext context) { + return new MvSliceIntEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceIntEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceLongEvaluator.java new file mode 100644 index 0000000000000..d6a1e7e45cabf --- /dev/null +++ 
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License
// 2.0; you may not use this file except in compliance with the Elastic License
// 2.0.
package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;

import java.lang.IllegalArgumentException;
import java.lang.Override;
import java.lang.String;
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.IntBlock;
import org.elasticsearch.compute.data.LongBlock;
import org.elasticsearch.compute.data.Page;
import org.elasticsearch.compute.operator.DriverContext;
import org.elasticsearch.compute.operator.EvalOperator;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.xpack.esql.expression.function.Warnings;
import org.elasticsearch.xpack.ql.InvalidArgumentException;
import org.elasticsearch.xpack.ql.tree.Source;

/**
 * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}.
 * This class is generated. Do not edit it.
 */
public final class MvSliceLongEvaluator implements EvalOperator.ExpressionEvaluator {
  private final Warnings warnings;

  private final EvalOperator.ExpressionEvaluator field;

  private final EvalOperator.ExpressionEvaluator start;

  private final EvalOperator.ExpressionEvaluator end;

  private final DriverContext driverContext;

  public MvSliceLongEvaluator(Source source, EvalOperator.ExpressionEvaluator field,
      EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end,
      DriverContext driverContext) {
    this.warnings = new Warnings(source);
    this.field = field;
    this.start = start;
    this.end = end;
    this.driverContext = driverContext;
  }

  @Override
  public Block eval(Page page) {
    // Evaluate the three sub-expressions; the nested try-with-resources
    // releases each intermediate block once the result has been built.
    try (LongBlock fieldBlock = (LongBlock) field.eval(page)) {
      try (IntBlock startBlock = (IntBlock) start.eval(page)) {
        try (IntBlock endBlock = (IntBlock) end.eval(page)) {
          return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock);
        }
      }
    }
  }

  /**
   * Per-position evaluation. {@code start} and {@code end} must be non-null and
   * single-valued at each position; a multi-value registers a warning and the
   * position becomes null. A null {@code field} position also produces null.
   */
  public LongBlock eval(int positionCount, LongBlock fieldBlock, IntBlock startBlock,
      IntBlock endBlock) {
    try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) {
      position: for (int p = 0; p < positionCount; p++) {
        // Only the field block participates in the "all nulls" short-circuit;
        // start/end have their own stricter null/single-value checks below.
        boolean allBlocksAreNulls = true;
        if (!fieldBlock.isNull(p)) {
          allBlocksAreNulls = false;
        }
        if (startBlock.isNull(p)) {
          result.appendNull();
          continue position;
        }
        if (startBlock.getValueCount(p) != 1) {
          if (startBlock.getValueCount(p) > 1) {
            warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value"));
          }
          result.appendNull();
          continue position;
        }
        if (endBlock.isNull(p)) {
          result.appendNull();
          continue position;
        }
        if (endBlock.getValueCount(p) != 1) {
          if (endBlock.getValueCount(p) > 1) {
            warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value"));
          }
          result.appendNull();
          continue position;
        }
        if (allBlocksAreNulls) {
          result.appendNull();
          continue position;
        }
        try {
          MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p)));
        } catch (InvalidArgumentException e) {
          // Invalid start/end combination for this position: warn and emit null.
          warnings.registerException(e);
          result.appendNull();
        }
      }
      return result.build();
    }
  }

  @Override
  public String toString() {
    return "MvSliceLongEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]";
  }

  @Override
  public void close() {
    Releasables.closeExpectNoException(field, start, end);
  }

  /** Builds {@link MvSliceLongEvaluator} instances bound to a {@link DriverContext}. */
  static class Factory implements EvalOperator.ExpressionEvaluator.Factory {
    private final Source source;

    private final EvalOperator.ExpressionEvaluator.Factory field;

    private final EvalOperator.ExpressionEvaluator.Factory start;

    private final EvalOperator.ExpressionEvaluator.Factory end;

    public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field,
        EvalOperator.ExpressionEvaluator.Factory start,
        EvalOperator.ExpressionEvaluator.Factory end) {
      this.source = source;
      this.field = field;
      this.start = start;
      this.end = end;
    }

    @Override
    public MvSliceLongEvaluator get(DriverContext context) {
      return new MvSliceLongEvaluator(source, field.get(context), start.get(context), end.get(context), context);
    }

    @Override
    public String toString() {
      return "MvSliceLongEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]";
    }
  }
}
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License
// 2.0; you may not use this file except in compliance with the Elastic License
// 2.0.
package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;

import java.lang.IllegalArgumentException;
import java.lang.Override;
import java.lang.String;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.BytesRefBlock;
import org.elasticsearch.compute.data.Page;
import org.elasticsearch.compute.operator.DriverContext;
import org.elasticsearch.compute.operator.EvalOperator;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.xpack.esql.expression.function.Warnings;
import org.elasticsearch.xpack.ql.tree.Source;

/**
 * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvZip}.
 * This class is generated. Do not edit it.
 */
public final class MvZipEvaluator implements EvalOperator.ExpressionEvaluator {
  private final Warnings warnings;

  private final EvalOperator.ExpressionEvaluator leftField;

  private final EvalOperator.ExpressionEvaluator rightField;

  private final EvalOperator.ExpressionEvaluator delim;

  private final DriverContext driverContext;

  public MvZipEvaluator(Source source, EvalOperator.ExpressionEvaluator leftField,
      EvalOperator.ExpressionEvaluator rightField, EvalOperator.ExpressionEvaluator delim,
      DriverContext driverContext) {
    this.warnings = new Warnings(source);
    this.leftField = leftField;
    this.rightField = rightField;
    this.delim = delim;
    this.driverContext = driverContext;
  }

  @Override
  public Block eval(Page page) {
    // Evaluate the three sub-expressions; the nested try-with-resources
    // releases each intermediate block once the result has been built.
    try (BytesRefBlock leftFieldBlock = (BytesRefBlock) leftField.eval(page)) {
      try (BytesRefBlock rightFieldBlock = (BytesRefBlock) rightField.eval(page)) {
        try (BytesRefBlock delimBlock = (BytesRefBlock) delim.eval(page)) {
          return eval(page.getPositionCount(), leftFieldBlock, rightFieldBlock, delimBlock);
        }
      }
    }
  }

  /**
   * Per-position evaluation. The delimiter must be non-null and single-valued at
   * each position (a multi-value registers a warning and the position becomes
   * null). The left/right fields may be null or multi-valued; a position is null
   * only when both sides are null.
   */
  public BytesRefBlock eval(int positionCount, BytesRefBlock leftFieldBlock,
      BytesRefBlock rightFieldBlock, BytesRefBlock delimBlock) {
    try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) {
      // Scratch BytesRef reused across positions to avoid per-row allocation.
      BytesRef delimScratch = new BytesRef();
      position: for (int p = 0; p < positionCount; p++) {
        boolean allBlocksAreNulls = true;
        if (!leftFieldBlock.isNull(p)) {
          allBlocksAreNulls = false;
        }
        if (!rightFieldBlock.isNull(p)) {
          allBlocksAreNulls = false;
        }
        if (delimBlock.isNull(p)) {
          result.appendNull();
          continue position;
        }
        if (delimBlock.getValueCount(p) != 1) {
          if (delimBlock.getValueCount(p) > 1) {
            warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value"));
          }
          result.appendNull();
          continue position;
        }
        if (allBlocksAreNulls) {
          result.appendNull();
          continue position;
        }
        MvZip.process(result, p, leftFieldBlock, rightFieldBlock, delimBlock.getBytesRef(delimBlock.getFirstValueIndex(p), delimScratch));
      }
      return result.build();
    }
  }

  @Override
  public String toString() {
    return "MvZipEvaluator[" + "leftField=" + leftField + ", rightField=" + rightField + ", delim=" + delim + "]";
  }

  @Override
  public void close() {
    Releasables.closeExpectNoException(leftField, rightField, delim);
  }

  /** Builds {@link MvZipEvaluator} instances bound to a {@link DriverContext}. */
  static class Factory implements EvalOperator.ExpressionEvaluator.Factory {
    private final Source source;

    private final EvalOperator.ExpressionEvaluator.Factory leftField;

    private final EvalOperator.ExpressionEvaluator.Factory rightField;

    private final EvalOperator.ExpressionEvaluator.Factory delim;

    public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftField,
        EvalOperator.ExpressionEvaluator.Factory rightField,
        EvalOperator.ExpressionEvaluator.Factory delim) {
      this.source = source;
      this.leftField = leftField;
      this.rightField = rightField;
      this.delim = delim;
    }

    @Override
    public MvZipEvaluator get(DriverContext context) {
      return new MvZipEvaluator(source, leftField.get(context), rightField.get(context), delim.get(context), context);
    }

    @Override
    public String toString() {
      return "MvZipEvaluator[" + "leftField=" + leftField + ", rightField=" + rightField + ", delim=" + delim + "]";
    }
  }
}
org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; @@ -128,13 +129,13 @@ public EnrichLookupService( this.clusterService = clusterService; this.searchService = searchService; this.transportService = transportService; - this.executor = transportService.getThreadPool().executor(EsqlPlugin.ESQL_THREAD_POOL_NAME); + this.executor = transportService.getThreadPool().executor(ThreadPool.Names.SEARCH); this.bigArrays = bigArrays; this.blockFactory = blockFactory; this.localBreakerSettings = new LocalCircuitBreaker.SizeSettings(clusterService.getSettings()); transportService.registerRequestHandler( LOOKUP_ACTION_NAME, - this.executor, + transportService.getThreadPool().executor(EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME), in -> new LookupRequest(in, blockFactory), new TransportHandler() ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 113f8b95ca089..d4f6ea3e510c7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -37,7 +37,6 @@ import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.index.EsIndex; @@ -80,7 +79,7 @@ public EnrichPolicyResolver(ClusterService clusterService, TransportService tran this.threadPool = transportService.getThreadPool(); 
transportService.registerRequestHandler( RESOLVE_ACTION_NAME, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME), + threadPool.executor(ThreadPool.Names.SEARCH), LookupRequest::new, new RequestHandler() ); @@ -272,7 +271,7 @@ private void lookupPolicies( new ActionListenerResponseHandler<>( refs.acquire(resp -> lookupResponses.put(cluster, resp)), LookupResponse::new, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME) + threadPool.executor(ThreadPool.Names.SEARCH) ) ); } @@ -290,7 +289,7 @@ private void lookupPolicies( new ActionListenerResponseHandler<>( refs.acquire(resp -> lookupResponses.put(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, resp)), LookupResponse::new, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME) + threadPool.executor(ThreadPool.Names.SEARCH) ) ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index ede3633c1b3e8..b577b8a68cd54 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -73,7 +73,9 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvZip; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; import 
org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; @@ -212,6 +214,8 @@ private FunctionDefinition[][] functions() { def(MvMax.class, MvMax::new, "mv_max"), def(MvMedian.class, MvMedian::new, "mv_median"), def(MvMin.class, MvMin::new, "mv_min"), + def(MvSlice.class, MvSlice::new, "mv_slice"), + def(MvZip.class, MvZip::new, "mv_zip"), def(MvSum.class, MvSum::new, "mv_sum"), def(Split.class, Split::new, "split") } }; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java new file mode 100644 index 0000000000000..b7868b33102a3 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java @@ -0,0 +1,344 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.compute.ann.Evaluator;
import org.elasticsearch.compute.data.BooleanBlock;
import org.elasticsearch.compute.data.BytesRefBlock;
import org.elasticsearch.compute.data.DoubleBlock;
import org.elasticsearch.compute.data.IntBlock;
import org.elasticsearch.compute.data.LongBlock;
import org.elasticsearch.compute.operator.EvalOperator;
import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
import org.elasticsearch.xpack.esql.expression.function.Param;
import org.elasticsearch.xpack.esql.planner.PlannerUtils;
import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
import org.elasticsearch.xpack.ql.InvalidArgumentException;
import org.elasticsearch.xpack.ql.expression.Expression;
import org.elasticsearch.xpack.ql.expression.function.OptionalArgument;
import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction;
import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate;
import org.elasticsearch.xpack.ql.tree.NodeInfo;
import org.elasticsearch.xpack.ql.tree.Source;
import org.elasticsearch.xpack.ql.type.DataType;

import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.function.Function;

import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST;
import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND;
import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.THIRD;
import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isInteger;
import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isType;

/**
 * Returns a subset of the multivalued field using the start and end index values.
 * Indices are zero-based; negative indices count back from the end of the values.
 * The {@code end} index is inclusive, and when omitted the slice is the single
 * element at {@code start}.
 */
public class MvSlice extends ScalarFunction implements OptionalArgument, EvaluatorMapper {
    private final Expression field, start, end;

    @FunctionInfo(
        returnType = {
            "boolean",
            "cartesian_point",
            "cartesian_shape",
            "date",
            "double",
            "geo_point",
            "geo_shape",
            "integer",
            "ip",
            "keyword",
            "long",
            "text",
            "version" },
        description = "Returns a subset of the multivalued field using the start and end index values."
    )
    public MvSlice(
        Source source,
        @Param(
            name = "v",
            type = {
                "boolean",
                "cartesian_point",
                "cartesian_shape",
                "date",
                "double",
                "geo_point",
                "geo_shape",
                "integer",
                "ip",
                "keyword",
                "long",
                "text",
                "version" },
            description = "A multivalued field"
        ) Expression field,
        @Param(name = "start", type = { "integer" }, description = "start index") Expression start,
        @Param(name = "end", type = { "integer" }, description = "end index (included)", optional = true) Expression end
    ) {
        // When end is omitted the slice is the single element at start, so the
        // third child (and this.end) defaults to start.
        super(source, end == null ? Arrays.asList(field, start, start) : Arrays.asList(field, start, end));
        this.field = field;
        this.start = start;
        this.end = end == null ? start : end;
    }

    @Override
    protected TypeResolution resolveType() {
        if (childrenResolved() == false) {
            return new TypeResolution("Unresolved children");
        }

        // The field may be any representable type; start/end must be integers.
        TypeResolution resolution = isType(field, EsqlDataTypes::isRepresentable, sourceText(), FIRST, "representable");
        if (resolution.unresolved()) {
            return resolution;
        }

        resolution = isInteger(start, sourceText(), SECOND);
        if (resolution.unresolved()) {
            return resolution;
        }

        if (end != null) {
            resolution = isInteger(end, sourceText(), THIRD);
            if (resolution.unresolved()) {
                return resolution;
            }
        }

        return resolution;
    }

    @Override
    public boolean foldable() {
        return field.foldable() && start.foldable() && (end == null || end.foldable());
    }

    @Override
    public EvalOperator.ExpressionEvaluator.Factory toEvaluator(
        Function<Expression, EvalOperator.ExpressionEvaluator.Factory> toEvaluator
    ) {
        // Constant start/end offsets are validated eagerly at plan time so bad
        // constants fail the query instead of producing per-row warnings.
        if (start.foldable() && end.foldable()) {
            int startOffset = Integer.parseInt(String.valueOf(start.fold()));
            int endOffset = Integer.parseInt(String.valueOf(end.fold()));
            checkStartEnd(startOffset, endOffset);
        }
        // Dispatch to the type-specialized generated evaluator.
        return switch (PlannerUtils.toElementType(field.dataType())) {
            case BOOLEAN -> new MvSliceBooleanEvaluator.Factory(
                source(),
                toEvaluator.apply(field),
                toEvaluator.apply(start),
                toEvaluator.apply(end)
            );
            case BYTES_REF -> new MvSliceBytesRefEvaluator.Factory(
                source(),
                toEvaluator.apply(field),
                toEvaluator.apply(start),
                toEvaluator.apply(end)
            );
            case DOUBLE -> new MvSliceDoubleEvaluator.Factory(
                source(),
                toEvaluator.apply(field),
                toEvaluator.apply(start),
                toEvaluator.apply(end)
            );
            case INT -> new MvSliceIntEvaluator.Factory(
                source(),
                toEvaluator.apply(field),
                toEvaluator.apply(start),
                toEvaluator.apply(end)
            );
            case LONG -> new MvSliceLongEvaluator.Factory(
                source(),
                toEvaluator.apply(field),
                toEvaluator.apply(start),
                toEvaluator.apply(end)
            );
            case NULL -> EvalOperator.CONSTANT_NULL_FACTORY;
            default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType());
        };
    }

    @Override
    public Object fold() {
        return EvaluatorMapper.super.fold();
    }

    @Override
    public Expression replaceChildren(List<Expression> newChildren) {
        return new MvSlice(source(), newChildren.get(0), newChildren.get(1), newChildren.size() > 2 ? newChildren.get(2) : null);
    }

    @Override
    protected NodeInfo<? extends Expression> info() {
        return NodeInfo.create(this, MvSlice::new, field, start, end);
    }

    @Override
    public DataType dataType() {
        // Slicing preserves the element type of the input field.
        return field.dataType();
    }

    @Override
    public ScriptTemplate asScript() {
        throw new UnsupportedOperationException("functions do not support scripting");
    }

    @Override
    public int hashCode() {
        return Objects.hash(field, start, end);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        MvSlice other = (MvSlice) obj;
        return Objects.equals(other.field, field) && Objects.equals(other.start, start) && Objects.equals(other.end, end);
    }

    /**
     * Converts a user-supplied offset into an absolute index inside the block's
     * value range for this position. Negative offsets count back from the end of
     * the values; {@code first} is the position's first value index.
     */
    static int adjustIndex(int oldOffset, int fieldValueCount, int first) {
        return oldOffset < 0 ? oldOffset + fieldValueCount + first : oldOffset + first;
    }

    /**
     * Validates a start/end pair: start must not exceed end, and the two offsets
     * must have the same sign (mixing a negative start with a non-negative end is
     * ambiguous). Throws {@link InvalidArgumentException} on violation.
     */
    static void checkStartEnd(int start, int end) throws InvalidArgumentException {
        if (start > end) {
            throw new InvalidArgumentException("Start offset is greater than end offset");
        }
        if (start < 0 && end >= 0) {
            throw new InvalidArgumentException("Start and end offset have different signs");
        }
    }

    /**
     * Slices the boolean values at {@code position} into {@code builder}.
     * Offsets are clamped to the position's value range; a slice entirely out of
     * range appends null, and a single-element slice is appended as a scalar
     * (no position entry).
     */
    @Evaluator(extraName = "Boolean", warnExceptions = { InvalidArgumentException.class })
    static void process(BooleanBlock.Builder builder, int position, BooleanBlock field, int start, int end) {
        int fieldValueCount = field.getValueCount(position);
        checkStartEnd(start, end);
        int first = field.getFirstValueIndex(position);
        int mvStartIndex = adjustIndex(start, fieldValueCount, first);
        mvStartIndex = Math.max(first, mvStartIndex);
        int mvEndIndex = adjustIndex(end, fieldValueCount, first);
        mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex);
        if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) {
            builder.appendNull();
            return;
        }
        if (mvStartIndex == mvEndIndex) {
            builder.appendBoolean(field.getBoolean(mvStartIndex));
            return;
        }
        builder.beginPositionEntry();
        for (int i = mvStartIndex; i <= mvEndIndex; i++) {
            builder.appendBoolean(field.getBoolean(i));
        }
        builder.endPositionEntry();
    }

    /** Same slicing logic as the boolean variant, for int values. */
    @Evaluator(extraName = "Int", warnExceptions = { InvalidArgumentException.class })
    static void process(IntBlock.Builder builder, int position, IntBlock field, int start, int end) {
        int fieldValueCount = field.getValueCount(position);
        checkStartEnd(start, end);
        int first = field.getFirstValueIndex(position);
        int mvStartIndex = adjustIndex(start, fieldValueCount, first);
        mvStartIndex = Math.max(first, mvStartIndex);
        int mvEndIndex = adjustIndex(end, fieldValueCount, first);
        mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex);
        if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) {
            builder.appendNull();
            return;
        }
        if (mvStartIndex == mvEndIndex) {
            builder.appendInt(field.getInt(mvStartIndex));
            return;
        }
        builder.beginPositionEntry();
        for (int i = mvStartIndex; i <= mvEndIndex; i++) {
            builder.appendInt(field.getInt(i));
        }
        builder.endPositionEntry();
    }

    /** Same slicing logic as the boolean variant, for long values. */
    @Evaluator(extraName = "Long", warnExceptions = { InvalidArgumentException.class })
    static void process(LongBlock.Builder builder, int position, LongBlock field, int start, int end) {
        int fieldValueCount = field.getValueCount(position);
        checkStartEnd(start, end);
        int first = field.getFirstValueIndex(position);
        int mvStartIndex = adjustIndex(start, fieldValueCount, first);
        mvStartIndex = Math.max(first, mvStartIndex);
        int mvEndIndex = adjustIndex(end, fieldValueCount, first);
        mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex);
        if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) {
            builder.appendNull();
            return;
        }
        if (mvStartIndex == mvEndIndex) {
            builder.appendLong(field.getLong(mvStartIndex));
            return;
        }
        builder.beginPositionEntry();
        for (int i = mvStartIndex; i <= mvEndIndex; i++) {
            builder.appendLong(field.getLong(i));
        }
        builder.endPositionEntry();
    }

    /** Same slicing logic as the boolean variant, for double values. */
    @Evaluator(extraName = "Double", warnExceptions = { InvalidArgumentException.class })
    static void process(DoubleBlock.Builder builder, int position, DoubleBlock field, int start, int end) {
        int fieldValueCount = field.getValueCount(position);
        checkStartEnd(start, end);
        int first = field.getFirstValueIndex(position);
        int mvStartIndex = adjustIndex(start, fieldValueCount, first);
        mvStartIndex = Math.max(first, mvStartIndex);
        int mvEndIndex = adjustIndex(end, fieldValueCount, first);
        mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex);
        if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) {
            builder.appendNull();
            return;
        }
        if (mvStartIndex == mvEndIndex) {
            builder.appendDouble(field.getDouble(mvStartIndex));
            return;
        }
        builder.beginPositionEntry();
        for (int i = mvStartIndex; i <= mvEndIndex; i++) {
            builder.appendDouble(field.getDouble(i));
        }
        builder.endPositionEntry();
    }

    /**
     * Same slicing logic as the boolean variant, for BytesRef values. A single
     * scratch {@link BytesRef} is reused for all reads in this position.
     */
    @Evaluator(extraName = "BytesRef", warnExceptions = { InvalidArgumentException.class })
    static void process(BytesRefBlock.Builder builder, int position, BytesRefBlock field, int start, int end) {
        int fieldValueCount = field.getValueCount(position);
        checkStartEnd(start, end); // append null here ?
        int first = field.getFirstValueIndex(position);
        int mvStartIndex = adjustIndex(start, fieldValueCount, first);
        mvStartIndex = Math.max(first, mvStartIndex);
        int mvEndIndex = adjustIndex(end, fieldValueCount, first);
        mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex);
        if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) {
            builder.appendNull();
            return;
        }
        BytesRef fieldScratch = new BytesRef();
        if (mvStartIndex == mvEndIndex) {
            builder.appendBytesRef(field.getBytesRef(mvStartIndex, fieldScratch));
            return;
        }
        builder.beginPositionEntry();
        for (int i = mvStartIndex; i <= mvEndIndex; i++) {
            builder.appendBytesRef(field.getBytesRef(i, fieldScratch));
        }
        builder.endPositionEntry();
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.compute.ann.Evaluator;
import org.elasticsearch.compute.data.BytesRefBlock;
import org.elasticsearch.compute.operator.EvalOperator;
import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
import org.elasticsearch.xpack.esql.expression.function.Param;
import org.elasticsearch.xpack.ql.expression.Expression;
import org.elasticsearch.xpack.ql.expression.Literal;
import org.elasticsearch.xpack.ql.expression.function.OptionalArgument;
import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction;
import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate;
import org.elasticsearch.xpack.ql.tree.NodeInfo;
import org.elasticsearch.xpack.ql.tree.Source;
import org.elasticsearch.xpack.ql.type.DataType;
import org.elasticsearch.xpack.ql.type.DataTypes;

import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.function.Function;

import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST;
import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND;
import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.THIRD;
import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString;

/**
 * Combines the values from two multivalued fields with a delimiter that joins them together.
 * Values are paired up by index; when one side has more values than the other, the
 * leftover values are appended without a delimiter. The delimiter defaults to a comma.
 */
public class MvZip extends ScalarFunction implements OptionalArgument, EvaluatorMapper {
    private final Expression mvLeft, mvRight, delim;
    // Default delimiter used when the optional third argument is omitted.
    private static final Literal COMMA = new Literal(Source.EMPTY, ",", DataTypes.TEXT);

    @FunctionInfo(
        returnType = { "keyword" },
        description = "Combines the values from two multivalued fields with a delimiter that joins them together."
    )
    public MvZip(
        Source source,
        @Param(name = "mvLeft", type = { "keyword", "text" }, description = "A multivalued field") Expression mvLeft,
        @Param(name = "mvRight", type = { "keyword", "text" }, description = "A multivalued field") Expression mvRight,
        @Param(name = "delim", type = { "keyword", "text" }, description = "delimiter", optional = true) Expression delim
    ) {
        // A missing delimiter is replaced by the COMMA literal so the node
        // always has exactly three children.
        super(source, delim == null ? Arrays.asList(mvLeft, mvRight, COMMA) : Arrays.asList(mvLeft, mvRight, delim));
        this.mvLeft = mvLeft;
        this.mvRight = mvRight;
        this.delim = delim == null ? COMMA : delim;
    }

    @Override
    protected TypeResolution resolveType() {
        if (childrenResolved() == false) {
            return new TypeResolution("Unresolved children");
        }

        // All three arguments must be strings (keyword or text).
        TypeResolution resolution = isString(mvLeft, sourceText(), FIRST);
        if (resolution.unresolved()) {
            return resolution;
        }

        resolution = isString(mvRight, sourceText(), SECOND);
        if (resolution.unresolved()) {
            return resolution;
        }

        if (delim != null) {
            resolution = isString(delim, sourceText(), THIRD);
            if (resolution.unresolved()) {
                return resolution;
            }
        }

        return resolution;
    }

    @Override
    public boolean foldable() {
        return mvLeft.foldable() && mvRight.foldable() && (delim == null || delim.foldable());
    }

    @Override
    public EvalOperator.ExpressionEvaluator.Factory toEvaluator(
        Function<Expression, EvalOperator.ExpressionEvaluator.Factory> toEvaluator
    ) {
        return new MvZipEvaluator.Factory(source(), toEvaluator.apply(mvLeft), toEvaluator.apply(mvRight), toEvaluator.apply(delim));
    }

    @Override
    public Object fold() {
        return EvaluatorMapper.super.fold();
    }

    @Override
    public Expression replaceChildren(List<Expression> newChildren) {
        return new MvZip(source(), newChildren.get(0), newChildren.get(1), newChildren.size() > 2 ? newChildren.get(2) : null);
    }

    @Override
    protected NodeInfo<? extends Expression> info() {
        return NodeInfo.create(this, MvZip::new, mvLeft, mvRight, delim);
    }

    @Override
    public DataType dataType() {
        // Joined strings are always keyword, regardless of the input string types.
        return DataTypes.KEYWORD;
    }

    @Override
    public ScriptTemplate asScript() {
        throw new UnsupportedOperationException("functions do not support scripting");
    }

    @Override
    public int hashCode() {
        return Objects.hash(mvLeft, mvRight, delim);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        MvZip other = (MvZip) obj;
        return Objects.equals(other.mvLeft, mvLeft) && Objects.equals(other.mvRight, mvRight) && Objects.equals(other.delim, delim);
    }

    /**
     * Copies the values of one side verbatim (no delimiter) into the builder as
     * a multi-value position entry. Used when the other side is null.
     */
    private static void buildOneSide(BytesRefBlock.Builder builder, int start, int end, BytesRefBlock field, BytesRef fieldScratch) {
        builder.beginPositionEntry();
        for (int i = start; i < end; i++) {
            builder.appendBytesRef(field.getBytesRef(i, fieldScratch));
        }
        builder.endPositionEntry();
    }

    /**
     * Zips the values at {@code position}: pairs are joined with {@code delim};
     * if one side is null the other side is emitted unchanged; leftover values
     * from the longer side are appended without a delimiter. Single results are
     * appended as scalars (no position entry).
     */
    @Evaluator
    static void process(BytesRefBlock.Builder builder, int position, BytesRefBlock leftField, BytesRefBlock rightField, BytesRef delim) {
        int leftFieldValueCount = leftField.getValueCount(position);
        int rightFieldValueCount = rightField.getValueCount(position);

        int leftFirst = leftField.getFirstValueIndex(position);
        int rightFirst = rightField.getFirstValueIndex(position);

        BytesRef fieldScratch = new BytesRef();

        // nulls
        if (leftField.isNull(position)) {
            if (rightFieldValueCount == 1) {
                builder.appendBytesRef(rightField.getBytesRef(rightFirst, fieldScratch));
                return;
            }
            buildOneSide(builder, rightFirst, rightFirst + rightFieldValueCount, rightField, fieldScratch);
            return;
        }

        if (rightField.isNull(position)) {
            if (leftFieldValueCount == 1) {
                builder.appendBytesRef(leftField.getBytesRef(leftFirst, fieldScratch));
                return;
            }
            buildOneSide(builder, leftFirst, leftFirst + leftFieldValueCount, leftField, fieldScratch);
            return;
        }

        BytesRefBuilder work = new BytesRefBuilder();
        // single value
        if (leftFieldValueCount == 1 && rightFieldValueCount == 1) {
            work.append(leftField.getBytesRef(leftFirst, fieldScratch));
            work.append(delim);
            work.append(rightField.getBytesRef(rightFirst, fieldScratch));
            builder.appendBytesRef(work.get());
            return;
        }
        // multiple values
        int leftIndex = 0, rightIndex = 0;
        builder.beginPositionEntry();
        while (leftIndex < leftFieldValueCount && rightIndex < rightFieldValueCount) {
            // concat
            work.clear();
            work.append(leftField.getBytesRef(leftIndex + leftFirst, fieldScratch));
            work.append(delim);
            work.append(rightField.getBytesRef(rightIndex + rightFirst, fieldScratch));
            builder.appendBytesRef(work.get());
            leftIndex++;
            rightIndex++;
        }
        // Leftovers from the longer side are appended bare, without a delimiter.
        while (leftIndex < leftFieldValueCount) {
            work.clear();
            work.append(leftField.getBytesRef(leftIndex + leftFirst, fieldScratch));
            builder.appendBytesRef(work.get());
            leftIndex++;
        }
        while (rightIndex < rightFieldValueCount) {
            work.clear();
            work.append(rightField.getBytesRef(rightIndex + rightFirst, fieldScratch));
            builder.appendBytesRef(work.get());
            rightIndex++;
        }
        builder.endPositionEntry();
    }
}
org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvZip; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; @@ -419,7 +421,9 @@ public static List namedTypeEntries() { of(ScalarFunction.class, MvMax.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMedian.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMin.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), + of(ScalarFunction.class, MvSlice.class, PlanNamedTypes::writeMvSlice, PlanNamedTypes::readMvSlice), of(ScalarFunction.class, MvSum.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), + of(ScalarFunction.class, MvZip.class, PlanNamedTypes::writeMvZip, PlanNamedTypes::readMvZip), // Expressions (other) of(Expression.class, Literal.class, PlanNamedTypes::writeLiteral, PlanNamedTypes::readLiteral), of(Expression.class, Order.class, PlanNamedTypes::writeOrder, PlanNamedTypes::readOrder) @@ -1831,4 +1835,30 @@ static void writeLog(PlanStreamOutput out, Log log) throws IOException { out.writeExpression(fields.get(0)); out.writeOptionalWriteable(fields.size() == 2 ? 
o -> out.writeExpression(fields.get(1)) : null); } + + static MvSlice readMvSlice(PlanStreamInput in) throws IOException { + return new MvSlice(in.readSource(), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class)); + } + + static void writeMvSlice(PlanStreamOutput out, MvSlice fn) throws IOException { + out.writeNoSource(); + List fields = fn.children(); + assert fields.size() == 2 || fields.size() == 3; + out.writeExpression(fields.get(0)); + out.writeExpression(fields.get(1)); + out.writeOptionalWriteable(fields.size() == 3 ? o -> out.writeExpression(fields.get(2)) : null); + } + + static MvZip readMvZip(PlanStreamInput in) throws IOException { + return new MvZip(in.readSource(), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class)); + } + + static void writeMvZip(PlanStreamOutput out, MvZip fn) throws IOException { + out.writeNoSource(); + List fields = fn.children(); + assert fields.size() == 2 || fields.size() == 3; + out.writeExpression(fields.get(0)); + out.writeExpression(fields.get(1)); + out.writeOptionalWriteable(fields.size() == 3 ? 
o -> out.writeExpression(fields.get(2)) : null); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 64f393ccdf2b0..7af37a3eeb114 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -81,7 +81,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; -import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_THREAD_POOL_NAME; import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME; /** @@ -116,7 +115,7 @@ public ComputeService( this.transportService = transportService; this.bigArrays = bigArrays.withCircuitBreaking(); this.blockFactory = blockFactory; - this.esqlExecutor = threadPool.executor(ESQL_THREAD_POOL_NAME); + this.esqlExecutor = threadPool.executor(ThreadPool.Names.SEARCH); transportService.registerRequestHandler(DATA_ACTION_NAME, this.esqlExecutor, DataNodeRequest::new, new DataNodeRequestHandler()); transportService.registerRequestHandler( CLUSTER_ACTION_NAME, @@ -196,7 +195,7 @@ public void execute( final List collectedProfiles = configuration.profile() ? 
Collections.synchronizedList(new ArrayList<>()) : List.of(); final var exchangeSource = new ExchangeSourceHandler( queryPragmas.exchangeBufferSize(), - transportService.getThreadPool().executor(ESQL_THREAD_POOL_NAME) + transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); try ( Releasable ignored = exchangeSource.addEmptySink(); @@ -628,7 +627,7 @@ private void runBatch(int startBatchIndex) { final int endBatchIndex = Math.min(startBatchIndex + maxConcurrentShards, request.shardIds().size()); List shardIds = request.shardIds().subList(startBatchIndex, endBatchIndex); acquireSearchContexts(clusterAlias, shardIds, configuration, request.aliasFilters(), ActionListener.wrap(searchContexts -> { - assert ThreadPool.assertCurrentThreadPool(ESQL_THREAD_POOL_NAME, ESQL_WORKER_THREAD_POOL_NAME); + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH, ESQL_WORKER_THREAD_POOL_NAME); var computeContext = new ComputeContext(sessionId, clusterAlias, searchContexts, configuration, null, exchangeSink); runCompute( parentTask, @@ -734,7 +733,7 @@ void runComputeOnRemoteCluster( final String localSessionId = clusterAlias + ":" + globalSessionId; var exchangeSource = new ExchangeSourceHandler( configuration.pragmas().exchangeBufferSize(), - transportService.getThreadPool().executor(ESQL_THREAD_POOL_NAME) + transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); try ( Releasable ignored = exchangeSource.addEmptySink(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index 61f0393c80948..fded9339567bd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -70,7 +70,6 @@ public class EsqlPlugin extends Plugin implements ActionPlugin { - public static final String 
ESQL_THREAD_POOL_NAME = "esql"; public static final String ESQL_WORKER_THREAD_POOL_NAME = "esql_worker"; public static final Setting QUERY_RESULT_TRUNCATION_MAX_SIZE = Setting.intSetting( @@ -112,12 +111,7 @@ public Collection createComponents(PluginServices services) { ), new EsqlIndexResolver(services.client(), EsqlDataTypeRegistry.INSTANCE) ), - new ExchangeService( - services.clusterService().getSettings(), - services.threadPool(), - EsqlPlugin.ESQL_THREAD_POOL_NAME, - blockFactory - ), + new ExchangeService(services.clusterService().getSettings(), services.threadPool(), ThreadPool.Names.SEARCH, blockFactory), blockFactory ); } @@ -186,18 +180,9 @@ public List getNamedWriteables() { ).toList(); } - @Override public List> getExecutorBuilders(Settings settings) { final int allocatedProcessors = EsExecutors.allocatedProcessors(settings); return List.of( - new FixedExecutorBuilder( - settings, - ESQL_THREAD_POOL_NAME, - allocatedProcessors, - 1000, - ESQL_THREAD_POOL_NAME, - EsExecutors.TaskTrackingConfig.DEFAULT - ), // TODO: Maybe have two types of threadpools for workers: one for CPU-bound and one for I/O-bound tasks. // And we should also reduce the number of threads of the CPU-bound threadpool to allocatedProcessors. 
new FixedExecutorBuilder( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index baaa4abe23b3d..366046d39dc43 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -82,7 +82,7 @@ public TransportEsqlQueryAction( super(EsqlQueryAction.NAME, transportService, actionFilters, EsqlQueryRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.planExecutor = planExecutor; this.clusterService = clusterService; - this.requestExecutor = threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME); + this.requestExecutor = threadPool.executor(ThreadPool.Names.SEARCH); exchangeService.registerTransportHandler(transportService); this.exchangeService = exchangeService; this.enrichPolicyResolver = new EnrichPolicyResolver(clusterService, transportService, planExecutor.indexResolver()); @@ -124,7 +124,7 @@ protected void doExecute(Task task, EsqlQueryRequest request, ActionListener listener) { - assert ThreadPool.assertCurrentThreadPool(EsqlPlugin.ESQL_THREAD_POOL_NAME); + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH); if (requestIsAsync(request)) { asyncTaskManagementService.asyncExecute( request, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 20714cc5633b6..dd937c11c9642 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -96,6 +96,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import static 
org.elasticsearch.test.ListMatcher.matchesList; @@ -107,7 +108,6 @@ import static org.elasticsearch.xpack.esql.CsvTestsDataLoader.CSV_DATASET_MAP; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; -import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_THREAD_POOL_NAME; import static org.elasticsearch.xpack.ql.CsvSpecReader.specParser; import static org.elasticsearch.xpack.ql.TestUtils.classpathResources; import static org.hamcrest.Matchers.equalTo; @@ -161,6 +161,7 @@ public class CsvTests extends ESTestCase { private final Mapper mapper = new Mapper(functionRegistry); private final PhysicalPlanOptimizer physicalPlanOptimizer = new TestPhysicalPlanOptimizer(new PhysicalOptimizerContext(configuration)); private ThreadPool threadPool; + private Executor executor; @ParametersFactory(argumentFormatting = "%2$s.%3$s") public static List readScriptSpec() throws Exception { @@ -174,18 +175,17 @@ public static List readScriptSpec() throws Exception { @Before public void setUp() throws Exception { super.setUp(); - int numThreads = randomBoolean() ? 1 : between(2, 16); - threadPool = new TestThreadPool( - "CsvTests", - new FixedExecutorBuilder( - Settings.EMPTY, - ESQL_THREAD_POOL_NAME, - numThreads, - 1024, - "esql", - EsExecutors.TaskTrackingConfig.DEFAULT - ) - ); + if (randomBoolean()) { + int numThreads = randomBoolean() ? 
1 : between(2, 16); + threadPool = new TestThreadPool( + "CsvTests", + new FixedExecutorBuilder(Settings.EMPTY, "esql_test", numThreads, 1024, "esql", EsExecutors.TaskTrackingConfig.DEFAULT) + ); + executor = threadPool.executor("esql_test"); + } else { + threadPool = new TestThreadPool(getTestName()); + executor = threadPool.executor(ThreadPool.Names.SEARCH); + } HeaderWarning.setThreadContext(threadPool.getThreadContext()); } @@ -343,7 +343,7 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { bigArrays, ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() * 2)) ); - ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), threadPool.executor(ESQL_THREAD_POOL_NAME)); + ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), executor); ExchangeSinkHandler exchangeSink = new ExchangeSinkHandler(blockFactory, between(1, 64), threadPool::relativeTimeInMillis); LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner( sessionId, @@ -406,13 +406,7 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { DriverRunner runner = new DriverRunner(threadPool.getThreadContext()) { @Override protected void start(Driver driver, ActionListener driverListener) { - Driver.start( - threadPool.getThreadContext(), - threadPool.executor(ESQL_THREAD_POOL_NAME), - driver, - between(1, 1000), - driverListener - ); + Driver.start(threadPool.getThreadContext(), executor, driver, between(1, 1000), driverListener); } }; PlainActionFuture future = new PlainActionFuture<>(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 4d44d3111c094..9daf043714efc 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -446,7 +446,7 @@ private void testEvaluateBlock(BlockFactory inputBlockFactory, DriverContext con // TODO cranky time - public final void testSimpleWithNulls() { // TODO replace this with nulls inserted into the test case like anyNullIsNull + public void testSimpleWithNulls() { // TODO replace this with nulls inserted into the test case like anyNullIsNull assumeTrue("nothing to do if a type error", testCase.getExpectedTypeError() == null); assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); List simpleData = testCase.getDataValues(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java new file mode 100644 index 0000000000000..4d1e58893739a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java @@ -0,0 +1,346 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class MvSliceTests extends AbstractScalarFunctionTestCase { + public MvSliceTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + booleans(suppliers); + ints(suppliers); + longs(suppliers); + doubles(suppliers); + bytesRefs(suppliers); + return parameterSuppliersFromTypedData(suppliers); + } + + @Override + protected DataType expectedType(List argTypes) { + return argTypes.get(0); + } + + @Override + protected List argSpec() { + return List.of(required(representableTypes()), required(integers()), optional(integers())); + } + + @Override + protected Expression build(Source source, List args) { + return new MvSlice(source, args.get(0), args.get(1), args.size() > 2 ? 
args.get(2) : null); + } + + private static void booleans(List suppliers) { + // Positive + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + // Positive Start IndexOutofBound + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(length, length + 1); + int end = randomIntBetween(start, length + 10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + nullValue() + ); + })); + // Positive End IndexOutofBound + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(length, length + 10); + return new TestCaseSupplier.TestCase( + List.of( + new 
TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + equalTo(start == length - 1 ? field.get(start) : field.subList(start, length)) + ); + })); + // Negative + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(0 - length, -1); + int end = randomIntBetween(start, -1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + equalTo(start == end ? field.get(start + length) : field.subList(start + length, end + 1 + length)) + ); + })); + } + + private static void ints(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.INTEGER, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomInt()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.INTEGER, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceIntEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.INTEGER, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + } + + private static void longs(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.LONG, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLong()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.LONG, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceLongEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.LONG, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.DATETIME, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLong()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.DATETIME, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceLongEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.DATETIME, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + } + + private static void doubles(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.DOUBLE, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomDouble()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.DOUBLE, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceDoubleEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.DOUBLE, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + } + + private static void bytesRefs(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.KEYWORD, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.KEYWORD).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.KEYWORD, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.KEYWORD, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.TEXT, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.TEXT, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.TEXT, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.IP, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.IP).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.IP, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.IP, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.VERSION, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.VERSION).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.VERSION, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.VERSION, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.GEO_POINT, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomPoint()))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.GEO_POINT, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.GEO_POINT, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.CARTESIAN_POINT, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomPoint()))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.CARTESIAN_POINT, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.CARTESIAN_POINT, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.GEO_SHAPE, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean())))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.GEO_SHAPE, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.GEO_SHAPE, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.CARTESIAN_SHAPE, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomGeometry(randomBoolean())))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.CARTESIAN_SHAPE, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.CARTESIAN_SHAPE, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java new file mode 100644 index 0000000000000..c4162f6ddc367 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static java.lang.Math.max; +import static org.hamcrest.Matchers.equalTo; + +public class MvZipTests extends AbstractScalarFunctionTestCase { + public MvZipTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + suppliers.add(new TestCaseSupplier(List.of(DataTypes.KEYWORD, DataTypes.KEYWORD, DataTypes.KEYWORD), () -> { + List left = randomList(1, 3, () -> randomLiteral(DataTypes.KEYWORD).value()); + List right = randomList(1, 3, () -> randomLiteral(DataTypes.KEYWORD).value()); + String delim = randomAlphaOfLengthBetween(1, 1); + List expected = calculateExpected(left, right, delim); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(left, DataTypes.KEYWORD, "mvLeft"), + new TestCaseSupplier.TypedData(right, DataTypes.KEYWORD, "mvRight"), + new TestCaseSupplier.TypedData(delim, DataTypes.KEYWORD, "delim") + ), + "MvZipEvaluator[leftField=Attribute[channel=0], rightField=Attribute[channel=1], delim=Attribute[channel=2]]", + DataTypes.KEYWORD, + equalTo(expected.size() == 1 ? 
expected.iterator().next() : expected) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.TEXT, DataTypes.TEXT, DataTypes.TEXT), () -> { + List left = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value()); + List right = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value()); + String delim = randomAlphaOfLengthBetween(1, 1); + List expected = calculateExpected(left, right, delim); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(left, DataTypes.TEXT, "mvLeft"), + new TestCaseSupplier.TypedData(right, DataTypes.TEXT, "mvRight"), + new TestCaseSupplier.TypedData(delim, DataTypes.TEXT, "delim") + ), + "MvZipEvaluator[leftField=Attribute[channel=0], rightField=Attribute[channel=1], delim=Attribute[channel=2]]", + DataTypes.KEYWORD, + equalTo(expected.size() == 1 ? expected.iterator().next() : expected) + ); + })); + + return parameterSuppliersFromTypedData(suppliers); + } + + @Override + protected DataType expectedType(List argTypes) { + return DataTypes.KEYWORD; + } + + @Override + protected List argSpec() { + return List.of(required(strings()), required(strings()), optional(strings())); + } + + @Override + protected Expression build(Source source, List args) { + return new MvZip(source, args.get(0), args.get(1), args.size() > 2 ? 
args.get(2) : null); + } + + private static List calculateExpected(List left, List right, String delim) { + List expected = new ArrayList<>(max(left.size(), right.size())); + int i = 0, j = 0; + while (i < left.size() && j < right.size()) { + BytesRefBuilder work = new BytesRefBuilder(); + work.append((BytesRef) left.get(i)); + work.append(new BytesRef(delim)); + work.append((BytesRef) right.get(j)); + expected.add(work.get()); + i++; + j++; + } + while (i < left.size()) { + BytesRefBuilder work = new BytesRefBuilder(); + work.append((BytesRef) left.get(i)); + expected.add(work.get()); + i++; + } + while (j < right.size()) { + BytesRefBuilder work = new BytesRefBuilder(); + work.append((BytesRef) right.get(j)); + expected.add(work.get()); + j++; + } + return expected; + } + + @Override + public void testSimpleWithNulls() { + assumeFalse("mv_zip returns null only if both left and right inputs are nulls", false); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java index 377b7fd45d78c..5c2c3abf232f5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java @@ -274,6 +274,7 @@ private static NodeStats buildNodeStats( null, null, null, + null, null ); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java index 2acdc8ae72232..54f3ce634a25a 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java +++ 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java @@ -460,6 +460,7 @@ private static NodeStats mockNodeStats() { null, null, null, + null, null ); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java index 1762b2537c455..c90e0e52c4d58 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java @@ -45,7 +45,8 @@ public class ProfilingIndexTemplateRegistry extends IndexTemplateRegistry { // version 2: Added 'profiling.host.machine' keyword mapping to profiling-hosts // version 3: Add optional component template 'profiling-ilm@custom' to all ILM-managed index templates // version 4: Added 'service.name' keyword mapping to profiling-events - public static final int INDEX_TEMPLATE_VERSION = 4; + // version 5: Add optional component template '@custom' to all index templates that reference component templates + public static final int INDEX_TEMPLATE_VERSION = 5; // history for individual indices / index templates. 
Only bump these for breaking changes that require to create a new index public static final int PROFILING_EVENTS_VERSION = 2; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java index aef7a266fff37..1748c1be86b78 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java @@ -77,8 +77,6 @@ public class Rollup extends Plugin implements ActionPlugin, PersistentTaskPlugin public static final String TASK_THREAD_POOL_NAME = RollupField.NAME + "_indexing"; - public static final String ROLLUP_TEMPLATE_VERSION_FIELD = "rollup-version"; - private final SetOnce schedulerEngine = new SetOnce<>(); private final Settings settings; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java index 87f7a3de956fc..ebdcc1ed13e1f 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java @@ -60,7 +60,7 @@ public class RollupIndexCaps implements Writeable, ToXContentFragment { ... job config, parsable by RollupJobConfig.PARSER ... 
} }, - "rollup-version": "7.0.0" + "rollup-version": "" } } */ diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java index e66bb35cce1cf..a276971762c81 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; @@ -53,7 +52,6 @@ import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.rollup.Rollup; import java.io.IOException; import java.util.Map; @@ -188,7 +186,7 @@ private static XContentBuilder createMappings(RollupJobConfig config) throws IOE .startObject("mappings") .startObject("_doc") .startObject("_meta") - .field(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, Version.CURRENT.toString()) + .field("rollup-version", "") // empty string to remain backwards compatible .startObject("_rollup") .field(config.getId(), config) .endObject() @@ -255,14 +253,6 @@ static void updateMapping( Map rollupMeta = (Map) ((Map) m).get(RollupField.ROLLUP_META); - String stringVersion = (String) ((Map) m).get(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD); - if (stringVersion == null) { - listener.onFailure( - new IllegalStateException("Could not determine version of existing rollup metadata for index 
[" + indexName + "]") - ); - return; - } - if (rollupMeta.get(job.getConfig().getId()) != null) { String msg = "Cannot create rollup job [" + job.getConfig().getId() diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java index fc5805d7ed9d1..b1455c4738623 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java @@ -8,7 +8,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -25,7 +24,6 @@ import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction; @@ -33,7 +31,6 @@ import org.elasticsearch.xpack.core.rollup.job.GroupConfig; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.rollup.Rollup; import org.mockito.ArgumentCaptor; import java.util.Collections; @@ -127,7 +124,7 @@ public void testIndexMetadata() throws InterruptedException { String mapping = requestCaptor.getValue().mappings(); // Make sure the version is present, and we have our date template (the most important aspects) - assertThat(mapping, 
containsString("\"rollup-version\":\"" + Version.CURRENT.toString() + "\"")); + assertThat(mapping, containsString("\"rollup-version\":\"\"")); assertThat(mapping, containsString("\"path_match\":\"*.date_histogram.timestamp\"")); listenerCaptor.getValue().onFailure(new ResourceAlreadyExistsException(job.getConfig().getRollupIndex())); @@ -245,38 +242,6 @@ public void testMetadataButNotRollup() { verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); } - @SuppressWarnings({ "unchecked", "rawtypes" }) - public void testNoMappingVersion() { - RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); - - ActionListener testListener = ActionListener.wrap(response -> { - fail("Listener success should not have been triggered."); - }, e -> { - assertThat( - e.getMessage(), - equalTo("Could not determine version of existing rollup metadata for index [" + job.getConfig().getRollupIndex() + "]") - ); - }); - - Logger logger = mock(Logger.class); - Client client = mock(Client.class); - - ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(ActionListener.class); - doAnswer(invocation -> { - GetMappingsResponse response = mock(GetMappingsResponse.class); - Map m = Maps.newMapWithExpectedSize(2); - m.put(RollupField.ROLLUP_META, Collections.singletonMap(job.getConfig().getId(), job.getConfig())); - MappingMetadata meta = new MappingMetadata(RollupField.TYPE_NAME, Collections.singletonMap("_meta", m)); - - when(response.getMappings()).thenReturn(Map.of(job.getConfig().getRollupIndex(), meta)); - requestCaptor.getValue().onResponse(response); - return null; - }).when(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), requestCaptor.capture()); - - TransportPutRollupJobAction.updateMapping(job, testListener, mock(PersistentTasksService.class), client, logger); - verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); - } - 
@SuppressWarnings({ "unchecked", "rawtypes" }) public void testJobAlreadyInMapping() { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random(), "foo"), Collections.emptyMap()); @@ -299,7 +264,6 @@ public void testJobAlreadyInMapping() { doAnswer(invocation -> { GetMappingsResponse response = mock(GetMappingsResponse.class); Map m = Maps.newMapWithExpectedSize(2); - m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); m.put(RollupField.ROLLUP_META, Collections.singletonMap(job.getConfig().getId(), job.getConfig())); MappingMetadata meta = new MappingMetadata(RollupField.TYPE_NAME, Collections.singletonMap("_meta", m)); @@ -339,7 +303,6 @@ public void testAddJobToMapping() { doAnswer(invocation -> { GetMappingsResponse response = mock(GetMappingsResponse.class); Map m = Maps.newMapWithExpectedSize(2); - m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); m.put(RollupField.ROLLUP_META, Collections.singletonMap(unrelatedJob.getId(), unrelatedJob)); MappingMetadata meta = new MappingMetadata(RollupField.TYPE_NAME, Collections.singletonMap("_meta", m)); diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 2250411fa7882..e65db8632062d 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -340,6 +340,7 @@ public class Constants { "cluster:monitor/nodes/info", "cluster:monitor/nodes/stats", "cluster:monitor/nodes/usage", + "cluster:monitor/allocation/stats", 
"cluster:monitor/profiling/status/get", "cluster:monitor/remote/info", "cluster:monitor/settings", diff --git a/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java b/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java index 1779fa4345a85..341e92641f641 100644 --- a/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java +++ b/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java @@ -21,8 +21,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.hamcrest.Matchers.equalTo; - /** * Parent test class for Watcher (not-YAML) based REST tests */ @@ -80,37 +78,20 @@ public final void stopWatcher() throws Exception { } public static void deleteAllWatcherData() throws IOException { - { - var queryWatchesRequest = new Request("GET", "/_watcher/_query/watches"); - var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(queryWatchesRequest)); - - int totalCount = response.evaluate("count"); - List> watches = response.evaluate("watches"); - assert watches.size() == totalCount : "number of watches returned is unequal to the total number of watches"; - for (Map watch : watches) { - String id = (String) watch.get("_id"); - var deleteWatchRequest = new Request("DELETE", "/_watcher/watch/" + id); - assertOK(ESRestTestCase.adminClient().performRequest(deleteWatchRequest)); - } - } + var queryWatchesRequest = new Request("GET", "/_watcher/_query/watches"); + var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(queryWatchesRequest)); - { - var queryWatchesRequest = new Request("GET", "/_watcher/_query/watches"); - var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(queryWatchesRequest)); - assertThat(response.evaluate("count"), equalTo(0)); + int totalCount = 
response.evaluate("count"); + List> watches = response.evaluate("watches"); + assert watches.size() == totalCount : "number of watches returned is unequal to the total number of watches"; + for (Map watch : watches) { + String id = (String) watch.get("_id"); + var deleteWatchRequest = new Request("DELETE", "/_watcher/watch/" + id); + assertOK(ESRestTestCase.adminClient().performRequest(deleteWatchRequest)); } - { - var xpackUsageRequest = new Request("GET", "/_xpack/usage"); - var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(xpackUsageRequest)); - assertThat(response.evaluate("watcher.count.active"), equalTo(0)); - assertThat(response.evaluate("watcher.count.total"), equalTo(0)); - } - - { - var deleteWatchHistoryRequest = new Request("DELETE", ".watcher-history-*"); - deleteWatchHistoryRequest.addParameter("ignore_unavailable", "true"); - ESRestTestCase.adminClient().performRequest(deleteWatchHistoryRequest); - } + var deleteWatchHistoryRequest = new Request("DELETE", ".watcher-history-*"); + deleteWatchHistoryRequest.addParameter("ignore_unavailable", "true"); + ESRestTestCase.adminClient().performRequest(deleteWatchHistoryRequest); } } diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml index b3682b05d7e68..17031abf39e02 100644 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml +++ b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml @@ -1,18 +1,21 @@ --- "Test watcher usage stats output": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/65547" - do: catch: missing watcher.delete_watch: id: "usage_stats_watch" - - do: { xpack.usage: {} } - - match: { "watcher.count.active": 0 } - - match: { 
"watcher.count.total": 0 } + - do: {xpack.usage: {}} + - set: { "watcher.count.active": watch_count_active } + - set: { "watcher.count.total": watch_count_total } - do: watcher.put_watch: id: "usage_stats_watch" - body: > + body: > { "trigger": { "schedule" : { "cron" : "0 0 0 1 * ? 2099" } @@ -44,9 +47,9 @@ } - match: { _id: "usage_stats_watch" } - - do: { xpack.usage: {} } - - match: { "watcher.count.active": 1 } - - match: { "watcher.count.total": 1 } + - do: {xpack.usage: {}} + - gt: { "watcher.count.active": $watch_count_active } + - gt: { "watcher.count.total": $watch_count_total } - gte: { "watcher.watch.action._all.active": 1 } - gte: { "watcher.watch.action.logging.active": 1 } - gte: { "watcher.watch.condition._all.active": 1 } @@ -57,3 +60,4 @@ - gte: { "watcher.watch.trigger.schedule.active": 1 } - gte: { "watcher.watch.trigger.schedule.cron.active": 1 } - gte: { "watcher.watch.trigger.schedule._all.active": 1 } + diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index 571e8912b43b2..f6e34ccb243c8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -166,7 +166,9 @@ public void clusterChanged(ClusterChangedEvent event) { if (watcherService.validate(event.state())) { previousShardRoutings.set(localAffectedShardRoutings); if (state.get() == WatcherState.STARTED) { - watcherService.reload(event.state(), "new local watcher shard allocation ids"); + watcherService.reload(event.state(), "new local watcher shard allocation ids", (exception) -> { + clearAllocationIds(); // will cause reload again + }); } else if (isStoppedOrStopping) { this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> 
this.state.set(WatcherState.STARTED), (exception) -> { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index a067b99c6bff0..5389f34212270 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -201,7 +201,7 @@ void stopExecutor() { * Reload the watcher service, does not switch the state from stopped to started, just keep going * @param state cluster state, which is needed to find out about local shards */ - void reload(ClusterState state, String reason) { + void reload(ClusterState state, String reason, Consumer exceptionConsumer) { boolean hasValidWatcherTemplates = WatcherIndexTemplateRegistry.validate(state); if (hasValidWatcherTemplates == false) { logger.warn("missing watcher index templates"); @@ -221,7 +221,10 @@ void reload(ClusterState state, String reason) { int cancelledTaskCount = executionService.clearExecutionsAndQueue(() -> {}); logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); - executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), e -> logger.error("error reloading watcher", e))); + executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), e -> { + logger.error("error reloading watcher", e); + exceptionConsumer.accept(e); + })); } /** diff --git a/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt b/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt index 89e313875c18e..2dc9b41bbba23 100644 --- a/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt +++ 
b/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt @@ -8,6 +8,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 57ec168728171..365b072a418ef 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -258,6 +258,91 @@ public void testExceptionOnStart() { assertThat(lifeCycleService.getState().get(), equalTo(WatcherState.STARTED)); } + public void testReloadWithIdenticalRoutingTable() { + /* + * This tests that the identical routing table causes reload only once. + */ + startWatcher(); + + ClusterChangedEvent[] events = masterChangeScenario(); + assertThat(events[1].previousState(), equalTo(events[0].state())); + assertFalse(events[1].routingTableChanged()); + + for (ClusterChangedEvent event : events) { + when(watcherService.validate(event.state())).thenReturn(true); + lifeCycleService.clusterChanged(event); + } + // reload should occur on the first event + verify(watcherService).reload(eq(events[0].state()), anyString(), any()); + // but it shouldn't on the second event unless routing table changes + verify(watcherService, never()).reload(eq(events[1].state()), anyString(), any()); + } + + public void testReloadWithIdenticalRoutingTableAfterException() { + /* + * This tests that even the identical routing table causes reload again if some exception (for example a timeout while loading + * watches) interrupted the previous one. 
+ */ + startWatcher(); + + ClusterChangedEvent[] events = masterChangeScenario(); + assertThat(events[1].previousState(), equalTo(events[0].state())); + assertFalse(events[1].routingTableChanged()); + + // simulate exception on the first event + doAnswer(invocation -> { + Consumer exceptionConsumer = invocation.getArgument(2); + exceptionConsumer.accept(new ElasticsearchTimeoutException(new TimeoutException("Artificial timeout"))); + return null; + }).when(watcherService).reload(eq(events[0].state()), anyString(), any()); + + for (ClusterChangedEvent event : events) { + when(watcherService.validate(event.state())).thenReturn(true); + lifeCycleService.clusterChanged(event); + } + // reload should occur on the first event but it fails + verify(watcherService).reload(eq(events[0].state()), anyString(), any()); + // reload should occur again on the second event because the previous one failed + verify(watcherService).reload(eq(events[1].state()), anyString(), any()); + } + + private ClusterChangedEvent[] masterChangeScenario() { + DiscoveryNodes nodes = new DiscoveryNodes.Builder().localNodeId("node_1").add(newNode("node_1")).add(newNode("node_2")).build(); + + Index index = new Index(Watch.INDEX, "uuid"); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + indexRoutingTableBuilder.addShard( + TestShardRouting.newShardRouting(new ShardId(index, 0), "node_1", true, ShardRoutingState.STARTED) + ); + RoutingTable routingTable = RoutingTable.builder().add(indexRoutingTableBuilder.build()).build(); + + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(Watch.INDEX) + .settings(settings(IndexVersion.current()).put(IndexMetadata.INDEX_FORMAT_SETTING.getKey(), 6)) // the internal index format, + // required + .numberOfShards(1) + .numberOfReplicas(0); + Metadata metadata = Metadata.builder() + .put(IndexTemplateMetadata.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(indexMetadataBuilder) + .build(); 
+ + ClusterState emptyState = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).metadata(metadata).build(); + ClusterState stateWithMasterNode1 = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes.withMasterNodeId("node_1")) + .metadata(metadata) + .routingTable(routingTable) + .build(); + ClusterState stateWithMasterNode2 = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes.withMasterNodeId("node_2")) + .metadata(metadata) + .routingTable(routingTable) + .build(); + + return new ClusterChangedEvent[] { + new ClusterChangedEvent("any", stateWithMasterNode1, emptyState), + new ClusterChangedEvent("any", stateWithMasterNode2, stateWithMasterNode1) }; + } + public void testNoLocalShards() { Index watchIndex = new Index(Watch.INDEX, "foo"); ShardId shardId = new ShardId(watchIndex, 0); @@ -301,7 +386,7 @@ public void testNoLocalShards() { when(watcherService.validate(eq(clusterStateWithLocalShards))).thenReturn(true); when(watcherService.validate(eq(clusterStateWithoutLocalShards))).thenReturn(false); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithLocalShards, clusterStateWithoutLocalShards)); - verify(watcherService, times(1)).reload(eq(clusterStateWithLocalShards), eq("new local watcher shard allocation ids")); + verify(watcherService, times(1)).reload(eq(clusterStateWithLocalShards), eq("new local watcher shard allocation ids"), any()); verify(watcherService, times(1)).validate(eq(clusterStateWithLocalShards)); verifyNoMoreInteractions(watcherService); @@ -380,12 +465,12 @@ public void testReplicaWasAddedOrRemoved() { when(watcherService.validate(eq(firstEvent.state()))).thenReturn(true); lifeCycleService.clusterChanged(firstEvent); - verify(watcherService).reload(eq(firstEvent.state()), anyString()); + verify(watcherService).reload(eq(firstEvent.state()), anyString(), any()); reset(watcherService); when(watcherService.validate(eq(secondEvent.state()))).thenReturn(true); 
lifeCycleService.clusterChanged(secondEvent); - verify(watcherService).reload(eq(secondEvent.state()), anyString()); + verify(watcherService).reload(eq(secondEvent.state()), anyString(), any()); } // make sure that cluster state changes can be processed on nodes that do not hold data @@ -425,7 +510,7 @@ public void testNonDataNode() { lifeCycleService.clusterChanged(new ClusterChangedEvent("any", currentState, previousState)); verify(watcherService, times(0)).pauseExecution(any()); - verify(watcherService, times(0)).reload(any(), any()); + verify(watcherService, times(0)).reload(any(), any(), any()); } public void testThatMissingWatcherIndexMetadataOnlyResetsOnce() { @@ -452,7 +537,7 @@ public void testThatMissingWatcherIndexMetadataOnlyResetsOnce() { // first add the shard allocation ids, by going from empty cs to CS with watcher index lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithWatcherIndex, clusterStateWithoutWatcherIndex)); - verify(watcherService).reload(eq(clusterStateWithWatcherIndex), anyString()); + verify(watcherService).reload(eq(clusterStateWithWatcherIndex), anyString(), any()); // now remove watches index, and ensure that pausing is only called once, no matter how often called (i.e. 
each CS update) lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutWatcherIndex, clusterStateWithWatcherIndex)); @@ -577,7 +662,7 @@ public void testWatcherReloadsOnNodeOutageWithWatcherShard() { when(watcherService.validate(any())).thenReturn(true); ClusterChangedEvent event = new ClusterChangedEvent("whatever", currentState, previousState); lifeCycleService.clusterChanged(event); - verify(watcherService).reload(eq(event.state()), anyString()); + verify(watcherService).reload(eq(event.state()), anyString(), any()); } private void startWatcher() { @@ -609,7 +694,7 @@ private void startWatcher() { lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, emptyState)); assertThat(lifeCycleService.getState().get(), is(WatcherState.STARTED)); - verify(watcherService, times(1)).reload(eq(state), anyString()); + verify(watcherService, times(1)).reload(eq(state), anyString(), any()); assertThat(lifeCycleService.shardRoutings(), hasSize(1)); // reset the mock, the user has to mock everything themselves again diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 19bac967c576a..24a4eede1b20d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -77,6 +77,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -349,12 +350,38 @@ void stopExecutor() {} ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); csBuilder.metadata(Metadata.builder()); - service.reload(csBuilder.build(), "whatever"); + 
service.reload(csBuilder.build(), "whatever", exception -> {}); verify(executionService).clearExecutionsAndQueue(any()); verify(executionService, never()).pause(any()); verify(triggerService).pauseExecution(); } + // the trigger service should not start unless watches are loaded successfully + public void testReloadingWatcherDoesNotStartTriggerServiceIfFailingToLoadWatches() { + ExecutionService executionService = mock(ExecutionService.class); + TriggerService triggerService = mock(TriggerService.class); + WatcherService service = new WatcherService( + Settings.EMPTY, + triggerService, + mock(TriggeredWatchStore.class), + executionService, + mock(WatchParser.class), + client, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ) { + @Override + void stopExecutor() {} + }; + + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + Metadata metadata = spy(Metadata.builder().build()); + when(metadata.getIndicesLookup()).thenThrow(RuntimeException.class); // simulate exception in WatcherService's private loadWatches() + + service.reload(csBuilder.metadata(metadata).build(), "whatever", exception -> {}); + verify(triggerService).pauseExecution(); + verify(triggerService, never()).start(any()); + } + private static DiscoveryNode newNode() { return DiscoveryNodeUtils.create("node"); }