diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 8de6e7d4c..2874b49fb 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -2714,6 +2714,7 @@ async def msearch( human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + include_named_queries_score: t.Optional[bool] = None, max_concurrent_searches: t.Optional[int] = None, max_concurrent_shard_requests: t.Optional[int] = None, pre_filter_shard_size: t.Optional[int] = None, @@ -2747,6 +2748,13 @@ async def msearch( when frozen. :param ignore_unavailable: If true, missing or closed indices are not included in the response. + :param include_named_queries_score: Indicates whether hit.matched_queries should + be rendered as a map that includes the name of the matched query associated + with its score (true) or as an array containing the name of the matched queries + (false) This functionality reruns each named query on every hit in a search + response. Typically, this adds a small overhead to a request. However, using + computationally expensive named queries on a large number of hits may add + significant overhead. :param max_concurrent_searches: Maximum number of concurrent searches the multi search API can execute. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests @@ -2796,6 +2804,8 @@ async def msearch( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if include_named_queries_score is not None: + __query["include_named_queries_score"] = include_named_queries_score if max_concurrent_searches is not None: __query["max_concurrent_searches"] = max_concurrent_searches if max_concurrent_shard_requests is not None: @@ -3709,6 +3719,7 @@ async def search( human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + include_named_queries_score: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] @@ -3836,6 +3847,13 @@ async def search( be ignored when frozen. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. + :param include_named_queries_score: Indicates whether hit.matched_queries should + be rendered as a map that includes the name of the matched query associated + with its score (true) or as an array containing the name of the matched queries + (false) This functionality reruns each named query on every hit in a search + response. Typically, this adds a small overhead to a request. However, using + computationally expensive named queries on a large number of hits may add + significant overhead. :param indices_boost: Boosts the _score of documents from specified indices. :param knn: Defines the approximate kNN search to run. 
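A minimal usage sketch for the `include_named_queries_score` flag added to `msearch` and `search` above; the index name, fields, and query text are hypothetical, and the same keyword argument works on the async client.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumes a local cluster

# Two named clauses; with include_named_queries_score=True each hit's
# matched_queries is rendered as a map of query name -> score instead of a list of names.
resp = client.search(
    index="my-index",  # hypothetical index
    query={
        "bool": {
            "should": [
                {"match": {"title": {"query": "elasticsearch", "_name": "title_match"}}},
                {"match": {"body": {"query": "elasticsearch", "_name": "body_match"}}},
            ]
        }
    },
    include_named_queries_score=True,
)
for hit in resp["hits"]["hits"]:
    print(hit["_id"], hit.get("matched_queries"))
```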
:param lenient: If `true`, format-based query failures (such as providing text @@ -4017,6 +4035,8 @@ async def search( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if include_named_queries_score is not None: + __query["include_named_queries_score"] = include_named_queries_score if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: @@ -4963,6 +4983,7 @@ async def update_by_query( pipeline: t.Optional[str] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, + q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, refresh: t.Optional[bool] = None, request_cache: t.Optional[bool] = None, @@ -5029,6 +5050,7 @@ async def update_by_query( parameter. :param preference: Specifies the node or shard the operation should be performed on. Random by default. + :param q: Query in the Lucene query string syntax. :param query: Specifies the documents to update using the Query DSL. :param refresh: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search. @@ -5113,6 +5135,8 @@ async def update_by_query( __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty + if q is not None: + __query["q"] = q if refresh is not None: __query["refresh"] = refresh if request_cache is not None: diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index e3221bb97..83a393e7a 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -310,7 +310,7 @@ async def count( ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ Get a document count. Provides quick access to a document count for a data stream, - an index, or an entire cluster.n/ The document count only includes live documents, + an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index 5e05a3cfe..dfd44b588 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -427,9 +427,9 @@ async def health( ``_ :param index: Comma-separated list of data streams, indices, and index aliases - used to limit the request. Wildcard expressions (*) are supported. To target + used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all - or *. + or `*`. :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param level: Can be one of cluster, indices or shards. Controls the details @@ -703,7 +703,7 @@ async def put_component_template( ``_ :param name: Name of the component template to create. Elasticsearch includes - the following built-in component templates: `logs-mappings`; 'logs-settings`; + the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. 
If you use Elastic Agent and want to overwrite one of these templates, diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 66cadcb23..0ab0e4575 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -561,7 +561,9 @@ async def create_data_stream( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Create a data stream. Creates a data stream. You must have a matching index template @@ -574,6 +576,11 @@ async def create_data_stream( `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -586,8 +593,12 @@ async def create_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", @@ -869,6 +880,7 @@ async def delete_data_stream( ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -880,6 +892,9 @@ async def delete_data_stream( are supported. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values,such as `open,hidden`. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -894,6 +909,8 @@ async def delete_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1928,6 +1945,7 @@ async def get_data_lifecycle( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1943,6 +1961,9 @@ async def get_data_lifecycle( Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param include_defaults: If `true`, return all default settings in the response. 
+ :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1959,6 +1980,8 @@ async def get_data_lifecycle( __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1988,6 +2011,7 @@ async def get_data_stream( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -2002,6 +2026,9 @@ async def get_data_stream( Supports comma-separated values, such as `open,hidden`. :param include_defaults: If true, returns all relevant default configurations for the index template. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -2021,6 +2048,8 @@ async def get_data_stream( __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -2435,7 +2464,9 @@ async def migrate_to_data_stream( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Convert an index alias to a data stream. Converts an index alias to a data stream. @@ -2450,6 +2481,11 @@ async def migrate_to_data_stream( ``_ :param name: Name of the index alias to convert to a data stream. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
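To show where the new `master_timeout`/`timeout` options on the data stream APIs land in practice, here is a minimal sketch; the stream name and durations are made up, and it assumes a matching index template already exists.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Wait up to 30s for the master connection and up to 60s for the response.
client.indices.create_data_stream(
    name="logs-myapp-default",  # hypothetical; must match an existing index template
    master_timeout="30s",
    timeout="60s",
)

# The same master_timeout knob is now also accepted by the delete/get/migrate/promote calls.
client.indices.delete_data_stream(name="logs-myapp-default", master_timeout="30s")
```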
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -2462,8 +2498,12 @@ async def migrate_to_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", @@ -2620,6 +2660,7 @@ async def promote_data_stream( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -2629,6 +2670,9 @@ async def promote_data_stream( ``_ :param name: The name of the data stream + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -2641,6 +2685,8 @@ async def promote_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index da52b3ef5..cf86f37a6 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -1041,14 +1041,14 @@ async def flush_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forces any buffered data to be processed by the job. The flush jobs API is only - applicable when sending data for analysis using the post data API. Depending - on the content of the buffer, then it might additionally calculate new results. - Both flush and close operations are similar, however the flush is more efficient - if you are expecting to send more data for analysis. When flushing, the job remains - open and is available to continue analyzing data. A close operation additionally - prunes and persists the model state to disk and the job must be opened again - before analyzing further data. + Force buffered data to be processed. The flush jobs API is only applicable when + sending data for analysis using the post data API. Depending on the content of + the buffer, then it might additionally calculate new results. Both flush and + close operations are similar, however the flush is more efficient if you are + expecting to send more data for analysis. When flushing, the job remains open + and is available to continue analyzing data. A close operation additionally prunes + and persists the model state to disk and the job must be opened again before + analyzing further data. ``_ @@ -1116,10 +1116,10 @@ async def forecast( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Predicts the future behavior of a time series by using its historical behavior. - Forecasts are not supported for jobs that perform population analysis; an error - occurs if you try to create a forecast for a job that has an `over_field_name` - in its configuration. + Predict future behavior of a time series. 
Forecasts are not supported for jobs + that perform population analysis; an error occurs if you try to create a forecast + for a job that has an `over_field_name` in its configuration. Forecasts predict + future behavior based on historical data. ``_ @@ -1201,8 +1201,8 @@ async def get_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly detection job results for one or more buckets. The API presents - a chronological view of the records, grouped by bucket. + Get anomaly detection job results for buckets. The API presents a chronological + view of the records, grouped by bucket. ``_ @@ -1297,7 +1297,7 @@ async def get_calendar_events( start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the scheduled events in calendars. + Get info about events in calendars. ``_ @@ -1363,7 +1363,7 @@ async def get_calendars( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for calendars. + Get calendar configuration info. ``_ @@ -1436,7 +1436,7 @@ async def get_categories( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly detection job results for one or more categories. + Get anomaly detection job results for categories. ``_ @@ -1518,9 +1518,9 @@ async def get_data_frame_analytics( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for data frame analytics jobs. You can get - information for multiple data frame analytics jobs in a single API request by - using a comma-separated list of data frame analytics jobs or a wildcard expression. + Get data frame analytics job configuration info. You can get information for + multiple data frame analytics jobs in a single API request by using a comma-separated + list of data frame analytics jobs or a wildcard expression. ``_ @@ -1592,7 +1592,7 @@ async def get_data_frame_analytics_stats( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for data frame analytics jobs. + Get data frame analytics jobs usage info. ``_ @@ -1657,12 +1657,12 @@ async def get_datafeed_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for datafeeds. You can get statistics for multiple - datafeeds in a single API request by using a comma-separated list of datafeeds - or a wildcard expression. You can get statistics for all datafeeds by using `_all`, - by specifying `*` as the ``, or by omitting the ``. If the - datafeed is stopped, the only information you receive is the `datafeed_id` and - the `state`. This API returns a maximum of 10,000 datafeeds. + Get datafeeds usage info. You can get statistics for multiple datafeeds in a + single API request by using a comma-separated list of datafeeds or a wildcard + expression. You can get statistics for all datafeeds by using `_all`, by specifying + `*` as the ``, or by omitting the ``. If the datafeed is stopped, + the only information you receive is the `datafeed_id` and the `state`. This API + returns a maximum of 10,000 datafeeds. ``_ @@ -1718,11 +1718,11 @@ async def get_datafeeds( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for datafeeds. You can get information for - multiple datafeeds in a single API request by using a comma-separated list of - datafeeds or a wildcard expression.
You can get information for all datafeeds - by using `_all`, by specifying `*` as the ``, or by omitting the ``. - This API returns a maximum of 10,000 datafeeds. + Get datafeeds configuration info. You can get information for multiple datafeeds + in a single API request by using a comma-separated list of datafeeds or a wildcard + expression. You can get information for all datafeeds by using `_all`, by specifying + `*` as the ``, or by omitting the ``. This API returns a maximum + of 10,000 datafeeds. ``_ @@ -1785,7 +1785,7 @@ async def get_filters( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves filters. You can get a single filter or all filters. + Get filters. You can get a single filter or all filters. ``_ @@ -1847,10 +1847,9 @@ async def get_influencers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly detection job results for one or more influencers. Influencers - are the entities that have contributed to, or are to blame for, the anomalies. - Influencer results are available only if an `influencer_field_name` is specified - in the job configuration. + Get anomaly detection job results for influencers. Influencers are the entities + that have contributed to, or are to blame for, the anomalies. Influencer results + are available only if an `influencer_field_name` is specified in the job configuration. ``_ @@ -1931,7 +1930,7 @@ async def get_job_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for anomaly detection jobs. + Get anomaly detection jobs usage info. ``_ @@ -1988,11 +1987,11 @@ async def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for anomaly detection jobs. You can get information - for multiple anomaly detection jobs in a single API request by using a group - name, a comma-separated list of jobs, or a wildcard expression. You can get information - for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, - or by omitting the ``. + Get anomaly detection jobs configuration info. You can get information for multiple + anomaly detection jobs in a single API request by using a group name, a comma-separated + list of jobs, or a wildcard expression. You can get information for all anomaly + detection jobs by using `_all`, by specifying `*` as the ``, or by omitting + the ``. ``_ @@ -2053,9 +2052,9 @@ async def get_memory_stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get information about how machine learning jobs and trained models are using - memory, on each node, both within the JVM heap, and natively, outside of the - JVM. + Get machine learning memory usage info. Get information about how machine learning + jobs and trained models are using memory, on each node, both within the JVM heap, + and natively, outside of the JVM. ``_ @@ -2110,7 +2109,7 @@ async def get_model_snapshot_upgrade_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for anomaly detection job model snapshot upgrades. + Get anomaly detection job model snapshot upgrade usage info. ``_ @@ -2181,7 +2180,7 @@ async def get_model_snapshots( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about model snapshots. + Get model snapshots info. 
``_ @@ -2282,19 +2281,19 @@ async def get_overall_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves overall bucket results that summarize the bucket results of multiple - anomaly detection jobs. The `overall_score` is calculated by combining the scores - of all the buckets within the overall bucket span. First, the maximum `anomaly_score` - per anomaly detection job in the overall bucket is calculated. Then the `top_n` - of those scores are averaged to result in the `overall_score`. This means that - you can fine-tune the `overall_score` so that it is more or less sensitive to - the number of jobs that detect an anomaly at the same time. For example, if you - set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall - bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` - is high only when all jobs detect anomalies in that overall bucket. If you set - the `bucket_span` parameter (to a value greater than its default), the `overall_score` - is the maximum `overall_score` of the overall buckets that have a span equal - to the jobs' largest bucket span. + Get overall bucket results. Retrieves overall bucket results that summarize the + bucket results of multiple anomaly detection jobs. The `overall_score` is calculated + by combining the scores of all the buckets within the overall bucket span. First, + the maximum `anomaly_score` per anomaly detection job in the overall bucket is + calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. + This means that you can fine-tune the `overall_score` so that it is more or less + sensitive to the number of jobs that detect an anomaly at the same time. For + example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket + score in the overall bucket. Alternatively, if you set `top_n` to the number + of jobs, the `overall_score` is high only when all jobs detect anomalies in that + overall bucket. If you set the `bucket_span` parameter (to a value greater than + its default), the `overall_score` is the maximum `overall_score` of the overall + buckets that have a span equal to the jobs' largest bucket span. ``_ @@ -2391,7 +2390,7 @@ async def get_records( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly records for an anomaly detection job. Records contain the detailed + Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, @@ -2494,7 +2493,7 @@ async def get_trained_models( tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for a trained model. + Get trained model configuration info. ``_ @@ -2576,9 +2575,9 @@ async def get_trained_models_stats( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for trained models. You can get usage information - for multiple trained models in a single API request by using a comma-separated - list of model IDs or a wildcard expression. + Get trained models usage info. You can get usage information for multiple trained + models in a single API request by using a comma-separated list of model IDs or + a wildcard expression.
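Since the reworded `get_overall_buckets` docstring above explains how `top_n` shapes `overall_score`, a small hypothetical sketch may help; the job IDs and threshold are invented.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# top_n=1: overall_score is the single highest bucket score across the jobs.
most_sensitive = client.ml.get_overall_buckets(
    job_id="job-1,job-2,job-3",  # hypothetical job IDs
    top_n=1,
    overall_score=75.0,          # only return overall buckets scoring >= 75
)

# top_n equal to the number of jobs: scores are high only when all jobs are anomalous together.
least_sensitive = client.ml.get_overall_buckets(
    job_id="job-1,job-2,job-3",
    top_n=3,
    overall_score=75.0,
)
```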
``_ @@ -2641,7 +2640,7 @@ async def infer_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluates a trained model. + Evaluate a trained model. ``_ @@ -2698,12 +2697,12 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns defaults and limits used by machine learning. This endpoint is designed - to be used by a user interface that needs to fully understand machine learning - configurations where some options are not specified, meaning that the defaults - should be used. This endpoint may be used to find out what those defaults are. - It also provides information about the maximum size of machine learning jobs - that could run in the current cluster configuration. + Return ML defaults and limits. Returns defaults and limits used by machine learning. + This endpoint is designed to be used by a user interface that needs to fully + understand machine learning configurations where some options are not specified, + meaning that the defaults should be used. This endpoint may be used to find out + what those defaults are. It also provides information about the maximum size + of machine learning jobs that could run in the current cluster configuration. ``_ """ @@ -2743,12 +2742,12 @@ async def open_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Open anomaly detection jobs. An anomaly detection job must be opened in order - for it to be ready to receive and analyze data. It can be opened and closed multiple - times throughout its lifecycle. When you open a new job, it starts with an empty - model. When you open an existing job, the most recent model state is automatically - loaded. The job is ready to resume its analysis from where it left off, once - new data is received. + Open anomaly detection jobs. An anomaly detection job must be opened to be ready + to receive and analyze data. It can be opened and closed multiple times throughout + its lifecycle. When you open a new job, it starts with an empty model. When you + open an existing job, the most recent model state is automatically loaded. The + job is ready to resume its analysis from where it left off, once new data is + received. ``_ @@ -2802,7 +2801,7 @@ async def post_calendar_events( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds scheduled events to a calendar. + Add scheduled events to the calendar. ``_ @@ -2858,7 +2857,7 @@ async def post_data( reset_start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Sends data to an anomaly detection job for analysis. IMPORTANT: For each job, + Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. @@ -2923,7 +2922,8 @@ async def preview_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews the extracted features used by a data frame analytics config. + Preview features used by data frame analytics. Previews the extracted features + used by a data frame analytics config. ``_ @@ -2985,7 +2985,7 @@ async def preview_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a datafeed. This API returns the first "page" of search results from + Preview a datafeed. 
This API returns the first "page" of search results from + a datafeed. You can preview an existing datafeed or provide configuration details + for a datafeed and anomaly detection job in the API. The preview shows the structure + of the data that will be passed to the anomaly detection engine. IMPORTANT: When ``_ @@ -3069,7 +3069,7 @@ async def put_calendar( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a calendar. + Create a calendar. ``_ @@ -3123,7 +3123,7 @@ async def put_calendar_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds an anomaly detection job to a calendar. + Add anomaly detection job to calendar. ``_ @@ -3195,9 +3195,9 @@ async def put_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a data frame analytics job. This API creates a data frame analytics - job that performs an analysis on the source indices and stores the outcome in - a destination index. + Create a data frame analytics job. This API creates a data frame analytics job + that performs an analysis on the source indices and stores the outcome in a destination + index. ``_ @@ -3365,8 +3365,8 @@ async def put_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis - by an anomaly detection job. You can associate only one datafeed with each anomaly + Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by + an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed @@ -3528,9 +3528,9 @@ async def put_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a filter. A filter contains a list of strings. It can be used by - one or more anomaly detection jobs. Specifically, filters are referenced in the - `custom_rules` property of detector configuration objects. + Create a filter. A filter contains a list of strings. It can be used by one or + more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` + property of detector configuration objects. ``_ @@ -3797,7 +3797,8 @@ async def put_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables you to supply a trained model that is not created by data frame analytics. + Create a trained model. Enables you to supply a trained model that is not created + by data frame analytics. ``_ @@ -3899,15 +3900,15 @@ async def put_trained_model_alias( reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a trained model alias. A trained model alias is a logical - name used to reference a single trained model. You can use aliases instead of - trained model identifiers to make it easier to reference your models. For example, - you can use aliases in inference aggregations and processors. An alias must be - unique and refer to only a single trained model. However, you can have multiple - aliases for each trained model. If you use this API to update an alias such that - it references a different trained model ID and the model uses a different type - of data frame analytics, an error occurs.
For example, this situation occurs - if you have a trained model for regression analysis and a trained model for classification + Create or update a trained model alias. A trained model alias is a logical name + used to reference a single trained model. You can use aliases instead of trained + model identifiers to make it easier to reference your models. For example, you + can use aliases in inference aggregations and processors. An alias must be unique + and refer to only a single trained model. However, you can have multiple aliases + for each trained model. If you use this API to update an alias such that it references + a different trained model ID and the model uses a different type of data frame + analytics, an error occurs. For example, this situation occurs if you have a + trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns @@ -3969,7 +3970,7 @@ async def put_trained_model_definition_part( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates part of a trained model definition. + Create part of a trained model definition. ``_ @@ -4046,7 +4047,7 @@ async def put_trained_model_vocabulary( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a trained model vocabulary. This API is supported only for natural language + Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. @@ -4104,7 +4105,7 @@ async def reset_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an anomaly detection job. All model state and results are deleted. The + Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. @@ -4160,13 +4161,13 @@ async def revert_model_snapshot( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reverts to a specific snapshot. The machine learning features react quickly to - anomalous input, learning new behaviors in data. Highly anomalous input increases - the variance in the models whilst the system learns whether this is a new step-change - in behavior or a one-off event. In the case where this anomalous input is known - to be a one-off, then it might be appropriate to reset the model state to a time - before this event. For example, you might consider reverting to a saved snapshot - after Black Friday or a critical system failure. + Revert to a snapshot. The machine learning features react quickly to anomalous + input, learning new behaviors in data. Highly anomalous input increases the variance + in the models whilst the system learns whether this is a new step-change in behavior + or a one-off event. In the case where this anomalous input is known to be a one-off, + then it might be appropriate to reset the model state to a time before this event. + For example, you might consider reverting to a saved snapshot after Black Friday + or a critical system failure. 
``_ @@ -4226,16 +4227,17 @@ async def set_upgrade_mode( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Sets a cluster wide upgrade_mode setting that prepares machine learning indices - for an upgrade. When upgrading your cluster, in some circumstances you must restart - your nodes and reindex your machine learning indices. In those circumstances, - there must be no machine learning jobs running. You can close the machine learning - jobs, do the upgrade, then open all the jobs again. Alternatively, you can use - this API to temporarily halt tasks associated with the jobs and datafeeds and - prevent new jobs from opening. You can also use this API during upgrades that - do not require you to reindex your machine learning indices, though stopping - jobs is not a requirement in that case. You can see the current value for the - upgrade_mode setting by using the get machine learning info API. + Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that + prepares machine learning indices for an upgrade. When upgrading your cluster, + in some circumstances you must restart your nodes and reindex your machine learning + indices. In those circumstances, there must be no machine learning jobs running. + You can close the machine learning jobs, do the upgrade, then open all the jobs + again. Alternatively, you can use this API to temporarily halt tasks associated + with the jobs and datafeeds and prevent new jobs from opening. You can also use + this API during upgrades that do not require you to reindex your machine learning + indices, though stopping jobs is not a requirement in that case. You can see + the current value for the upgrade_mode setting by using the get machine learning + info API. ``_ @@ -4281,16 +4283,16 @@ async def start_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a data frame analytics job. A data frame analytics job can be started - and stopped multiple times throughout its lifecycle. If the destination index - does not exist, it is created automatically the first time you start the data - frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` - settings for the destination index are copied from the source index. If there - are multiple source indices, the destination index copies the highest setting - values. The mappings for the destination index are also copied from the source - indices. If there are any mapping conflicts, the job fails to start. If the destination - index exists, it is used as is. You can therefore set up the destination index - in advance with custom settings and mappings. + Start a data frame analytics job. A data frame analytics job can be started and + stopped multiple times throughout its lifecycle. If the destination index does + not exist, it is created automatically the first time you start the data frame + analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings + for the destination index are copied from the source index. If there are multiple + source indices, the destination index copies the highest setting values. The + mappings for the destination index are also copied from the source indices. If + there are any mapping conflicts, the job fails to start. If the destination index + exists, it is used as is. You can therefore set up the destination index in advance + with custom settings and mappings. 
``_ @@ -4342,17 +4344,17 @@ async def start_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more datafeeds. A datafeed must be started in order to retrieve - data from Elasticsearch. A datafeed can be started and stopped multiple times - throughout its lifecycle. Before you can start a datafeed, the anomaly detection - job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, - it continues processing input data from the next millisecond after it was stopped. - If new data was indexed for that exact millisecond between stopping and starting, - it will be ignored. When Elasticsearch security features are enabled, your datafeed - remembers which roles the last user to create or update it had at the time of - creation or update and runs the query using those same roles. If you provided - secondary authorization headers when you created or updated the datafeed, those - credentials are used instead. + Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. + A datafeed can be started and stopped multiple times throughout its lifecycle. + Before you can start a datafeed, the anomaly detection job must be open. Otherwise, + an error occurs. If you restart a stopped datafeed, it continues processing input + data from the next millisecond after it was stopped. If new data was indexed + for that exact millisecond between stopping and starting, it will be ignored. + When Elasticsearch security features are enabled, your datafeed remembers which + roles the last user to create or update it had at the time of creation or update + and runs the query using those same roles. If you provided secondary authorization + headers when you created or updated the datafeed, those credentials are used + instead. ``_ @@ -4421,8 +4423,8 @@ async def start_trained_model_deployment( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a trained model deployment, which allocates the model to every machine - learning node. + Start a trained model deployment. It allocates the model to every machine learning + node. ``_ @@ -4505,8 +4507,8 @@ async def stop_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more data frame analytics jobs. A data frame analytics job can be - started and stopped multiple times throughout its lifecycle. + Stop data frame analytics jobs. A data frame analytics job can be started and + stopped multiple times throughout its lifecycle. ``_ @@ -4571,9 +4573,8 @@ async def stop_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data - from Elasticsearch. A datafeed can be started and stopped multiple times throughout - its lifecycle. + Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. + A datafeed can be started and stopped multiple times throughout its lifecycle. ``_ @@ -4635,7 +4636,7 @@ async def stop_trained_model_deployment( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops a trained model deployment. + Stop a trained model deployment. ``_ @@ -4700,7 +4701,7 @@ async def update_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates an existing data frame analytics job. + Update a data frame analytics job. 
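The reworded `start_datafeed` docstring above notes that the anomaly detection job must be open before its datafeed starts; a minimal sketch of that ordering, with invented IDs, looks like this.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# The job has to be open first, otherwise start_datafeed returns an error.
client.ml.open_job(job_id="my-anomaly-job")         # hypothetical job ID
client.ml.start_datafeed(
    datafeed_id="datafeed-my-anomaly-job",          # hypothetical datafeed ID
    start="2024-01-01T00:00:00Z",                   # optional start time
)
```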
``_ @@ -4808,11 +4809,11 @@ async def update_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the properties of a datafeed. You must stop and start the datafeed for - the changes to be applied. When Elasticsearch security features are enabled, - your datafeed remembers which roles the user who updated it had at the time of - the update and runs the query using those same roles. If you provide secondary - authorization headers, those credentials are used instead. + Update a datafeed. You must stop and start the datafeed for the changes to be + applied. When Elasticsearch security features are enabled, your datafeed remembers + which roles the user who updated it had at the time of the update and runs the + query using those same roles. If you provide secondary authorization headers, + those credentials are used instead. ``_ @@ -4975,7 +4976,8 @@ async def update_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the description of a filter, adds items, or removes items from the list. + Update a filter. Updates the description of a filter, adds items, or removes + items from the list. ``_ @@ -5065,7 +5067,8 @@ async def update_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of an anomaly detection job. + Update an anomaly detection job. Updates certain properties of an anomaly detection + job. ``_ @@ -5193,7 +5196,7 @@ async def update_model_snapshot( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a snapshot. + Update a snapshot. Updates certain properties of a snapshot. ``_ @@ -5254,8 +5257,7 @@ async def update_trained_model_deployment( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a trained model deployment, which allocates the model to every machine - learning node. + Update a trained model deployment. ``_ @@ -5314,13 +5316,14 @@ async def upgrade_job_snapshot( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades an anomaly detection model snapshot to the latest major version. Over - time, older snapshot formats are deprecated and removed. Anomaly detection jobs - support only snapshots that are from the current or previous major version. This - API provides a means to upgrade a snapshot to the current major version. This - aids in preparing the cluster for an upgrade to the next major version. Only - one snapshot per anomaly detection job can be upgraded at a time and the upgraded - snapshot cannot be the current snapshot of the anomaly detection job. + Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest + major version. Over time, older snapshot formats are deprecated and removed. + Anomaly detection jobs support only snapshots that are from the current or previous + major version. This API provides a means to upgrade a snapshot to the current + major version. This aids in preparing the cluster for an upgrade to the next + major version. Only one snapshot per anomaly detection job can be upgraded at + a time and the upgraded snapshot cannot be the current snapshot of the anomaly + detection job. 
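As a companion to the reworded `upgrade_job_snapshot` summary above ("Upgrade a snapshot"), a short hypothetical call; the job and snapshot IDs are invented.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

client.ml.upgrade_job_snapshot(
    job_id="my-anomaly-job",     # hypothetical job
    snapshot_id="1575402237",    # hypothetical snapshot ID
    wait_for_completion=True,    # block until the upgrade finishes
)
```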
``_ diff --git a/elasticsearch/_async/client/query_rules.py b/elasticsearch/_async/client/query_rules.py index 2eb06da77..5f9a931ee 100644 --- a/elasticsearch/_async/client/query_rules.py +++ b/elasticsearch/_async/client/query_rules.py @@ -261,7 +261,7 @@ async def put_rule( criteria: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, - type: t.Optional[t.Union[str, t.Literal["pinned"]]] = None, + type: t.Optional[t.Union[str, t.Literal["exclude", "pinned"]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, diff --git a/elasticsearch/_async/client/synonyms.py b/elasticsearch/_async/client/synonyms.py index 0f6ec9137..153c552af 100644 --- a/elasticsearch/_async/client/synonyms.py +++ b/elasticsearch/_async/client/synonyms.py @@ -262,7 +262,9 @@ async def put_synonym( self, *, id: str, - synonyms_set: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, + synonyms_set: t.Optional[ + t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] + ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 59272260f..497172aa3 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -2712,6 +2712,7 @@ def msearch( human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + include_named_queries_score: t.Optional[bool] = None, max_concurrent_searches: t.Optional[int] = None, max_concurrent_shard_requests: t.Optional[int] = None, pre_filter_shard_size: t.Optional[int] = None, @@ -2745,6 +2746,13 @@ def msearch( when frozen. :param ignore_unavailable: If true, missing or closed indices are not included in the response. + :param include_named_queries_score: Indicates whether hit.matched_queries should + be rendered as a map that includes the name of the matched query associated + with its score (true) or as an array containing the name of the matched queries + (false) This functionality reruns each named query on every hit in a search + response. Typically, this adds a small overhead to a request. However, using + computationally expensive named queries on a large number of hits may add + significant overhead. :param max_concurrent_searches: Maximum number of concurrent searches the multi search API can execute. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests @@ -2794,6 +2802,8 @@ def msearch( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if include_named_queries_score is not None: + __query["include_named_queries_score"] = include_named_queries_score if max_concurrent_searches is not None: __query["max_concurrent_searches"] = max_concurrent_searches if max_concurrent_shard_requests is not None: @@ -3707,6 +3717,7 @@ def search( human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, + include_named_queries_score: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] @@ -3834,6 +3845,13 @@ def search( be ignored when frozen. 
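The `put_rule` change above widens the `type` literal from just `"pinned"` to also accept `"exclude"`. A hedged sketch of what such a rule might look like follows; the ruleset/rule IDs, criteria, and document IDs are all hypothetical, and the exact criteria/actions shape should be checked against the query rules docs.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

client.query_rules.put_rule(
    ruleset_id="my-ruleset",        # hypothetical ruleset
    rule_id="exclude-clearance",    # hypothetical rule
    type="exclude",                 # newly allowed alongside "pinned"
    criteria=[
        {"type": "exact", "metadata": "user_query", "values": ["clearance"]}
    ],
    actions={"ids": ["discontinued-001"]},  # documents to exclude from results
)
```

The `put_synonym` change in the same hunk similarly relaxes `synonyms_set` to accept either a single synonym rule object or a list of them.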
:param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. + :param include_named_queries_score: Indicates whether hit.matched_queries should + be rendered as a map that includes the name of the matched query associated + with its score (true) or as an array containing the name of the matched queries + (false) This functionality reruns each named query on every hit in a search + response. Typically, this adds a small overhead to a request. However, using + computationally expensive named queries on a large number of hits may add + significant overhead. :param indices_boost: Boosts the _score of documents from specified indices. :param knn: Defines the approximate kNN search to run. :param lenient: If `true`, format-based query failures (such as providing text @@ -4015,6 +4033,8 @@ def search( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if include_named_queries_score is not None: + __query["include_named_queries_score"] = include_named_queries_score if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: @@ -4961,6 +4981,7 @@ def update_by_query( pipeline: t.Optional[str] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, + q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, refresh: t.Optional[bool] = None, request_cache: t.Optional[bool] = None, @@ -5027,6 +5048,7 @@ def update_by_query( parameter. :param preference: Specifies the node or shard the operation should be performed on. Random by default. + :param q: Query in the Lucene query string syntax. :param query: Specifies the documents to update using the Query DSL. :param refresh: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search. @@ -5111,6 +5133,8 @@ def update_by_query( __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty + if q is not None: + __query["q"] = q if refresh is not None: __query["refresh"] = refresh if request_cache is not None: diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 0ec202fed..067ededb6 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -310,7 +310,7 @@ def count( ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ Get a document count. Provides quick access to a document count for a data stream, - an index, or an entire cluster.n/ The document count only includes live documents, + an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index fac228536..19dc76b99 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -427,9 +427,9 @@ def health( ``_ :param index: Comma-separated list of data streams, indices, and index aliases - used to limit the request. Wildcard expressions (*) are supported. To target + used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all - or *. + or `*`. 
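To ground the new `q` parameter on `update_by_query` shown above (Lucene query string syntax), a minimal hypothetical example; the index and field names are invented.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Select documents with a Lucene query string instead of a request-body query.
client.update_by_query(
    index="my-index",            # hypothetical index
    q="user.id:kimchy AND status:active",
    conflicts="proceed",         # keep going if a document was updated concurrently
    refresh=True,
)
```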
:param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param level: Can be one of cluster, indices or shards. Controls the details @@ -703,7 +703,7 @@ def put_component_template( ``_ :param name: Name of the component template to create. Elasticsearch includes - the following built-in component templates: `logs-mappings`; 'logs-settings`; + the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 51c98b84e..3142cdbeb 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -561,7 +561,9 @@ def create_data_stream( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Create a data stream. Creates a data stream. You must have a matching index template @@ -574,6 +576,11 @@ def create_data_stream( `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -586,8 +593,12 @@ def create_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", @@ -869,6 +880,7 @@ def delete_data_stream( ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -880,6 +892,9 @@ def delete_data_stream( are supported. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values,such as `open,hidden`. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -894,6 +909,8 @@ def delete_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1928,6 +1945,7 @@ def get_data_lifecycle( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1943,6 +1961,9 @@ def get_data_lifecycle( Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param include_defaults: If `true`, return all default settings in the response. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1959,6 +1980,8 @@ def get_data_lifecycle( __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1988,6 +2011,7 @@ def get_data_stream( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -2002,6 +2026,9 @@ def get_data_stream( Supports comma-separated values, such as `open,hidden`. :param include_defaults: If true, returns all relevant default configurations for the index template. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -2021,6 +2048,8 @@ def get_data_stream( __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -2435,7 +2464,9 @@ def migrate_to_data_stream( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Convert an index alias to a data stream. Converts an index alias to a data stream. @@ -2450,6 +2481,11 @@ def migrate_to_data_stream( ``_ :param name: Name of the index alias to convert to a data stream. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. 
If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -2462,8 +2498,12 @@ def migrate_to_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", @@ -2620,6 +2660,7 @@ def promote_data_stream( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -2629,6 +2670,9 @@ def promote_data_stream( ``_ :param name: The name of the data stream + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -2641,6 +2685,8 @@ def promote_data_stream( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index 76b1fd741..b157260ae 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -1041,14 +1041,14 @@ def flush_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forces any buffered data to be processed by the job. The flush jobs API is only - applicable when sending data for analysis using the post data API. Depending - on the content of the buffer, then it might additionally calculate new results. - Both flush and close operations are similar, however the flush is more efficient - if you are expecting to send more data for analysis. When flushing, the job remains - open and is available to continue analyzing data. A close operation additionally - prunes and persists the model state to disk and the job must be opened again - before analyzing further data. + Force buffered data to be processed. The flush jobs API is only applicable when + sending data for analysis using the post data API. Depending on the content of + the buffer, then it might additionally calculate new results. Both flush and + close operations are similar, however the flush is more efficient if you are + expecting to send more data for analysis. When flushing, the job remains open + and is available to continue analyzing data. A close operation additionally prunes + and persists the model state to disk and the job must be opened again before + analyzing further data. ``_ @@ -1116,10 +1116,10 @@ def forecast( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Predicts the future behavior of a time series by using its historical behavior. - Forecasts are not supported for jobs that perform population analysis; an error - occurs if you try to create a forecast for a job that has an `over_field_name` - in its configuration. + Predict future behavior of a time series. 
Forecasts are not supported for jobs + that perform population analysis; an error occurs if you try to create a forecast + for a job that has an `over_field_name` in its configuration. Forecasts predict + future behavior based on historical data. ``_ @@ -1201,8 +1201,8 @@ def get_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly detection job results for one or more buckets. The API presents - a chronological view of the records, grouped by bucket. + Get anomaly detection job results for buckets. The API presents a chronological + view of the records, grouped by bucket. ``_ @@ -1297,7 +1297,7 @@ def get_calendar_events( start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the scheduled events in calendars. + Get info about events in calendars. ``_ @@ -1363,7 +1363,7 @@ def get_calendars( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for calendars. + Get calendar configuration info. ``_ @@ -1436,7 +1436,7 @@ def get_categories( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly detection job results for one or more categories. + Get anomaly detection job results for categories. ``_ @@ -1518,9 +1518,9 @@ def get_data_frame_analytics( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for data frame analytics jobs. You can get - information for multiple data frame analytics jobs in a single API request by - using a comma-separated list of data frame analytics jobs or a wildcard expression. + Get data frame analytics job configuration info. You can get information for + multiple data frame analytics jobs in a single API request by using a comma-separated + list of data frame analytics jobs or a wildcard expression. ``_ @@ -1592,7 +1592,7 @@ def get_data_frame_analytics_stats( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for data frame analytics jobs. + Get data frame analytics jobs usage info. ``_ @@ -1657,12 +1657,12 @@ def get_datafeed_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for datafeeds. You can get statistics for multiple - datafeeds in a single API request by using a comma-separated list of datafeeds - or a wildcard expression. You can get statistics for all datafeeds by using `_all`, - by specifying `*` as the ``, or by omitting the ``. If the - datafeed is stopped, the only information you receive is the `datafeed_id` and - the `state`. This API returns a maximum of 10,000 datafeeds. + Get datafeeds usage info. You can get statistics for multiple datafeeds in a + single API request by using a comma-separated list of datafeeds or a wildcard + expression. You can get statistics for all datafeeds by using `_all`, by specifying + `*` as the ``, or by omitting the ``. If the datafeed is stopped, + the only information you receive is the `datafeed_id` and the `state`. This API + returns a maximum of 10,000 datafeeds. ``_ @@ -1718,11 +1718,11 @@ def get_datafeeds( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for datafeeds. You can get information for - multiple datafeeds in a single API request by using a comma-separated list of - datafeeds or a wildcard expression.
You can get information for all datafeeds - by using `_all`, by specifying `*` as the ``, or by omitting the ``. - This API returns a maximum of 10,000 datafeeds. + Get datafeeds configuration info. You can get information for multiple datafeeds + in a single API request by using a comma-separated list of datafeeds or a wildcard + expression. You can get information for all datafeeds by using `_all`, by specifying + `*` as the ``, or by omitting the ``. This API returns a maximum + of 10,000 datafeeds. ``_ @@ -1785,7 +1785,7 @@ def get_filters( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves filters. You can get a single filter or all filters. + Get filters. You can get a single filter or all filters. ``_ @@ -1847,10 +1847,9 @@ def get_influencers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly detection job results for one or more influencers. Influencers - are the entities that have contributed to, or are to blame for, the anomalies. - Influencer results are available only if an `influencer_field_name` is specified - in the job configuration. + Get anomaly detection job results for influencers. Influencers are the entities + that have contributed to, or are to blame for, the anomalies. Influencer results + are available only if an `influencer_field_name` is specified in the job configuration. ``_ @@ -1931,7 +1930,7 @@ def get_job_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for anomaly detection jobs. + Get anomaly detection jobs usage info. ``_ @@ -1988,11 +1987,11 @@ def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for anomaly detection jobs. You can get information - for multiple anomaly detection jobs in a single API request by using a group - name, a comma-separated list of jobs, or a wildcard expression. You can get information - for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, - or by omitting the ``. + Get anomaly detection jobs configuration info. You can get information for multiple + anomaly detection jobs in a single API request by using a group name, a comma-separated + list of jobs, or a wildcard expression. You can get information for all anomaly + detection jobs by using `_all`, by specifying `*` as the ``, or by omitting + the ``. ``_ @@ -2053,9 +2052,9 @@ def get_memory_stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get information about how machine learning jobs and trained models are using - memory, on each node, both within the JVM heap, and natively, outside of the - JVM. + Get machine learning memory usage info. Get information about how machine learning + jobs and trained models are using memory, on each node, both within the JVM heap, + and natively, outside of the JVM. ``_ @@ -2110,7 +2109,7 @@ def get_model_snapshot_upgrade_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for anomaly detection job model snapshot upgrades. + Get anomaly detection job model snapshot upgrade usage info. ``_ @@ -2181,7 +2180,7 @@ def get_model_snapshots( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about model snapshots. + Get model snapshots info. 
``_ @@ -2282,19 +2281,19 @@ def get_overall_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves overall bucket results that summarize the bucket results of multiple - anomaly detection jobs. The `overall_score` is calculated by combining the scores - of all the buckets within the overall bucket span. First, the maximum `anomaly_score` - per anomaly detection job in the overall bucket is calculated. Then the `top_n` - of those scores are averaged to result in the `overall_score`. This means that - you can fine-tune the `overall_score` so that it is more or less sensitive to - the number of jobs that detect an anomaly at the same time. For example, if you - set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall - bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` - is high only when all jobs detect anomalies in that overall bucket. If you set - the `bucket_span` parameter (to a value greater than its default), the `overall_score` - is the maximum `overall_score` of the overall buckets that have a span equal - to the jobs' largest bucket span. + Get overall bucket results. Retrieves overall bucket results that summarize the + bucket results of multiple anomaly detection jobs. The `overall_score` is calculated + by combining the scores of all the buckets within the overall bucket span. First, + the maximum `anomaly_score` per anomaly detection job in the overall bucket is + calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. + This means that you can fine-tune the `overall_score` so that it is more or less + sensitive to the number of jobs that detect an anomaly at the same time. For + example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket + score in the overall bucket. Alternatively, if you set `top_n` to the number + of jobs, the `overall_score` is high only when all jobs detect anomalies in that + overall bucket. If you set the `bucket_span` parameter (to a value greater than + its default), the `overall_score` is the maximum `overall_score` of the overall + buckets that have a span equal to the jobs' largest bucket span. ``_ @@ -2391,7 +2390,7 @@ def get_records( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves anomaly records for an anomaly detection job. Records contain the detailed + Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, @@ -2494,7 +2493,7 @@ def get_trained_models( tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves configuration information for a trained model. + Get trained model configuration info. ``_ @@ -2576,9 +2575,9 @@ def get_trained_models_stats( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves usage information for trained models. You can get usage information - for multiple trained models in a single API request by using a comma-separated - list of model IDs or a wildcard expression. + Get trained models usage info. You can get usage information for multiple trained + models in a single API request by using a comma-separated list of model IDs or + a wildcard expression.
``_ @@ -2641,7 +2640,7 @@ def infer_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluates a trained model. + Evaluate a trained model. ``_ @@ -2698,12 +2697,12 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns defaults and limits used by machine learning. This endpoint is designed - to be used by a user interface that needs to fully understand machine learning - configurations where some options are not specified, meaning that the defaults - should be used. This endpoint may be used to find out what those defaults are. - It also provides information about the maximum size of machine learning jobs - that could run in the current cluster configuration. + Return ML defaults and limits. Returns defaults and limits used by machine learning. + This endpoint is designed to be used by a user interface that needs to fully + understand machine learning configurations where some options are not specified, + meaning that the defaults should be used. This endpoint may be used to find out + what those defaults are. It also provides information about the maximum size + of machine learning jobs that could run in the current cluster configuration. ``_ """ @@ -2743,12 +2742,12 @@ def open_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Open anomaly detection jobs. An anomaly detection job must be opened in order - for it to be ready to receive and analyze data. It can be opened and closed multiple - times throughout its lifecycle. When you open a new job, it starts with an empty - model. When you open an existing job, the most recent model state is automatically - loaded. The job is ready to resume its analysis from where it left off, once - new data is received. + Open anomaly detection jobs. An anomaly detection job must be opened to be ready + to receive and analyze data. It can be opened and closed multiple times throughout + its lifecycle. When you open a new job, it starts with an empty model. When you + open an existing job, the most recent model state is automatically loaded. The + job is ready to resume its analysis from where it left off, once new data is + received. ``_ @@ -2802,7 +2801,7 @@ def post_calendar_events( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds scheduled events to a calendar. + Add scheduled events to the calendar. ``_ @@ -2858,7 +2857,7 @@ def post_data( reset_start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Sends data to an anomaly detection job for analysis. IMPORTANT: For each job, + Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. @@ -2923,7 +2922,8 @@ def preview_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews the extracted features used by a data frame analytics config. + Preview features used by data frame analytics. Previews the extracted features + used by a data frame analytics config. ``_ @@ -2985,7 +2985,7 @@ def preview_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Previews a datafeed. This API returns the first "page" of search results from + Preview a datafeed. This API returns the first "page" of search results from a datafeed. 
You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When @@ -3069,7 +3069,7 @@ def put_calendar( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a calendar. + Create a calendar. ``_ @@ -3123,7 +3123,7 @@ def put_calendar_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds an anomaly detection job to a calendar. + Add anomaly detection job to calendar. ``_ @@ -3195,9 +3195,9 @@ def put_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a data frame analytics job. This API creates a data frame analytics - job that performs an analysis on the source indices and stores the outcome in - a destination index. + Create a data frame analytics job. This API creates a data frame analytics job + that performs an analysis on the source indices and stores the outcome in a destination + index. ``_ @@ -3365,8 +3365,8 @@ def put_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis - by an anomaly detection job. You can associate only one datafeed with each anomaly + Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by + an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed @@ -3528,9 +3528,9 @@ def put_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Instantiates a filter. A filter contains a list of strings. It can be used by - one or more anomaly detection jobs. Specifically, filters are referenced in the - `custom_rules` property of detector configuration objects. + Create a filter. A filter contains a list of strings. It can be used by one or + more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` + property of detector configuration objects. ``_ @@ -3797,7 +3797,8 @@ def put_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables you to supply a trained model that is not created by data frame analytics. + Create a trained model. Enables you to supply a trained model that is not created + by data frame analytics. ``_ @@ -3899,15 +3900,15 @@ def put_trained_model_alias( reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a trained model alias. A trained model alias is a logical - name used to reference a single trained model. You can use aliases instead of - trained model identifiers to make it easier to reference your models. For example, - you can use aliases in inference aggregations and processors. An alias must be - unique and refer to only a single trained model. However, you can have multiple - aliases for each trained model. If you use this API to update an alias such that - it references a different trained model ID and the model uses a different type - of data frame analytics, an error occurs. For example, this situation occurs - if you have a trained model for regression analysis and a trained model for classification
For example, this situation occurs - if you have a trained model for regression analysis and a trained model for classification + Create or update a trained model alias. A trained model alias is a logical name + used to reference a single trained model. You can use aliases instead of trained + model identifiers to make it easier to reference your models. For example, you + can use aliases in inference aggregations and processors. An alias must be unique + and refer to only a single trained model. However, you can have multiple aliases + for each trained model. If you use this API to update an alias such that it references + a different trained model ID and the model uses a different type of data frame + analytics, an error occurs. For example, this situation occurs if you have a + trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns @@ -3969,7 +3970,7 @@ def put_trained_model_definition_part( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates part of a trained model definition. + Create part of a trained model definition. ``_ @@ -4046,7 +4047,7 @@ def put_trained_model_vocabulary( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a trained model vocabulary. This API is supported only for natural language + Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. @@ -4104,7 +4105,7 @@ def reset_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets an anomaly detection job. All model state and results are deleted. The + Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. @@ -4160,13 +4161,13 @@ def revert_model_snapshot( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reverts to a specific snapshot. The machine learning features react quickly to - anomalous input, learning new behaviors in data. Highly anomalous input increases - the variance in the models whilst the system learns whether this is a new step-change - in behavior or a one-off event. In the case where this anomalous input is known - to be a one-off, then it might be appropriate to reset the model state to a time - before this event. For example, you might consider reverting to a saved snapshot - after Black Friday or a critical system failure. + Revert to a snapshot. The machine learning features react quickly to anomalous + input, learning new behaviors in data. Highly anomalous input increases the variance + in the models whilst the system learns whether this is a new step-change in behavior + or a one-off event. In the case where this anomalous input is known to be a one-off, + then it might be appropriate to reset the model state to a time before this event. + For example, you might consider reverting to a saved snapshot after Black Friday + or a critical system failure. 
``_ @@ -4226,16 +4227,17 @@ def set_upgrade_mode( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Sets a cluster wide upgrade_mode setting that prepares machine learning indices - for an upgrade. When upgrading your cluster, in some circumstances you must restart - your nodes and reindex your machine learning indices. In those circumstances, - there must be no machine learning jobs running. You can close the machine learning - jobs, do the upgrade, then open all the jobs again. Alternatively, you can use - this API to temporarily halt tasks associated with the jobs and datafeeds and - prevent new jobs from opening. You can also use this API during upgrades that - do not require you to reindex your machine learning indices, though stopping - jobs is not a requirement in that case. You can see the current value for the - upgrade_mode setting by using the get machine learning info API. + Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that + prepares machine learning indices for an upgrade. When upgrading your cluster, + in some circumstances you must restart your nodes and reindex your machine learning + indices. In those circumstances, there must be no machine learning jobs running. + You can close the machine learning jobs, do the upgrade, then open all the jobs + again. Alternatively, you can use this API to temporarily halt tasks associated + with the jobs and datafeeds and prevent new jobs from opening. You can also use + this API during upgrades that do not require you to reindex your machine learning + indices, though stopping jobs is not a requirement in that case. You can see + the current value for the upgrade_mode setting by using the get machine learning + info API. ``_ @@ -4281,16 +4283,16 @@ def start_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a data frame analytics job. A data frame analytics job can be started - and stopped multiple times throughout its lifecycle. If the destination index - does not exist, it is created automatically the first time you start the data - frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` - settings for the destination index are copied from the source index. If there - are multiple source indices, the destination index copies the highest setting - values. The mappings for the destination index are also copied from the source - indices. If there are any mapping conflicts, the job fails to start. If the destination - index exists, it is used as is. You can therefore set up the destination index - in advance with custom settings and mappings. + Start a data frame analytics job. A data frame analytics job can be started and + stopped multiple times throughout its lifecycle. If the destination index does + not exist, it is created automatically the first time you start the data frame + analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings + for the destination index are copied from the source index. If there are multiple + source indices, the destination index copies the highest setting values. The + mappings for the destination index are also copied from the source indices. If + there are any mapping conflicts, the job fails to start. If the destination index + exists, it is used as is. You can therefore set up the destination index in advance + with custom settings and mappings. 
``_ @@ -4342,17 +4344,17 @@ def start_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts one or more datafeeds. A datafeed must be started in order to retrieve - data from Elasticsearch. A datafeed can be started and stopped multiple times - throughout its lifecycle. Before you can start a datafeed, the anomaly detection - job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, - it continues processing input data from the next millisecond after it was stopped. - If new data was indexed for that exact millisecond between stopping and starting, - it will be ignored. When Elasticsearch security features are enabled, your datafeed - remembers which roles the last user to create or update it had at the time of - creation or update and runs the query using those same roles. If you provided - secondary authorization headers when you created or updated the datafeed, those - credentials are used instead. + Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. + A datafeed can be started and stopped multiple times throughout its lifecycle. + Before you can start a datafeed, the anomaly detection job must be open. Otherwise, + an error occurs. If you restart a stopped datafeed, it continues processing input + data from the next millisecond after it was stopped. If new data was indexed + for that exact millisecond between stopping and starting, it will be ignored. + When Elasticsearch security features are enabled, your datafeed remembers which + roles the last user to create or update it had at the time of creation or update + and runs the query using those same roles. If you provided secondary authorization + headers when you created or updated the datafeed, those credentials are used + instead. ``_ @@ -4421,8 +4423,8 @@ def start_trained_model_deployment( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a trained model deployment, which allocates the model to every machine - learning node. + Start a trained model deployment. It allocates the model to every machine learning + node. ``_ @@ -4505,8 +4507,8 @@ def stop_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more data frame analytics jobs. A data frame analytics job can be - started and stopped multiple times throughout its lifecycle. + Stop data frame analytics jobs. A data frame analytics job can be started and + stopped multiple times throughout its lifecycle. ``_ @@ -4571,9 +4573,8 @@ def stop_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data - from Elasticsearch. A datafeed can be started and stopped multiple times throughout - its lifecycle. + Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. + A datafeed can be started and stopped multiple times throughout its lifecycle. ``_ @@ -4635,7 +4636,7 @@ def stop_trained_model_deployment( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops a trained model deployment. + Stop a trained model deployment. ``_ @@ -4700,7 +4701,7 @@ def update_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates an existing data frame analytics job. + Update a data frame analytics job. 
``_ @@ -4808,11 +4809,11 @@ def update_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the properties of a datafeed. You must stop and start the datafeed for - the changes to be applied. When Elasticsearch security features are enabled, - your datafeed remembers which roles the user who updated it had at the time of - the update and runs the query using those same roles. If you provide secondary - authorization headers, those credentials are used instead. + Update a datafeed. You must stop and start the datafeed for the changes to be + applied. When Elasticsearch security features are enabled, your datafeed remembers + which roles the user who updated it had at the time of the update and runs the + query using those same roles. If you provide secondary authorization headers, + those credentials are used instead. ``_ @@ -4975,7 +4976,8 @@ def update_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the description of a filter, adds items, or removes items from the list. + Update a filter. Updates the description of a filter, adds items, or removes + items from the list. ``_ @@ -5065,7 +5067,8 @@ def update_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of an anomaly detection job. + Update an anomaly detection job. Updates certain properties of an anomaly detection + job. ``_ @@ -5193,7 +5196,7 @@ def update_model_snapshot( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates certain properties of a snapshot. + Update a snapshot. Updates certain properties of a snapshot. ``_ @@ -5254,8 +5257,7 @@ def update_trained_model_deployment( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts a trained model deployment, which allocates the model to every machine - learning node. + Update a trained model deployment. ``_ @@ -5314,13 +5316,14 @@ def upgrade_job_snapshot( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades an anomaly detection model snapshot to the latest major version. Over - time, older snapshot formats are deprecated and removed. Anomaly detection jobs - support only snapshots that are from the current or previous major version. This - API provides a means to upgrade a snapshot to the current major version. This - aids in preparing the cluster for an upgrade to the next major version. Only - one snapshot per anomaly detection job can be upgraded at a time and the upgraded - snapshot cannot be the current snapshot of the anomaly detection job. + Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest + major version. Over time, older snapshot formats are deprecated and removed. + Anomaly detection jobs support only snapshots that are from the current or previous + major version. This API provides a means to upgrade a snapshot to the current + major version. This aids in preparing the cluster for an upgrade to the next + major version. Only one snapshot per anomaly detection job can be upgraded at + a time and the upgraded snapshot cannot be the current snapshot of the anomaly + detection job. 
``_ diff --git a/elasticsearch/_sync/client/query_rules.py b/elasticsearch/_sync/client/query_rules.py index 088199392..7b66ca7ed 100644 --- a/elasticsearch/_sync/client/query_rules.py +++ b/elasticsearch/_sync/client/query_rules.py @@ -261,7 +261,7 @@ def put_rule( criteria: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, - type: t.Optional[t.Union[str, t.Literal["pinned"]]] = None, + type: t.Optional[t.Union[str, t.Literal["exclude", "pinned"]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, diff --git a/elasticsearch/_sync/client/synonyms.py b/elasticsearch/_sync/client/synonyms.py index 2455c3f3d..9e2b66ee6 100644 --- a/elasticsearch/_sync/client/synonyms.py +++ b/elasticsearch/_sync/client/synonyms.py @@ -262,7 +262,9 @@ def put_synonym( self, *, id: str, - synonyms_set: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, + synonyms_set: t.Optional[ + t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] + ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None,
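Reviewer note: a minimal usage sketch of the two query-string parameters this change exposes on the synchronous client, `include_named_queries_score` on `search`/`msearch` and `q` on `update_by_query`. The cluster URL, index name, fields, and `_name` labels below are hypothetical placeholders, and a reachable cluster is assumed; this illustrates the new keyword arguments rather than forming part of the generated code.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# With include_named_queries_score=True, each hit's matched_queries is returned
# as a map of query name -> score instead of a plain list of names.
resp = client.search(
    index="my-index",  # hypothetical index
    query={
        "bool": {
            "should": [
                {"match": {"title": {"query": "elasticsearch", "_name": "title_match"}}},
                {"match": {"body": {"query": "elasticsearch", "_name": "body_match"}}},
            ]
        }
    },
    include_named_queries_score=True,
)
for hit in resp["hits"]["hits"]:
    print(hit["_id"], hit.get("matched_queries"))

# The new q= parameter accepts a Lucene query string, mirroring the REST-level
# ?q= option now exposed by the generated update_by_query method.
client.update_by_query(index="my-index", q="status:active", conflicts="proceed")

As the added docstrings note, rerunning named queries on every hit adds overhead, so include_named_queries_score is best reserved for requests where the per-query scores are actually needed.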
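Similarly, a hedged sketch of the data stream management calls that now accept `master_timeout` and `timeout`. The data stream name is a hypothetical placeholder and assumes a matching index template with a `data_stream` definition already exists; both parameters simply bound how long the request waits before failing with an error.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Bound the wait for a master-node connection and for the overall response.
client.indices.create_data_stream(
    name="logs-myapp-default",  # hypothetical; requires a matching index template
    master_timeout="30s",
    timeout="30s",
)
client.indices.get_data_stream(name="logs-myapp-default", master_timeout="30s")
client.indices.delete_data_stream(name="logs-myapp-default", master_timeout="30s")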