diff --git a/buildSrc/version.properties b/buildSrc/version.properties index d78663ab26131..dc44c6206d6c9 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,4 +1,4 @@ -elasticsearch = 6.4.0 +elasticsearch = 6.5.0 lucene = 7.5.0-snapshot-b9e064b935 # optional dependencies diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index c5170a46daffd..49516efc8dac3 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,4 +1,4 @@ -:version: 6.4.0 +:version: 6.5.0 :major-version: 6.x :lucene_version: 7.5.0 :lucene_version_path: 7_5_0 diff --git a/docs/painless/painless-contexts.asciidoc b/docs/painless/painless-contexts.asciidoc index ff46f6bd74dde..8b8a3b0eec6b4 100644 --- a/docs/painless/painless-contexts.asciidoc +++ b/docs/painless/painless-contexts.asciidoc @@ -1,9 +1,6 @@ [[painless-contexts]] == Painless contexts -:es_version: https://www.elastic.co/guide/en/elasticsearch/reference/master -:xp_version: https://www.elastic.co/guide/en/x-pack/current - A Painless script is evaluated within a context. Each context has values that are available as local variables, a whitelist that controls the available classes, and the methods and fields within those classes (API), and @@ -18,41 +15,41 @@ specialized code may define new ways to use a Painless script. | Name | Painless Documentation | Elasticsearch Documentation | Update | <> - | {es_version}/docs-update.html[Elasticsearch Documentation] + | {ref}/docs-update.html[Elasticsearch Documentation] | Update by query | <> - | {es_version}/docs-update-by-query.html[Elasticsearch Documentation] + | {ref}/docs-update-by-query.html[Elasticsearch Documentation] | Reindex | <> - | {es_version}/docs-reindex.html[Elasticsearch Documentation] + | {ref}/docs-reindex.html[Elasticsearch Documentation] | Sort | <> - | {es_version}/search-request-sort.html[Elasticsearch Documentation] + | {ref}/search-request-sort.html[Elasticsearch Documentation] | Similarity | <> - | {es_version}/index-modules-similarity.html[Elasticsearch Documentation] -| Weight | <> - | {es_version}/index-modules-similarity.html[Elasticsearch Documentation] + | {ref}/index-modules-similarity.html[Elasticsearch Documentation] +| Weight | <> + | {ref}/index-modules-similarity.html[Elasticsearch Documentation] | Score | <> - | {es_version}/query-dsl-function-score-query.html[Elasticsearch Documentation] + | {ref}/query-dsl-function-score-query.html[Elasticsearch Documentation] | Field | <> - | {es_version}/search-request-script-fields.html[Elasticsearch Documentation] + | {ref}/search-request-script-fields.html[Elasticsearch Documentation] | Filter | <> - | {es_version}/query-dsl-script-query.html[Elasticsearch Documentation] + | {ref}/query-dsl-script-query.html[Elasticsearch Documentation] | Minimum should match | <> - | {es_version}/query-dsl-terms-set-query.html[Elasticsearch Documentation] + | {ref}/query-dsl-terms-set-query.html[Elasticsearch Documentation] | Metric aggregation initialization | <> - | {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] + | {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] | Metric aggregation map | <> - | {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] + | {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] | Metric aggregation combine | <> - | 
{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] + | {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] | Metric aggregation reduce | <> - | {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] + | {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] | Bucket aggregation | <> - | {es_version}/search-aggregations-pipeline-bucket-script-aggregation.html[Elasticsearch Documentation] + | {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Elasticsearch Documentation] | Ingest processor | <> - | {es_version}/script-processor.html[Elasticsearch Documentation] + | {ref}/script-processor.html[Elasticsearch Documentation] | Watcher condition | <> - | {xp_version}/condition-script.html[Elasticsearch Documentation] + | {xpack-ref}/condition-script.html[Elasticsearch Documentation] | Watcher transform | <> - | {xp_version}/transform-script.html[Elasticsearch Documentation] + | {xpack-ref}/transform-script.html[Elasticsearch Documentation] |==== include::painless-contexts/index.asciidoc[] diff --git a/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc index b277055d87d8b..3bb4cae3d3bab 100644 --- a/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc +++ b/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc @@ -2,7 +2,7 @@ === Bucket aggregation context Use a Painless script in a -{es_version}/search-aggregations-pipeline-bucket-script-aggregation.html[bucket aggregation] +{ref}/search-aggregations-pipeline-bucket-script-aggregation.html[bucket aggregation] to calculate a value as a result in a bucket. *Variables* diff --git a/docs/painless/painless-contexts/painless-field-context.asciidoc b/docs/painless/painless-contexts/painless-field-context.asciidoc index bf44703001bc0..4c767ca389115 100644 --- a/docs/painless/painless-contexts/painless-field-context.asciidoc +++ b/docs/painless/painless-contexts/painless-field-context.asciidoc @@ -2,7 +2,7 @@ === Field context Use a Painless script to create a -{es_version}/search-request-script-fields.html[script field] to return +{ref}/search-request-script-fields.html[script field] to return a customized value for each document in the results of a query. *Variables* @@ -14,7 +14,7 @@ a customized value for each document in the results of a query. Contains the fields of the specified document where each field is a `List` of values. -{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`):: +{ref}/mapping-source-field.html[`ctx['_source']`] (`Map`):: Contains extracted JSON in a `Map` and `List` structure for the fields existing in a stored document. diff --git a/docs/painless/painless-contexts/painless-filter-context.asciidoc b/docs/painless/painless-contexts/painless-filter-context.asciidoc index ea0393893c882..96fddf13b500d 100644 --- a/docs/painless/painless-contexts/painless-filter-context.asciidoc +++ b/docs/painless/painless-contexts/painless-filter-context.asciidoc @@ -1,7 +1,7 @@ [[painless-filter-context]] === Filter context -Use a Painless script as a {es_version}/query-dsl-script-query.html[filter] in a +Use a Painless script as a {ref}/query-dsl-script-query.html[filter] in a query to include and exclude documents.
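For illustration, a minimal sketch of how such a filter script can be attached to a query through the Elasticsearch 6.x Java API; the field name `num_pages` and the `min_pages` parameter are invented for the example:

import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.builder.SearchSourceBuilder;

import java.util.Collections;

public class FilterContextExample {
    public static void main(String[] args) {
        // The script runs once per document and must return a boolean;
        // only documents for which it returns true are kept.
        Script script = new Script(ScriptType.INLINE, "painless",
            "doc['num_pages'].value > params.min_pages",
            Collections.<String, Object>singletonMap("min_pages", 100));

        SearchSourceBuilder source = new SearchSourceBuilder()
            .query(QueryBuilders.boolQuery().filter(QueryBuilders.scriptQuery(script)));

        System.out.println(source); // prints the JSON search body
    }
}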
diff --git a/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc b/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc index ba3be0739631f..5d451268dedcd 100644 --- a/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc +++ b/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc @@ -1,7 +1,7 @@ [[painless-ingest-processor-context]] === Ingest processor context -Use a Painless script in an {es_version}/script-processor.html[ingest processor] +Use a Painless script in an {ref}/script-processor.html[ingest processor] to modify documents upon insertion. *Variables* @@ -9,10 +9,10 @@ to modify documents upon insertion. `params` (`Map`, read-only):: User-defined parameters passed in as part of the query. -{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`):: +{ref}/mapping-index-field.html[`ctx['_index']`] (`String`):: The name of the index. -{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`):: +{ref}/mapping-type-field.html[`ctx['_type']`] (`String`):: The type of document within an index. `ctx` (`Map`):: @@ -21,10 +21,10 @@ to modify documents upon insertion. *Side Effects* -{es_version}/mapping-index-field.html[`ctx['_index']`]:: +{ref}/mapping-index-field.html[`ctx['_index']`]:: Modify this to change the destination index for the current document. -{es_version}/mapping-type-field.html[`ctx['_type']`]:: +{ref}/mapping-type-field.html[`ctx['_type']`]:: Modify this to change the type for the current document. `ctx` (`Map`, read-only):: diff --git a/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc index 1fec63ef4466f..31cb596ae8167 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc @@ -2,7 +2,7 @@ === Metric aggregation combine context Use a Painless script to -{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[combine] +{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[combine] values for use in a scripted metric aggregation. A combine script is run once per shard following a <> and is optional as part of a full metric aggregation. diff --git a/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc index ed7e01ddd003a..1503e3abb5891 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc @@ -2,7 +2,7 @@ === Metric aggregation initialization context Use a Painless script to -{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[initialize] +{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[initialize] values for use in a scripted metric aggregation. An initialization script is run prior to document collection once per shard and is optional as part of the full metric aggregation. 
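The four scripted metric phases (combine and initialization above, map and reduce below) fit together as in this sketch against the 6.x Java API; the aggregation name and the `bytes` field are invented, and the `params._agg`/`params._aggs` state keys follow the 6.x scripted metric convention:

import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class ScriptedMetricExample {
    public static void main(String[] args) {
        // init runs once per shard before collection, map once per document,
        // combine once per shard, and reduce once on the coordinating node.
        ScriptedMetricAggregationBuilder agg = AggregationBuilders.scriptedMetric("total_bytes")
            .initScript(new Script("params._agg.sum = 0L"))
            .mapScript(new Script("params._agg.sum += doc['bytes'].value"))
            .combineScript(new Script("return params._agg.sum"))
            .reduceScript(new Script("long total = 0; for (s in params._aggs) { total += s } return total"));

        SearchSourceBuilder source = new SearchSourceBuilder().size(0).aggregation(agg);
        System.out.println(source);
    }
}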
diff --git a/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc index 51f06e010db35..16016d1cf8171 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc @@ -2,7 +2,7 @@ === Metric aggregation map context Use a Painless script to -{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[map] +{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[map] values for use in a scripted metric aggregation. A map script is run once per collected document following an optional <> and is required as diff --git a/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc index 1b64b85392d26..b76e02b1b0499 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc @@ -2,7 +2,7 @@ === Metric aggregation reduce context Use a Painless script to -{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[reduce] +{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[reduce] values to produce the result of a scripted metric aggregation. A reduce script is run once on the coordinating node following a <> (or a diff --git a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc index c310f42928eb4..b2ffb63fd7aea 100644 --- a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc +++ b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc @@ -2,7 +2,7 @@ === Minimum should match context Use a Painless script to specify the -{es_version}/query-dsl-terms-set-query.html[minimum] number of terms that a +{ref}/query-dsl-terms-set-query.html[minimum] number of terms that a specified field needs to match for a document to be part of the query results. diff --git a/docs/painless/painless-contexts/painless-reindex-context.asciidoc b/docs/painless/painless-contexts/painless-reindex-context.asciidoc index a8477c8c61996..ae5445183a6ad 100644 --- a/docs/painless/painless-contexts/painless-reindex-context.asciidoc +++ b/docs/painless/painless-contexts/painless-reindex-context.asciidoc @@ -1,7 +1,7 @@ [[painless-reindex-context]] === Reindex context -Use a Painless script in a {es_version}/docs-reindex.html[reindex] operation to +Use a Painless script in a {ref}/docs-reindex.html[reindex] operation to add, modify, or delete fields within each document in an original index as it is reindexed into a target index. @@ -13,22 +13,22 @@ reindexed into a target index. `ctx['_op']` (`String`):: The name of the operation. -{es_version}/mapping-routing-field.html[`ctx['_routing']`] (`String`):: +{ref}/mapping-routing-field.html[`ctx['_routing']`] (`String`):: The value used to select a shard for document storage. -{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`):: +{ref}/mapping-index-field.html[`ctx['_index']`] (`String`):: The name of the index. -{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`):: +{ref}/mapping-type-field.html[`ctx['_type']`] (`String`):: The type of document within an index.
-{es_version}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only):: +{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only):: The unique document id. `ctx['_version']` (`int`):: The current version of the document. -{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`):: +{ref}/mapping-source-field.html[`ctx['_source']`] (`Map`):: Contains extracted JSON in a `Map` and `List` structure for the fields existing in a stored document. @@ -39,22 +39,22 @@ reindexed into a target index. specify no operation or `delete` to delete the current document from the index. -{es_version}/mapping-routing-field.html[`ctx['_routing']`]:: +{ref}/mapping-routing-field.html[`ctx['_routing']`]:: Modify this to change the routing value for the current document. -{es_version}/mapping-index-field.html[`ctx['_index']`]:: +{ref}/mapping-index-field.html[`ctx['_index']`]:: Modify this to change the destination index for the current document. -{es_version}/mapping-type-field.html[`ctx['_type']`]:: +{ref}/mapping-type-field.html[`ctx['_type']`]:: Modify this to change the type for the current document. -{es_version}/mapping-id-field.html[`ctx['_id']`]:: +{ref}/mapping-id-field.html[`ctx['_id']`]:: Modify this to change the id for the current document. `ctx['_version']` (`int`):: Modify this to change the version for the current document. -{es_version}/mapping-source-field.html[`ctx['_source']`]:: +{ref}/mapping-source-field.html[`ctx['_source']`]:: Modify the values in the `Map/List` structure to add, modify, or delete the fields of a document. diff --git a/docs/painless/painless-contexts/painless-score-context.asciidoc b/docs/painless/painless-contexts/painless-score-context.asciidoc index 21667fd31f3b1..bd1e1de7f777d 100644 --- a/docs/painless/painless-contexts/painless-score-context.asciidoc +++ b/docs/painless/painless-contexts/painless-score-context.asciidoc @@ -2,7 +2,7 @@ === Score context Use a Painless script in a -{es_version}/query-dsl-function-score-query.html[function score] to apply a new +{ref}/query-dsl-function-score-query.html[function score] to apply a new score to documents returned from a query. *Variables* diff --git a/docs/painless/painless-contexts/painless-similarity-context.asciidoc b/docs/painless/painless-contexts/painless-similarity-context.asciidoc index 052844c3111a7..53b37be52b6d7 100644 --- a/docs/painless/painless-contexts/painless-similarity-context.asciidoc +++ b/docs/painless/painless-contexts/painless-similarity-context.asciidoc @@ -2,7 +2,7 @@ === Similarity context Use a Painless script to create a -{es_version}/index-modules-similarity.html[similarity] equation for scoring +{ref}/index-modules-similarity.html[similarity] equation for scoring documents in a query. *Variables* diff --git a/docs/painless/painless-contexts/painless-sort-context.asciidoc b/docs/painless/painless-contexts/painless-sort-context.asciidoc index 7f510fb6a9251..9efd507668839 100644 --- a/docs/painless/painless-contexts/painless-sort-context.asciidoc +++ b/docs/painless/painless-contexts/painless-sort-context.asciidoc @@ -2,7 +2,7 @@ === Sort context Use a Painless script to -{es_version}/search-request-sort.html[sort] the documents in a query. +{ref}/search-request-sort.html[sort] the documents in a query.
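For illustration, a minimal sketch of a script-based sort through the 6.x Java API; the `price` field and the `factor` parameter are invented for the example:

import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.ScriptSortBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;

import java.util.Collections;

public class SortContextExample {
    public static void main(String[] args) {
        // The script produces the sort key for each document; NUMBER tells
        // Elasticsearch to compare the returned keys numerically.
        Script script = new Script(ScriptType.INLINE, "painless",
            "doc['price'].value * params.factor",
            Collections.<String, Object>singletonMap("factor", 1.1));

        SearchSourceBuilder source = new SearchSourceBuilder()
            .sort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.NUMBER)
                .order(SortOrder.DESC));

        System.out.println(source);
    }
}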
*Variables* diff --git a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc index 65666e15844bf..d8d7754807496 100644 --- a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc +++ b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc @@ -2,7 +2,7 @@ === Update by query context Use a Painless script in an -{es_version}/docs-update-by-query.html[update by query] operation to add, +{ref}/docs-update-by-query.html[update by query] operation to add, modify, or delete fields within each of a set of documents collected as the result of a query. @@ -14,22 +14,22 @@ result of a query. `ctx['_op']` (`String`):: The name of the operation. -{es_version}/mapping-routing-field.html[`ctx['_routing']`] (`String`, read-only):: +{ref}/mapping-routing-field.html[`ctx['_routing']`] (`String`, read-only):: The value used to select a shard for document storage. -{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only):: +{ref}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only):: The name of the index. -{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`, read-only):: +{ref}/mapping-type-field.html[`ctx['_type']`] (`String`, read-only):: The type of document within an index. -{es_version}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only):: +{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only):: The unique document id. `ctx['_version']` (`int`, read-only):: The current version of the document. -{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`):: +{ref}/mapping-source-field.html[`ctx['_source']`] (`Map`):: Contains extracted JSON in a `Map` and `List` structure for the fields existing in a stored document. @@ -40,7 +40,7 @@ result of a query. specify no operation or `delete` to delete the current document from the index. -{es_version}/mapping-source-field.html[`ctx['_source']`]:: +{ref}/mapping-source-field.html[`ctx['_source']`]:: Modify the values in the `Map/List` structure to add, modify, or delete the fields of a document. diff --git a/docs/painless/painless-contexts/painless-update-context.asciidoc b/docs/painless/painless-contexts/painless-update-context.asciidoc index b04ba8d9ffb56..d1b4b84eb187a 100644 --- a/docs/painless/painless-contexts/painless-update-context.asciidoc +++ b/docs/painless/painless-contexts/painless-update-context.asciidoc @@ -1,7 +1,7 @@ [[painless-update-context]] === Update context -Use a Painless script in an {es_version}/docs-update.html[update] operation to +Use a Painless script in an {ref}/docs-update.html[update] operation to add, modify, or delete fields within a single document. *Variables* @@ -12,16 +12,16 @@ add, modify, or delete fields within a single document. `ctx['_op']` (`String`):: The name of the operation. -{es_version}/mapping-routing-field.html[`ctx['_routing']`] (`String`, read-only):: +{ref}/mapping-routing-field.html[`ctx['_routing']`] (`String`, read-only):: The value used to select a shard for document storage. -{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only):: +{ref}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only):: The name of the index. -{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`, read-only):: +{ref}/mapping-type-field.html[`ctx['_type']`] (`String`, read-only):: The type of document within an index.
-{es_version}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only):: +{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only):: The unique document id. `ctx['_version']` (`int`, read-only):: @@ -30,7 +30,7 @@ add, modify, or delete fields within a single document. `ctx['_now']` (`long`, read-only):: The current timestamp in milliseconds. -{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`):: +{ref}/mapping-source-field.html[`ctx['_source']`] (`Map`):: Contains extracted JSON in a `Map` and `List` structure for the fields existing in a stored document. @@ -41,7 +41,7 @@ add, modify, or delete fields within a single document. specify no operation or `delete` to delete the current document from the index. -{es_version}/mapping-source-field.html[`ctx['_source']`]:: +{ref}/mapping-source-field.html[`ctx['_source']`]:: Modify the values in the `Map/List` structure to add, modify, or delete the fields of a document. diff --git a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc index 3a5e460a55de7..26efe24ae07b3 100644 --- a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc @@ -1,7 +1,7 @@ [[painless-watcher-condition-context]] === Watcher condition context -Use a Painless script as a {xp_version}/condition-script.html[watcher condition] +Use a Painless script as a {xpack-ref}/condition-script.html[watcher condition] to test if a response is necessary. *Variables* @@ -26,7 +26,7 @@ to test if a response is necessary. `ctx['payload']` (`Map`, read-only):: The accessible watch data based upon the - {xp_version}/input.html[watch input]. + {xpack-ref}/input.html[watch input]. *Return* diff --git a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc index 1831da5a9f87b..9296fdda1c1f2 100644 --- a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc @@ -1,7 +1,7 @@ [[painless-watcher-transform-context]] === Watcher transform context -Use a Painless script to {xp_version}/transform-script.html[transform] watch +Use a Painless script to {xpack-ref}/transform-script.html[transform] watch data into a new payload for use in a response to a condition. *Variables* @@ -26,7 +26,7 @@ data into a new payload for use in a response to a condition. `ctx['payload']` (`Map`, read-only):: The accessible watch data based upon the - {xp_version}/input.html[watch input]. + {xpack-ref}/input.html[watch input]. *Return* diff --git a/docs/painless/painless-contexts/painless-weight-context.asciidoc b/docs/painless/painless-contexts/painless-weight-context.asciidoc index 0aef936183c0f..ad215d5386b05 100644 --- a/docs/painless/painless-contexts/painless-weight-context.asciidoc +++ b/docs/painless/painless-contexts/painless-weight-context.asciidoc @@ -2,7 +2,7 @@ === Weight context Use a Painless script to create a -{es_version}/index-modules-similarity.html[weight] for use in a +{ref}/index-modules-similarity.html[weight] for use in a <>. Weight is used to prevent recalculation of constants that remain the same across documents. 
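The weight/similarity split shows up when a scripted similarity is configured. A sketch under stated assumptions: the `scripted` similarity type with `weight_script` and `script` settings keys follows the 6.4 scripted similarity feature, `my_sim` is an invented similarity name, and the script bodies use the variables this context documents (`query.boost`, `field.docCount`, `term.docFreq`, `doc.freq`):

import org.elasticsearch.common.settings.Settings;

public class ScriptedSimilarityExample {
    public static void main(String[] args) {
        // The weight script computes the per-term constant (here a simple idf)
        // once; the main script combines that precomputed weight with
        // per-document statistics on every scored document.
        Settings settings = Settings.builder()
            .put("index.similarity.my_sim.type", "scripted")
            .put("index.similarity.my_sim.weight_script.source",
                "double idf = Math.log((field.docCount + 1.0) / (term.docFreq + 1.0)) + 1.0; "
                    + "return query.boost * idf;")
            .put("index.similarity.my_sim.script.source",
                "double tf = Math.sqrt(doc.freq); return weight * tf;")
            .build();
        System.out.println(settings.get("index.similarity.my_sim.type"));
    }
}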
diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 41b268da6244f..e74bec6452d25 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -27,11 +27,17 @@ For more information about creating and updating the {es} keystore, see `xpack.security.enabled`:: Set to `true` to enable {security} on the node. + + +-- If set to `false`, which is the default value for basic and trial licenses, {security} is disabled. It also affects all {kib} instances that connect to this {es} instance; you do not need to disable {security} in those `kibana.yml` files. For more information about disabling {security} in specific {kib} instances, see {kibana-ref}/security-settings-kb.html[{kib} security settings]. +TIP: If you have gold or higher licenses, the default value is `true`; we +recommend that you explicitly add this setting to avoid confusion. + +-- + `xpack.security.hide_settings`:: A comma-separated list of settings that are omitted from the results of the <>. You can use wildcards to include diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java index 264df6f4c5f24..2e881b82b59de 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java @@ -42,7 +42,11 @@ enum Type { @Override public Object convert(Object value) { try { - return Integer.parseInt(value.toString()); + String strValue = value.toString(); + if (strValue.startsWith("0x") || strValue.startsWith("-0x")) { + return Integer.decode(strValue); + } + return Integer.parseInt(strValue); } catch(NumberFormatException e) { throw new IllegalArgumentException("unable to convert [" + value + "] to integer", e); } @@ -52,7 +56,11 @@ public Object convert(Object value) { @Override public Object convert(Object value) { try { - return Long.parseLong(value.toString()); + String strValue = value.toString(); + if (strValue.startsWith("0x") || strValue.startsWith("-0x")) { + return Long.decode(strValue); + } + return Long.parseLong(strValue); } catch(NumberFormatException e) { throw new IllegalArgumentException("unable to convert [" + value + "] to long", e); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java index 292a03d7d9033..f0fc31dab3533 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java @@ -49,6 +49,33 @@ public void testConvertInt() throws Exception { assertThat(ingestDocument.getFieldValue(fieldName, Integer.class), equalTo(randomInt)); } + public void testConvertIntHex() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + int randomInt = randomInt(); + String intString = randomInt < 0 ? 
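/*
 * Note on the conditional below: Integer.decode expects the sign to come
 * before the radix prefix (e.g. "-0x1a"), so a negative random value is
 * rendered as "-0x" plus the hex of its magnitude. Calling
 * Integer.toHexString directly on a negative int would instead yield the
 * two's-complement form (e.g. "ffffffe6"), which decode rejects as out of
 * range. The same reasoning applies to the long test further down.
 */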
"-0x" + Integer.toHexString(-randomInt) : "0x" + Integer.toHexString(randomInt); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, intString); + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.INTEGER, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(fieldName, Integer.class), equalTo(randomInt)); + } + + public void testConvertIntLeadingZero() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "010"); + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.INTEGER, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(fieldName, Integer.class), equalTo(10)); + } + + public void testConvertIntHexError() { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String value = "0x" + randomAlphaOfLengthBetween(1, 10); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, value); + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.INTEGER, false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("unable to convert [" + value + "] to integer")); + } + public void testConvertIntList() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); int numItems = randomIntBetween(1, 10); @@ -92,6 +119,33 @@ public void testConvertLong() throws Exception { assertThat(ingestDocument.getFieldValue(fieldName, Long.class), equalTo(randomLong)); } + public void testConvertLongHex() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + long randomLong = randomLong(); + String longString = randomLong < 0 ? 
"-0x" + Long.toHexString(-randomLong) : "0x" + Long.toHexString(randomLong); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, longString); + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.LONG, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(fieldName, Long.class), equalTo(randomLong)); + } + + public void testConvertLongLeadingZero() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "010"); + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.LONG, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(fieldName, Long.class), equalTo(10L)); + } + + public void testConvertLongHexError() { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String value = "0x" + randomAlphaOfLengthBetween(1, 10); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, value); + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.LONG, false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("unable to convert [" + value + "] to long")); + } + public void testConvertLongList() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); int numItems = randomIntBetween(1, 10); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 1687cb24cb639..a5a9823d13018 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -103,10 +103,10 @@ public PainlessScriptEngine(Settings settings, Map, List context = entry.getKey(); if (context.instanceClazz.equals(SearchScript.class) || context.instanceClazz.equals(ExecutableScript.class)) { contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, - new PainlessLookupBuilder(entry.getValue()).build())); + PainlessLookupBuilder.buildFromWhitelists(entry.getValue()))); } else { contextsToCompilers.put(context, new Compiler(context.instanceClazz, - new PainlessLookupBuilder(entry.getValue()).build())); + PainlessLookupBuilder.buildFromWhitelists(entry.getValue()))); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index b15f1f13f203a..519227bb901b2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -126,14 +126,55 @@ public int hashCode() { private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$"); private static final Pattern FIELD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$"); - private final List whitelists; + public static PainlessLookup buildFromWhitelists(List whitelists) { + PainlessLookupBuilder painlessLookupBuilder = 
new PainlessLookupBuilder(); + String origin = "internal error"; + + try { + for (Whitelist whitelist : whitelists) { + for (WhitelistClass whitelistClass : whitelist.whitelistStructs) { + origin = whitelistClass.origin; + painlessLookupBuilder.addPainlessClass( + whitelist.javaClassLoader, whitelistClass.javaClassName, whitelistClass.onlyFQNJavaClassName == false); + } + } + + for (Whitelist whitelist : whitelists) { + for (WhitelistClass whitelistClass : whitelist.whitelistStructs) { + String targetCanonicalClassName = whitelistClass.javaClassName.replace('$', '.'); + + for (WhitelistConstructor whitelistConstructor : whitelistClass.whitelistConstructors) { + origin = whitelistConstructor.origin; + painlessLookupBuilder.addPainlessConstructor( + targetCanonicalClassName, whitelistConstructor.painlessParameterTypeNames); + } + + for (WhitelistMethod whitelistMethod : whitelistClass.whitelistMethods) { + origin = whitelistMethod.origin; + painlessLookupBuilder.addPainlessMethod( + whitelist.javaClassLoader, targetCanonicalClassName, whitelistMethod.javaAugmentedClassName, + whitelistMethod.javaMethodName, whitelistMethod.painlessReturnTypeName, + whitelistMethod.painlessParameterTypeNames); + } + + for (WhitelistField whitelistField : whitelistClass.whitelistFields) { + origin = whitelistField.origin; + painlessLookupBuilder.addPainlessField( + targetCanonicalClassName, whitelistField.javaFieldName, whitelistField.painlessFieldTypeName); + } + } + } + } catch (Exception exception) { + throw new IllegalArgumentException("error loading whitelist(s) " + origin, exception); + } + + return painlessLookupBuilder.build(); + } private final Map> canonicalClassNamesToClasses; private final Map, PainlessClassBuilder> classesToPainlessClassBuilders; - public PainlessLookupBuilder(List whitelists) { - this.whitelists = whitelists; - + public PainlessLookupBuilder() { canonicalClassNamesToClasses = new HashMap<>(); classesToPainlessClassBuilders = new HashMap<>(); @@ -666,60 +707,6 @@ public void addPainlessField(Class targetClass, String fieldName, Class ty } public PainlessLookup build() { - String origin = "internal error"; - - try { - // first iteration collects all the Painless type names that - // are used for validation during the second iteration - for (Whitelist whitelist : whitelists) { - for (WhitelistClass whitelistStruct : whitelist.whitelistStructs) { - String painlessTypeName = whitelistStruct.javaClassName.replace('$', '.'); - PainlessClassBuilder painlessStruct = - classesToPainlessClassBuilders.get(canonicalClassNamesToClasses.get(painlessTypeName)); - - if (painlessStruct != null && painlessStruct.clazz.getName().equals(whitelistStruct.javaClassName) == false) { - throw new IllegalArgumentException("struct [" + painlessStruct.name + "] cannot represent multiple classes " + - "[" + painlessStruct.clazz.getName() + "] and [" + whitelistStruct.javaClassName + "]"); - } - - origin = whitelistStruct.origin; - addPainlessClass( - whitelist.javaClassLoader, whitelistStruct.javaClassName, whitelistStruct.onlyFQNJavaClassName == false); - - painlessStruct = classesToPainlessClassBuilders.get(canonicalClassNamesToClasses.get(painlessTypeName)); - classesToPainlessClassBuilders.put(painlessStruct.clazz, painlessStruct); - } - } - - // second iteration adds all the constructors, methods, and fields that will - // be available in Painless along with validating they exist and all their types have - // been white-listed during the first iteration - for (Whitelist whitelist : whitelists) { - for 
(WhitelistClass whitelistStruct : whitelist.whitelistStructs) { - String painlessTypeName = whitelistStruct.javaClassName.replace('$', '.'); - - for (WhitelistConstructor whitelistConstructor : whitelistStruct.whitelistConstructors) { - origin = whitelistConstructor.origin; - addPainlessConstructor(painlessTypeName, whitelistConstructor.painlessParameterTypeNames); - } - - for (WhitelistMethod whitelistMethod : whitelistStruct.whitelistMethods) { - origin = whitelistMethod.origin; - addPainlessMethod(whitelist.javaClassLoader, painlessTypeName, whitelistMethod.javaAugmentedClassName, - whitelistMethod.javaMethodName, whitelistMethod.painlessReturnTypeName, - whitelistMethod.painlessParameterTypeNames); - } - - for (WhitelistField whitelistField : whitelistStruct.whitelistFields) { - origin = whitelistField.origin; - addPainlessField(painlessTypeName, whitelistField.javaFieldName, whitelistField.painlessFieldTypeName); - } - } - } - } catch (Exception exception) { - throw new IllegalArgumentException("error loading whitelist(s) " + origin, exception); - } - copyPainlessClassMembers(); cacheRuntimeHandles(); setFunctionalInterfaceMethods(); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java index a702490fff9d4..34bc2c78de662 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.painless; import org.elasticsearch.painless.lookup.PainlessCast; - import org.elasticsearch.test.ESTestCase; public class AnalyzerCasterTests extends ESTestCase { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java index c0e0bd7ed9d05..c852d5a41dec1 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java @@ -19,14 +19,14 @@ package org.elasticsearch.painless; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.lookup.PainlessLookupBuilder; import org.elasticsearch.painless.spi.Whitelist; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; @@ -38,7 +38,7 @@ */ public class BaseClassTests extends ScriptTestCase { - private final PainlessLookup painlessLookup = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build(); + private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); public abstract static class Gets { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java index e397013e06f20..0f5844c65990a 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java @@ -2,6 +2,9 @@ import java.util.ArrayList; import 
java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /* * Licensed to Elasticsearch under one or more contributor @@ -22,10 +25,6 @@ * under the License. */ -import java.util.HashMap; -import java.util.List; -import java.util.Map; - public class BasicStatementTests extends ScriptTestCase { public void testIfStatement() { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java index 7edc90bb0a001..f0f784c089a23 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java @@ -37,7 +37,7 @@ import static org.hamcrest.Matchers.not; public class DebugTests extends ScriptTestCase { - private final PainlessLookup painlessLookup = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build(); + private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); public void testExplain() { // Debug.explain can explain an object diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java index 73adf92779d48..48af3898e0952 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java @@ -40,7 +40,7 @@ static String toString(Class iface, String source, CompilerSettings settings) PrintWriter outputWriter = new PrintWriter(output); Textifier textifier = new Textifier(); try { - new Compiler(iface, new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build()) + new Compiler(iface, PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS)) .compile("", source, settings, textifier); } catch (RuntimeException e) { textifier.print(outputWriter); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java index 87e6847b5e4f8..799be9f93f031 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java @@ -19,6 +19,11 @@ package org.elasticsearch.painless; +import org.elasticsearch.painless.lookup.PainlessLookup; +import org.elasticsearch.painless.lookup.PainlessLookupBuilder; +import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.test.ESTestCase; + import java.lang.invoke.CallSite; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; @@ -27,13 +32,8 @@ import java.util.Collections; import java.util.HashMap; -import org.elasticsearch.painless.lookup.PainlessLookup; -import org.elasticsearch.painless.lookup.PainlessLookupBuilder; -import org.elasticsearch.painless.spi.Whitelist; -import org.elasticsearch.test.ESTestCase; - public class DefBootstrapTests extends ESTestCase { - private final PainlessLookup painlessLookup = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build(); + private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); /** calls toString() on integers, twice */ public void testOneType() throws Throwable { diff --git 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LangPainlessClientYamlTestSuiteIT.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LangPainlessClientYamlTestSuiteIT.java index 2d3f299bbcb2c..1ab53b4e430b1 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LangPainlessClientYamlTestSuiteIT.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LangPainlessClientYamlTestSuiteIT.java @@ -21,7 +21,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java index e26a5a38c76b8..b2cc5e48ad8c3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java @@ -52,7 +52,7 @@ */ public class PainlessDocGenerator { - private static final PainlessLookup PAINLESS_LOOKUP = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build(); + private static final PainlessLookup PAINLESS_LOOKUP = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); private static final Logger logger = ESLoggerFactory.getLogger(PainlessDocGenerator.class); private static final Comparator FIELD_NAME = comparing(f -> f.name); private static final Comparator METHOD_NAME = comparing(m -> m.name); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index eebf1d701ee32..96cc296a1af52 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -92,7 +92,7 @@ public Object exec(String script, Map vars, boolean picky) { public Object exec(String script, Map vars, Map compileParams, Scorer scorer, boolean picky) { // test for ambiguity errors before running the actual script if picky is true if (picky) { - PainlessLookup painlessLookup = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build(); + PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, GenericElasticsearchScript.class); CompilerSettings pickySettings = new CompilerSettings(); pickySettings.setPicky(true); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java index 66d49be16ba9a..6ee021c695f99 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java @@ -22,8 +22,8 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Scorer; import org.elasticsearch.painless.spi.Whitelist; -import org.elasticsearch.script.ScriptedMetricAggContexts; import org.elasticsearch.script.ScriptContext; +import 
org.elasticsearch.script.ScriptedMetricAggContexts; import java.util.ArrayList; import java.util.Collections; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index 1bb754db84745..8eeb25c9676c7 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.painless; import junit.framework.AssertionFailedError; - import org.apache.lucene.util.Constants; import org.elasticsearch.script.ScriptException; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index cd3e4123e1267..84452b4843d96 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -20,21 +20,21 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.CompilerSettings; -import org.elasticsearch.painless.lookup.PainlessLookup; -import org.elasticsearch.painless.lookup.PainlessCast; -import org.elasticsearch.painless.lookup.PainlessField; -import org.elasticsearch.painless.lookup.PainlessLookupBuilder; -import org.elasticsearch.painless.lookup.PainlessLookupUtility; -import org.elasticsearch.painless.lookup.PainlessMethod; -import org.elasticsearch.painless.lookup.PainlessClass; import org.elasticsearch.painless.FeatureTest; import org.elasticsearch.painless.GenericElasticsearchScript; import org.elasticsearch.painless.Locals.Variable; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.ScriptClassInfo; -import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.antlr.Walker; +import org.elasticsearch.painless.lookup.PainlessCast; +import org.elasticsearch.painless.lookup.PainlessClass; +import org.elasticsearch.painless.lookup.PainlessField; +import org.elasticsearch.painless.lookup.PainlessLookup; +import org.elasticsearch.painless.lookup.PainlessLookupBuilder; +import org.elasticsearch.painless.lookup.PainlessLookupUtility; +import org.elasticsearch.painless.lookup.PainlessMethod; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -49,7 +49,7 @@ * Tests {@link Object#toString} implementations on all extensions of {@link ANode}. 
*/ public class NodeToStringTests extends ESTestCase { - private final PainlessLookup painlessLookup = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build(); + private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); public void testEAssignment() { assertToString( diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index 4f96f185a8abf..77f8ec29db514 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -240,37 +240,43 @@ class PossiblySlowRunnable implements Runnable { @Override public void run() { - final String uri; - if (pipelinedRequest != null && pipelinedRequest.last() instanceof FullHttpRequest) { - uri = ((FullHttpRequest) pipelinedRequest.last()).uri(); - } else { - uri = fullHttpRequest.uri(); - } + try { + final String uri; + if (pipelinedRequest != null && pipelinedRequest.last() instanceof FullHttpRequest) { + uri = ((FullHttpRequest) pipelinedRequest.last()).uri(); + } else { + uri = fullHttpRequest.uri(); + } - final ByteBuf buffer = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8); + final ByteBuf buffer = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8); - final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, buffer); - httpResponse.headers().add(HttpHeaderNames.CONTENT_LENGTH, buffer.readableBytes()); + final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, buffer); + httpResponse.headers().add(HttpHeaderNames.CONTENT_LENGTH, buffer.readableBytes()); - final boolean slow = uri.matches("/slow/\\d+"); - if (slow) { - try { - Thread.sleep(scaledRandomIntBetween(500, 1000)); - } catch (InterruptedException e) { - throw new RuntimeException(e); + final boolean slow = uri.matches("/slow/\\d+"); + if (slow) { + try { + Thread.sleep(scaledRandomIntBetween(500, 1000)); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } else { + assert uri.matches("/\\d+"); } - } else { - assert uri.matches("/\\d+"); - } - final ChannelPromise promise = ctx.newPromise(); - final Object msg; - if (pipelinedRequest != null) { - msg = pipelinedRequest.createHttpResponse(httpResponse, promise); - } else { - msg = httpResponse; + final ChannelPromise promise = ctx.newPromise(); + final Object msg; + if (pipelinedRequest != null) { + msg = pipelinedRequest.createHttpResponse(httpResponse, promise); + } else { + msg = httpResponse; + } + ctx.writeAndFlush(msg, promise); + } finally { + if (pipelinedRequest != null) { + pipelinedRequest.release(); + } } - ctx.writeAndFlush(msg, promise); } } diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 13119913672af..6001ed570652e 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -115,7 +115,7 @@ if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3P useFixture = true } else if (!s3PermanentAccessKey || !s3PermanentSecretKey || !s3PermanentBucket || !s3PermanentBasePath) { - throw new IllegalArgumentException("not all options specified to run against external S3 service") + throw new 
IllegalArgumentException("not all options specified to run against external S3 service as permanent credentials are present") } if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3TemporaryBasePath && !s3TemporarySessionToken) { @@ -126,7 +126,7 @@ if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3T s3TemporarySessionToken = 's3_integration_test_temporary_session_token' } else if (!s3TemporaryAccessKey || !s3TemporarySecretKey || !s3TemporaryBucket || !s3TemporaryBasePath || !s3TemporarySessionToken) { - throw new IllegalArgumentException("not all options specified to run against external S3 service") + throw new IllegalArgumentException("not all options specified to run against external S3 service as temporary credentials are present") } final String minioVersion = 'RELEASE.2018-06-22T23-48-46Z' @@ -381,31 +381,31 @@ integTestCluster { integTestRunner.systemProperty 'tests.rest.blacklist', 'repository_s3/50_repository_ecs_credentials/*' -/// -RestIntegTestTask integTestECS = project.tasks.create('integTestECS', RestIntegTestTask.class) { - description = "Runs tests using the ECS repository." -} +if (useFixture) { + RestIntegTestTask integTestECS = project.tasks.create('integTestECS', RestIntegTestTask.class) { + description = "Runs tests using the ECS repository." + } // The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks: -project.afterEvaluate { - ClusterConfiguration cluster = project.extensions.getByName('integTestECSCluster') as ClusterConfiguration - cluster.dependsOn(project.s3Fixture) - - cluster.setting 's3.client.integration_test_ecs.endpoint', "http://${-> s3Fixture.addressAndPort}" - - Task integTestECSTask = project.tasks.getByName('integTestECS') - integTestECSTask.clusterConfig.plugin(project.path) - integTestECSTask.clusterConfig.environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', - "http://${-> s3Fixture.addressAndPort}/ecs_credentials_endpoint" - integTestECSRunner.systemProperty 'tests.rest.blacklist', [ - 'repository_s3/10_basic/*', - 'repository_s3/20_repository_permanent_credentials/*', - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*' - ].join(",") + project.afterEvaluate { + ClusterConfiguration cluster = project.extensions.getByName('integTestECSCluster') as ClusterConfiguration + cluster.dependsOn(project.s3Fixture) + + cluster.setting 's3.client.integration_test_ecs.endpoint', "http://${-> s3Fixture.addressAndPort}" + + Task integTestECSTask = project.tasks.getByName('integTestECS') + integTestECSTask.clusterConfig.plugin(project.path) + integTestECSTask.clusterConfig.environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', + "http://${-> s3Fixture.addressAndPort}/ecs_credentials_endpoint" + integTestECSRunner.systemProperty 'tests.rest.blacklist', [ + 'repository_s3/10_basic/*', + 'repository_s3/20_repository_permanent_credentials/*', + 'repository_s3/30_repository_temporary_credentials/*', + 'repository_s3/40_repository_ec2_credentials/*' + ].join(",") + } + project.check.dependsOn(integTestECS) } -project.check.dependsOn(integTestECS) -/// thirdPartyAudit.excludes = [ // classes are missing diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java index 3aada7837d8ae..1f569c2b36156 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java 
+++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java @@ -128,7 +128,7 @@ public void test30AbortWhenJavaMissing() { }); Platforms.onLinux(() -> { - final String javaPath = sh.run("which java").stdout.trim(); + final String javaPath = sh.run("command -v java").stdout.trim(); try { sh.run("chmod -x '" + javaPath + "'"); diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java index 1990840db7201..8a21381cbdfe5 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java @@ -31,6 +31,8 @@ import java.io.IOException; import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -38,11 +40,13 @@ import static junit.framework.TestCase.assertTrue; import static org.elasticsearch.packaging.util.Cleanup.cleanEverything; import static org.elasticsearch.packaging.util.FileUtils.assertPathsDontExist; +import static org.elasticsearch.packaging.util.FileUtils.mv; import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE; import static org.elasticsearch.packaging.util.Packages.assertInstalled; import static org.elasticsearch.packaging.util.Packages.assertRemoved; import static org.elasticsearch.packaging.util.Packages.install; import static org.elasticsearch.packaging.util.Packages.remove; +import static org.elasticsearch.packaging.util.Packages.runInstallCommand; import static org.elasticsearch.packaging.util.Packages.startElasticsearch; import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; import static org.elasticsearch.packaging.util.Platforms.getOsRelease; @@ -77,6 +81,21 @@ public void onlyCompatibleDistributions() { assumeTrue("only compatible distributions", distribution().packaging.compatible); } + public void test05InstallFailsWhenJavaMissing() { + final Shell sh = new Shell(); + final Result java = sh.run("command -v java"); + + final Path originalJavaPath = Paths.get(java.stdout.trim()); + final Path relocatedJavaPath = originalJavaPath.getParent().resolve("java.relocated"); + try { + mv(originalJavaPath, relocatedJavaPath); + final Result installResult = runInstallCommand(distribution()); + assertThat(installResult.exitCode, is(1)); + } finally { + mv(relocatedJavaPath, originalJavaPath); + } + } + public void test10InstallPackage() { assertRemoved(distribution()); installation = install(distribution()); diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java index 6e80d9e027df2..be7edc5e8f9e4 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java @@ -67,7 +67,10 @@ public static void assertRemoved(Distribution distribution) { Platforms.onDPKG(() -> { assertThat(status.exitCode, anyOf(is(0), is(1))); if (status.exitCode == 0) { - assertTrue(Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout).find()); + assertTrue("an uninstalled status should be indicated: " + status.stdout, + Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout).find() || + Pattern.compile("(?m)^Status:.+ok not-installed").matcher(status.stdout).find() + ); } }); } @@ -90,13 +93,27 @@ public 
static Installation install(Distribution distribution) { } public static Installation install(Distribution distribution, String version) { + final Result result = runInstallCommand(distribution, version); + if (result.exitCode != 0) { + throw new RuntimeException("Installing distribution " + distribution + " version " + version + " failed: " + result); + } + + return Installation.ofPackage(distribution.packaging); + } + + public static Result runInstallCommand(Distribution distribution) { + return runInstallCommand(distribution, getCurrentVersion()); + } + + public static Result runInstallCommand(Distribution distribution, String version) { final Shell sh = new Shell(); final Path distributionFile = getDistributionFile(distribution, version); - Platforms.onRPM(() -> sh.run("rpm -i " + distributionFile)); - Platforms.onDPKG(() -> sh.run("dpkg -i " + distributionFile)); - - return Installation.ofPackage(distribution.packaging); + if (Platforms.isRPM()) { + return sh.runIgnoreExitCode("rpm -i " + distributionFile); + } else { + return sh.runIgnoreExitCode("dpkg -i " + distributionFile); + } } public static void remove(Distribution distribution) { diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index e7201b2ed53f0..cf68eabee56e1 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -173,9 +173,11 @@ public class Version implements Comparable, ToXContentFragment { public static final int V_6_3_3_ID = 6030399; public static final Version V_6_3_3 = new Version(V_6_3_3_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_4_0_ID = 6040099; - public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); + public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); + public static final int V_6_5_0_ID = 6050099; + public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); - public static final Version CURRENT = V_6_4_0; + public static final Version CURRENT = V_6_5_0; static { assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to [" @@ -188,6 +190,8 @@ public static Version readVersion(StreamInput in) throws IOException { public static Version fromId(int id) { switch (id) { + case V_6_5_0_ID: + return V_6_5_0; case V_6_4_0_ID: return V_6_4_0; case V_6_3_3_ID: diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index 3a8a06949b29c..eee45743ee32e 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -166,6 +166,13 @@ private KeyStoreWrapper(int formatVersion, boolean hasPassword, byte[] dataBytes this.dataBytes = dataBytes; } + /** + * Get the metadata format version for the keystore + **/ + public int getFormatVersion() { + return formatVersion; + } + /** Returns a path representing the ES keystore in the given config dir. 
 */
    public static Path keystorePath(Path configDir) {
        return configDir.resolve(KEYSTORE_FILENAME);
    }
@@ -593,8 +600,10 @@ private void ensureOpen() {
     @Override
     public synchronized void close() {
         this.closed = true;
-        for (Entry entry : entries.get().values()) {
-            Arrays.fill(entry.bytes, (byte)0);
+        if (null != entries.get() && entries.get().isEmpty() == false) {
+            for (Entry entry : entries.get().values()) {
+                Arrays.fill(entry.bytes, (byte) 0);
+            }
         }
     }
 }
diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle
index 8500162c260d7..4e989b4b67dd3 100644
--- a/x-pack/docs/build.gradle
+++ b/x-pack/docs/build.gradle
@@ -310,6 +310,16 @@ setups['farequote_datafeed'] = setups['farequote_job'] + '''
       "job_id":"farequote",
       "indexes":"farequote"
     }
+'''
+setups['ml_filter_safe_domains'] = '''
+  - do:
+      xpack.ml.put_filter:
+        filter_id: "safe_domains"
+        body: >
+          {
+            "description": "A list of safe domains",
+            "items": ["*.google.com", "wikipedia.org"]
+          }
 '''
 setups['server_metrics_index'] = '''
   - do:
diff --git a/x-pack/docs/en/ml/aggregations.asciidoc b/x-pack/docs/en/ml/aggregations.asciidoc
index 5ff54b76f01b3..07f465015696d 100644
--- a/x-pack/docs/en/ml/aggregations.asciidoc
+++ b/x-pack/docs/en/ml/aggregations.asciidoc
@@ -105,8 +105,8 @@ For all other aggregations, if the aggregation name doesn't match the field name
 there are limitations in the drill-down functionality within the {ml} page in
 {kib}.
 
-{dfeeds} support complex nested aggregations, this example uses the `derivative`
-pipeline aggregation to find the 1st order derivative of the counter
+{dfeeds-cap} support complex nested aggregations. This example uses the `derivative`
+pipeline aggregation to find the first order derivative of the counter
 `system.network.out.bytes` for each value of the field `beat.name`.
 
[source,js]
diff --git a/x-pack/docs/en/ml/api-quickref.asciidoc b/x-pack/docs/en/ml/api-quickref.asciidoc
index dc87a6ba209c2..be74167862e15 100644
--- a/x-pack/docs/en/ml/api-quickref.asciidoc
+++ b/x-pack/docs/en/ml/api-quickref.asciidoc
@@ -47,6 +47,15 @@ The main {ml} resources can be accessed with a variety of endpoints:
 * {ref}/ml-delete-calendar-job.html[DELETE /calendars/+++<calendar_id>+++/jobs/+++<job_id>+++]: Disassociate a job from a calendar
 * {ref}/ml-delete-calendar.html[DELETE /calendars/+++<calendar_id>+++]: Delete a calendar
 
+[float]
+[[ml-api-filters]]
+=== /filters/
+
+* {ref}/ml-put-filter.html[PUT /filters/+++<filter_id>+++]: Create a filter
+* {ref}/ml-update-filter.html[POST /filters/+++<filter_id>+++/_update]: Update a filter
+* {ref}/ml-get-filter.html[GET /filters/+++<filter_id>+++]: List filters
+* {ref}/ml-delete-filter.html[DELETE /filters/+++<filter_id>+++]: Delete a filter
+
 [float]
 [[ml-api-datafeeds]]
 === /datafeeds/
diff --git a/x-pack/docs/en/ml/configuring.asciidoc b/x-pack/docs/en/ml/configuring.asciidoc
index c2c6e69a71128..e35f046a82bd9 100644
--- a/x-pack/docs/en/ml/configuring.asciidoc
+++ b/x-pack/docs/en/ml/configuring.asciidoc
@@ -34,6 +34,7 @@ The scenarios in this section describe some best practices for generating useful
 * <>
 * <>
 * <>
+* <>
 
 :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/customurl.asciidoc
 include::customurl.asciidoc[]
@@ -49,3 +50,6 @@ include::populations.asciidoc[]
 
 :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/transforms.asciidoc
 include::transforms.asciidoc[]
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/detector-custom-rules.asciidoc
+include::detector-custom-rules.asciidoc[]
\ No newline at end of file
diff --git a/x-pack/docs/en/ml/detector-custom-rules.asciidoc b/x-pack/docs/en/ml/detector-custom-rules.asciidoc
new file mode 100644
index 0000000000000..18d516fae2de0
--- /dev/null
+++ b/x-pack/docs/en/ml/detector-custom-rules.asciidoc
@@ -0,0 +1,230 @@
+[role="xpack"]
+[[ml-configuring-detector-custom-rules]]
+=== Customizing detectors with rules and filters
+
+<> enable you to change the behavior of anomaly
+detectors based on domain-specific knowledge.
+
+Rules describe _when_ a detector should take a certain _action_ instead
+of following its default behavior. To specify the _when_, a rule uses
+a `scope` and `conditions`. You can think of `scope` as the categorical
+specification of a rule, while `conditions` are the numerical part.
+A rule can have a scope, one or more conditions, or a combination of
+scope and conditions.
+
+Let us see how these can be configured through examples.
+
+==== Specifying rule scope
+
+Let us assume we are configuring a job in order to detect DNS data
+exfiltration. Our data contain the fields `subdomain` and
+`highest_registered_domain`. We can use a detector that looks like
+`high_info_content(subdomain) over highest_registered_domain`.
+If we run such a job, it is possible that we discover a lot of anomalies on
+frequently used domains that we have reason to trust. As security analysts, we
+are not interested in such anomalies. Ideally, we could instruct the detector to
+skip results for domains that we consider safe. Using a rule with a scope allows
+us to achieve this.
+
+First, we need to create a list of our safe domains. Such lists are called
+`filters` in {ml}. Filters can be shared across jobs.
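+
+If you want to check which filters already exist, you can list them all with
+the {ref}/ml-get-filter.html[get filters API] (shown here as a quick sanity
+check; it is not required for the steps that follow):
+
+[source,js]
+----------------------------------
+GET _xpack/ml/filters/
+----------------------------------
+// CONSOLE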
+ +We create our filter using the {ref}/ml-put-filter.html[put filter API]: + +[source,js] +---------------------------------- +PUT _xpack/ml/filters/safe_domains +{ + "description": "Our list of safe domains", + "items": ["safe.com", "trusted.com"] +} +---------------------------------- +// CONSOLE + +Now, we can create our job specifying a scope that uses the filter for the +`highest_registered_domain` field: + +[source,js] +---------------------------------- +PUT _xpack/ml/anomaly_detectors/dns_exfiltration_with_rule +{ + "analysis_config" : { + "bucket_span":"5m", + "detectors" :[{ + "function":"high_info_content", + "field_name": "subdomain", + "over_field_name": "highest_registered_domain", + "custom_rules": [{ + "actions": ["skip_result"], + "scope": { + "highest_registered_domain": { + "filter_id": "safe_domains", + "filter_type": "include" + } + } + }] + }] + }, + "data_description" : { + "time_field":"timestamp" + } +} +---------------------------------- +// CONSOLE + +As time advances and we see more data and more results, we might encounter new +domains that we want to add in the filter. We can do that by using the +{ref}/ml-update-filter.html[update filter API]: + +[source,js] +---------------------------------- +POST _xpack/ml/filters/safe_domains/_update +{ + "add_items": ["another-safe.com"] +} +---------------------------------- +// CONSOLE +// TEST[setup:ml_filter_safe_domains] + +Note that we can provide scope for any of the partition/over/by fields. +In the following example we scope multiple fields: + +[source,js] +---------------------------------- +PUT _xpack/ml/anomaly_detectors/scoping_multiple_fields +{ + "analysis_config" : { + "bucket_span":"5m", + "detectors" :[{ + "function":"count", + "partition_field_name": "my_partition", + "over_field_name": "my_over", + "by_field_name": "my_by", + "custom_rules": [{ + "actions": ["skip_result"], + "scope": { + "my_partition": { + "filter_id": "filter_1" + }, + "my_over": { + "filter_id": "filter_2" + }, + "my_by": { + "filter_id": "filter_3" + } + } + }] + }] + }, + "data_description" : { + "time_field":"timestamp" + } +} +---------------------------------- +// CONSOLE + +Such a detector will skip results when the values of all 3 scoped fields +are included in the referenced filters. + +==== Specifying rule conditions + +Imagine a detector that looks for anomalies in CPU utilization. +Given a machine that is idle for long enough, small movement in CPU could +result in anomalous results where the `actual` value is quite small, for +example, 0.02. Given our knowledge about how CPU utilization behaves we might +determine that anomalies with such small actual values are not interesting for +investigation. + +Let us now configure a job with a rule that will skip results where CPU +utilization is less than 0.20. + +[source,js] +---------------------------------- +PUT _xpack/ml/anomaly_detectors/cpu_with_rule +{ + "analysis_config" : { + "bucket_span":"5m", + "detectors" :[{ + "function":"high_mean", + "field_name": "cpu_utilization", + "custom_rules": [{ + "actions": ["skip_result"], + "conditions": [ + { + "applies_to": "actual", + "operator": "lt", + "value": 0.20 + } + ] + }] + }] + }, + "data_description" : { + "time_field":"timestamp" + } +} +---------------------------------- +// CONSOLE + +When there are multiple conditions they are combined with a logical `and`. +This is useful when we want the rule to apply to a range. We simply create +a rule with two conditions, one for each end of the desired range. 
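+
+Before looking at a range example, note that `actual` is not the only result
+property a condition can apply to; `typical`, `diff_from_typical`, and `time`
+are also supported. As a sketch, a detector snippet like the following would
+skip results whose actual value barely deviates from the typical value (the
+threshold of 5.0 is an arbitrary choice for illustration):
+
+[source,js]
+----------------------------------
+"custom_rules": [{
+  "actions": ["skip_result"],
+  "conditions": [
+    {
+      "applies_to": "diff_from_typical",
+      "operator": "lt",
+      "value": 5.0
+    }
+  ]
+}]
+----------------------------------
+// NOTCONSOLE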
+
+Here is an example where a count detector will skip results when the count
+is greater than 30 and less than 50:
+
+[source,js]
+----------------------------------
+PUT _xpack/ml/anomaly_detectors/rule_with_range
+{
+  "analysis_config" : {
+    "bucket_span":"5m",
+    "detectors" :[{
+      "function":"count",
+      "custom_rules": [{
+        "actions": ["skip_result"],
+        "conditions": [
+          {
+            "applies_to": "actual",
+            "operator": "gt",
+            "value": 30
+          },
+          {
+            "applies_to": "actual",
+            "operator": "lt",
+            "value": 50
+          }
+        ]
+      }]
+    }]
+  },
+  "data_description" : {
+    "time_field":"timestamp"
+  }
+}
+----------------------------------
+// CONSOLE
+
+==== Rules in the life-cycle of a job
+
+Rules only affect results created after the rules were applied.
+Let us imagine that we have configured a job and it has been running
+for some time. After observing its results, we decide that we can employ
+rules to get rid of some uninteresting results. We can use
+the update-job API to do so. However, the rule we added will only be in effect
+for results created from the moment we added the rule onwards. Past results
+will remain unaffected.
+
+==== Using rules vs. filtering data
+
+It might appear that using rules is just another way of filtering the data
+that feeds into a job. For example, a rule that skips results when the
+partition field value is in a filter sounds equivalent to having a query
+that filters out such documents. But it is not. There is a fundamental
+difference. When the data is filtered before reaching a job, it is as if the
+data never existed for the job. With rules, the data still reaches the job and
+affects its behavior (depending on the rule actions).
+
+For example, a rule with the `skip_result` action means all data will still
+be modeled. On the other hand, a rule with the `skip_model_update` action means
+results will still be created even though the model will not be updated by
+data matched by a rule.
diff --git a/x-pack/docs/en/ml/functions/geo.asciidoc b/x-pack/docs/en/ml/functions/geo.asciidoc
index e9685b46e1677..5bcf6c3394558 100644
--- a/x-pack/docs/en/ml/functions/geo.asciidoc
+++ b/x-pack/docs/en/ml/functions/geo.asciidoc
@@ -8,6 +8,8 @@ input data. The {xpackml} features include the following geographic function: `lat_long`.
 
 NOTE: You cannot create forecasts for jobs that contain geographic functions.
+You also cannot add rules with conditions to detectors that use geographic
+functions.
 
 [float]
 [[ml-lat-long]]
diff --git a/x-pack/docs/en/ml/functions/metric.asciidoc b/x-pack/docs/en/ml/functions/metric.asciidoc
index 3ee5179702720..9d6f3515a029c 100644
--- a/x-pack/docs/en/ml/functions/metric.asciidoc
+++ b/x-pack/docs/en/ml/functions/metric.asciidoc
@@ -15,6 +15,9 @@ The {xpackml} features include the following metric functions:
 * <>
 * xref:ml-metric-varp[`varp`, `high_varp`, `low_varp`]
 
+NOTE: You cannot add rules with conditions to detectors that use the `metric`
+function.
+
 [float]
 [[ml-metric-min]]
 ==== Min
@@ -221,7 +224,6 @@ mean `responsetime` for each application over time. It detects when the mean
 The `metric` function combines `min`, `max`, and `mean` functions. You can use
 it as a shorthand for a combined analysis. If you do not specify a function in
 a detector, this is the default function.
-//TBD: Is that default behavior still true?
 
 High- and low-sided functions are not applicable. You cannot use this function
 when a `summary_count_field_name` is specified.
diff --git a/x-pack/docs/en/ml/functions/rare.asciidoc b/x-pack/docs/en/ml/functions/rare.asciidoc index fc30918b508f1..1531285a7add2 100644 --- a/x-pack/docs/en/ml/functions/rare.asciidoc +++ b/x-pack/docs/en/ml/functions/rare.asciidoc @@ -15,6 +15,8 @@ number of times (frequency) rare values occur. `exclude_frequent`. * You cannot create forecasts for jobs that contain `rare` or `freq_rare` functions. +* You cannot add rules with conditions to detectors that use `rare` or +`freq_rare` functions. * Shorter bucket spans (less than 1 hour, for example) are recommended when looking for rare events. The functions model whether something happens in a bucket at least once. With longer bucket spans, it is more likely that diff --git a/x-pack/docs/en/rest-api/defs.asciidoc b/x-pack/docs/en/rest-api/defs.asciidoc index 99600472a0930..349ce343c7ae9 100644 --- a/x-pack/docs/en/rest-api/defs.asciidoc +++ b/x-pack/docs/en/rest-api/defs.asciidoc @@ -8,6 +8,7 @@ job configuration options. * <> * <> * <> +* <> * <> * <> * <> @@ -19,6 +20,8 @@ include::ml/calendarresource.asciidoc[] [role="xpack"] include::ml/datafeedresource.asciidoc[] [role="xpack"] +include::ml/filterresource.asciidoc[] +[role="xpack"] include::ml/jobresource.asciidoc[] [role="xpack"] include::ml/jobcounts.asciidoc[] diff --git a/x-pack/docs/en/rest-api/ml-api.asciidoc b/x-pack/docs/en/rest-api/ml-api.asciidoc index e9a987cc4a709..b48e9f934042d 100644 --- a/x-pack/docs/en/rest-api/ml-api.asciidoc +++ b/x-pack/docs/en/rest-api/ml-api.asciidoc @@ -15,6 +15,14 @@ machine learning APIs and in advanced job configuration options in Kibana. * <>, <> * <>, <> +[float] +[[ml-api-filter-endpoint]] +=== Filters + +* <>, <> +* <> +* <> + [float] [[ml-api-datafeed-endpoint]] === {dfeeds-cap} @@ -69,11 +77,13 @@ include::ml/close-job.asciidoc[] //CREATE include::ml/put-calendar.asciidoc[] include::ml/put-datafeed.asciidoc[] +include::ml/put-filter.asciidoc[] include::ml/put-job.asciidoc[] //DELETE include::ml/delete-calendar.asciidoc[] include::ml/delete-datafeed.asciidoc[] include::ml/delete-calendar-event.asciidoc[] +include::ml/delete-filter.asciidoc[] include::ml/delete-job.asciidoc[] include::ml/delete-calendar-job.asciidoc[] include::ml/delete-snapshot.asciidoc[] @@ -93,6 +103,7 @@ include::ml/get-job.asciidoc[] include::ml/get-job-stats.asciidoc[] include::ml/get-snapshot.asciidoc[] include::ml/get-calendar-event.asciidoc[] +include::ml/get-filter.asciidoc[] include::ml/get-record.asciidoc[] //OPEN include::ml/open-job.asciidoc[] @@ -107,6 +118,7 @@ include::ml/start-datafeed.asciidoc[] include::ml/stop-datafeed.asciidoc[] //UPDATE include::ml/update-datafeed.asciidoc[] +include::ml/update-filter.asciidoc[] include::ml/update-job.asciidoc[] include::ml/update-snapshot.asciidoc[] //VALIDATE diff --git a/x-pack/docs/en/rest-api/ml/delete-filter.asciidoc b/x-pack/docs/en/rest-api/ml/delete-filter.asciidoc new file mode 100644 index 0000000000000..b58d2980b888a --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/delete-filter.asciidoc @@ -0,0 +1,53 @@ +[role="xpack"] +[[ml-delete-filter]] +=== Delete Filter API +++++ +Delete Filter +++++ + +Deletes a filter. + + +==== Request + +`DELETE _xpack/ml/filters/` + + +==== Description + +This API deletes a {stack-ov}/ml-rules.html[filter]. +If a {ml} job references the filter, you cannot delete the filter. You must +update or delete the job before you can delete the filter. + + +==== Path Parameters + +`filter_id` (required):: + (string) Identifier for the filter. 
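+
+As described above, the delete fails if a job still references the filter. In
+that case you must first remove the reference, for example by updating or
+deleting the referencing job. As an illustration, deleting a hypothetical
+referencing job named `dns_exfiltration_with_rule` would look like this:
+
+[source,js]
+--------------------------------------------------
+DELETE _xpack/ml/anomaly_detectors/dns_exfiltration_with_rule
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:illustration only]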
+
+
+==== Authorization
+
+You must have `manage_ml` or `manage` cluster privileges to use this API.
+For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
+
+
+==== Examples
+
+The following example deletes the `safe_domains` filter:
+
+[source,js]
+--------------------------------------------------
+DELETE _xpack/ml/filters/safe_domains
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:ml_filter_safe_domains]
+
+When the filter is deleted, you receive the following results:
+[source,js]
+----
+{
+  "acknowledged": true
+}
+----
+//TESTRESPONSE
diff --git a/x-pack/docs/en/rest-api/ml/filterresource.asciidoc b/x-pack/docs/en/rest-api/ml/filterresource.asciidoc
new file mode 100644
index 0000000000000..64768da4911c4
--- /dev/null
+++ b/x-pack/docs/en/rest-api/ml/filterresource.asciidoc
@@ -0,0 +1,16 @@
+[role="xpack"]
+[[ml-filter-resource]]
+=== Filter Resources
+
+A filter resource has the following properties:
+
+`filter_id`::
+  (string) A string that uniquely identifies the filter.
+
+`description`::
+  (string) A description of the filter.
+
+`items`::
+  (array of strings) The items of the filter.
+
+For more information, see {stack-ov}/ml-rules.html[Machine learning rules and filters].
diff --git a/x-pack/docs/en/rest-api/ml/get-filter.asciidoc b/x-pack/docs/en/rest-api/ml/get-filter.asciidoc
new file mode 100644
index 0000000000000..89f40cf331251
--- /dev/null
+++ b/x-pack/docs/en/rest-api/ml/get-filter.asciidoc
@@ -0,0 +1,84 @@
+[role="xpack"]
+[[ml-get-filter]]
+=== Get Filters API
+++++
+Get Filters
+++++
+
+Retrieves filters.
+
+
+==== Request
+
+`GET _xpack/ml/filters/<filter_id>` +
+
+`GET _xpack/ml/filters/`
+
+
+==== Description
+
+You can get a single filter or all filters. For more information, see
+{stack-ov}/ml-rules.html[Machine learning rules and filters].
+
+
+==== Path Parameters
+
+`filter_id`::
+  (string) Identifier for the filter.
+
+
+==== Request Body
+
+`from`:::
+    (integer) Skips the specified number of filters.
+
+`size`:::
+    (integer) Specifies the maximum number of filters to obtain.
+
+
+==== Results
+
+The API returns the following information:
+
+`filters`::
+  (array) An array of filter resources.
+  For more information, see <>.
+
+
+==== Authorization
+
+You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
+privileges to use this API. For more information, see
+{xpack-ref}/security-privileges.html[Security Privileges].
+
+
+==== Examples
+
+The following example gets configuration information for the `safe_domains`
+filter:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/filters/safe_domains
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:ml_filter_safe_domains]
+
+The API returns the following results:
+[source,js]
+----
+{
+  "count": 1,
+  "filters": [
+    {
+      "filter_id": "safe_domains",
+      "description": "A list of safe domains",
+      "items": [
+        "*.google.com",
+        "wikipedia.org"
+      ]
+    }
+  ]
+}
+----
+//TESTRESPONSE
diff --git a/x-pack/docs/en/rest-api/ml/jobresource.asciidoc b/x-pack/docs/en/rest-api/ml/jobresource.asciidoc
index bb959fd728cb6..3c4e330722ff5 100644
--- a/x-pack/docs/en/rest-api/ml/jobresource.asciidoc
+++ b/x-pack/docs/en/rest-api/ml/jobresource.asciidoc
@@ -106,7 +106,8 @@ An analysis configuration object has the following properties:
 
 `bucket_span`::
   (time units) The size of the interval that the analysis is aggregated into,
-  typically between `5m` and `1h`.
The default value is `5m`. + typically between `5m` and `1h`. The default value is `5m`. For more + information about time units, see <>. `categorization_field_name`:: (string) If this property is specified, the values of the specified field will @@ -160,8 +161,7 @@ no analysis can occur and an error is returned. (time units) The size of the window in which to expect data that is out of time order. The default value is 0 (no latency). If you specify a non-zero value, it must be greater than or equal to one second. For more information - about time units, see - {ref}/common-options.html#time-units[Time Units]. + about time units, see <>. + -- NOTE: Latency is only applicable when you send data by using @@ -245,7 +245,7 @@ NOTE: The `field_name` cannot contain double quotes or backslashes. -- `function`:: - (string) The analysis function that is used. + (string) The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. For more information, see {xpack-ref}/ml-functions.html[Function Reference]. @@ -262,7 +262,12 @@ NOTE: The `field_name` cannot contain double quotes or backslashes. `use_null`:: (boolean) Defines whether a new series is used as the null series - when there is no value for the by or partition fields. The default value is `false`. + + when there is no value for the by or partition fields. The default value is `false`. + +`custom_rules`:: + (array) An array of rule objects, which enable customizing how the detector works. + For example, a rule may dictate to the detector conditions under which results should be skipped. + For more information see <>. + + -- IMPORTANT: Field names are case sensitive, for example a field named 'Bytes' @@ -270,9 +275,9 @@ is different from one named 'bytes'. -- -After you create a job, the only property you can change in the detector -configuration object is the `detector_description`; all other properties are -informational. +After you create a job, the only properties you can change in the detector +configuration object are the `detector_description` and the `custom_rules`; +all other properties are informational. [float] [[ml-datadescription]] @@ -408,6 +413,64 @@ the categorization field value came from. For more information, see {xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. +[float] +[[ml-detector-custom-rule]] +==== Detector Custom Rule + +{stack-ov}/ml-rules.html[Custom rules] enable you to customize the way detectors +operate. + +A rule has the following properties: + +`actions`:: + (array) The set of actions to be triggered when the rule applies. + If more than one action is specified the effects of all actions are combined. + The available actions include: + + `skip_result`::: The result will not be created. This is the default value. + Unless you also specify `skip_model_update`, the model will be updated as + usual with the corresponding series value. + `skip_model_update`::: The value for that series will not be used to update + the model. Unless you also specify `skip_result`, the results will be created + as usual. This action is suitable when certain values are expected to be + consistently anomalous and they affect the model in a way that negatively + impacts the rest of the results. +`scope`:: + (object) An optional scope of series where the rule applies. By default the scope + includes all series. Scoping is allowed for any of the partition/by/over fields. 
+  To add a scope for a field, add the field name as a key in the scope object
+  and set its value to an object with the following properties:
+`filter_id`::
+  (string) The id of the <> to be used.
+`filter_type`::
+  (string) Either `include` (the rule applies for values in the filter)
+  or `exclude` (the rule applies for values not in the filter). Defaults
+  to `include`.
+
+`conditions`::
+  (array) An optional array of numeric conditions when the rule applies.
+  Multiple conditions are combined together with a logical `AND`.
++
+--
+NOTE: If your detector uses `lat_long`, `metric`, `rare`, or `freq_rare`
+functions, you cannot specify `conditions` for your rule.
+
+
+A condition has the following properties:
+
+`applies_to`:::
+  (string) Specifies the result property to which the condition applies.
+  The available options are `actual`, `typical`, `diff_from_typical`, and
+  `time`.
+`operator`:::
+  (string) Specifies the condition operator. The available options are
+  `gt` (greater than), `gte` (greater than or equals), `lt` (less than), and
+  `lte` (less than or equals).
+`value`:::
+  (double) The value that is compared against the `applies_to` field using
+  the `operator`.
+--
+
+A rule must have either a non-empty scope or at least one condition.
+For more examples, see
+{stack-ov}/ml-configuring-detector-custom-rules.html[Configuring Detector Custom Rules].
+
 [float]
 [[ml-apilimits]]
 ==== Analysis Limits
@@ -448,8 +511,7 @@ Specifying a string is recommended for clarity. If you specify a byte size unit
 of `b` or `kb` and the number does not equate to a discrete number of megabytes,
 it is rounded down to the closest MiB. The minimum valid value is 1 MiB. If you
 specify a value less than 1 MiB, an error occurs. For more information about
-supported byte size units, see
-{ref}/common-options.html#byte-units[Byte size units].
+supported byte size units, see <>.
 
 If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit`
 setting, an error occurs when you try to create jobs that have
diff --git a/x-pack/docs/en/rest-api/ml/put-filter.asciidoc b/x-pack/docs/en/rest-api/ml/put-filter.asciidoc
new file mode 100644
index 0000000000000..d2982a56f612e
--- /dev/null
+++ b/x-pack/docs/en/rest-api/ml/put-filter.asciidoc
@@ -0,0 +1,68 @@
+[role="xpack"]
+[[ml-put-filter]]
+=== Create Filter API
+++++
+Create Filter
+++++
+
+Instantiates a filter.
+
+==== Request
+
+`PUT _xpack/ml/filters/<filter_id>`
+
+==== Description
+
+A {stack-ov}/ml-rules.html[filter] contains a list of strings.
+It can be used by one or more jobs. Specifically, filters are referenced in
+the `custom_rules` property of <>.
+
+==== Path Parameters
+
+`filter_id` (required)::
+  (string) Identifier for the filter.
+
+
+==== Request Body
+
+`description`::
+  (string) A description of the filter.
+
+`items`::
+  (array of strings) The items of the filter.
+  A wildcard `*` can be used at the beginning
+  or the end of an item. Up to 10000 items
+  are allowed in each filter.
+
+
+==== Authorization
+
+You must have `manage_ml` or `manage` cluster privileges to use this API.
+For more information, see
+{xpack-ref}/security-privileges.html[Security Privileges].
+
+
+==== Examples
+
+The following example creates the `safe_domains` filter:
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/ml/filters/safe_domains
+{
+  "description": "A list of safe domains",
+  "items": ["*.google.com", "wikipedia.org"]
+}
+--------------------------------------------------
+// CONSOLE
+
+When the filter is created, you receive the following response:
+[source,js]
+----
+{
+  "filter_id": "safe_domains",
+  "description": "A list of safe domains",
+  "items": ["*.google.com", "wikipedia.org"]
+}
+----
+//TESTRESPONSE
diff --git a/x-pack/docs/en/rest-api/ml/update-filter.asciidoc b/x-pack/docs/en/rest-api/ml/update-filter.asciidoc
new file mode 100644
index 0000000000000..1b6760dfed654
--- /dev/null
+++ b/x-pack/docs/en/rest-api/ml/update-filter.asciidoc
@@ -0,0 +1,67 @@
+[role="xpack"]
+[[ml-update-filter]]
+=== Update Filter API
+++++
+Update Filter
+++++
+
+Updates the description of a filter, adds items, or removes items.
+
+==== Request
+
+`POST _xpack/ml/filters/<filter_id>/_update`
+
+//==== Description
+
+==== Path Parameters
+
+`filter_id` (required)::
+  (string) Identifier for the filter.
+
+
+==== Request Body
+
+`description`::
+  (string) A description for the filter. See <>.
+
+`add_items`::
+  (array of strings) The items to add to the filter.
+
+`remove_items`::
+  (array of strings) The items to remove from the filter.
+
+
+==== Authorization
+
+You must have `manage_ml` or `manage` cluster privileges to use this API.
+For more information, see
+{xpack-ref}/security-privileges.html[Security Privileges].
+
+
+==== Examples
+
+You can change the description and add or remove items from the `safe_domains`
+filter as follows:
+
+[source,js]
+--------------------------------------------------
+POST _xpack/ml/filters/safe_domains/_update
+{
+  "description": "Updated list of domains",
+  "add_items": ["*.myorg.com"],
+  "remove_items": ["wikipedia.org"]
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:ml_filter_safe_domains]
+
+The API returns the following results:
+
+[source,js]
+----
+{
+  "filter_id": "safe_domains",
+  "description": "Updated list of domains",
+  "items": ["*.google.com", "*.myorg.com"]
+}
+----
+//TESTRESPONSE
diff --git a/x-pack/docs/en/rest-api/ml/update-job.asciidoc b/x-pack/docs/en/rest-api/ml/update-job.asciidoc
index f68737a3408c4..f916aef8f85a0 100644
--- a/x-pack/docs/en/rest-api/ml/update-job.asciidoc
+++ b/x-pack/docs/en/rest-api/ml/update-job.asciidoc
@@ -35,6 +35,8 @@ each periodic persistence of the model. See <>. | Yes
 
 |`description` |A description of the job. See <>. | No
 
+|`detectors` |An array of <>. | No
+
 |`groups` |A list of job groups. See <>. | No
 
 |`model_plot_config`: `enabled` |If true, enables calculation and storage of the
@@ -58,12 +60,6 @@ if the job is open when you make the update, you must stop the data feed, close
 the job, then restart the data feed and open the job for the changes to take
 effect.
 
-//|`analysis_config`: `detectors`: `detector_index` | A unique identifier of the
-//detector. Matches the order of detectors returned by
-//<>, starting from 0. | No
-//|`analysis_config`: `detectors`: `detector_description` |A description of the
-//detector. See <>. | No
-
 [NOTE]
 --
 * You can update the `analysis_limits` only while the job is closed.
@@ -73,6 +69,21 @@ of `hard_limit`, this means that it was unable to process some data. You
 might want to re-run this job with an increased `model_memory_limit`.
-- +[[ml-detector-update]] +==== Detector Update Objects + +A detector update object has the following properties: + +`detector_index`:: + (integer) The identifier of the detector to update. + +`description`:: + (string) The new description for the detector. + +`custom_rules`:: + (array) The new list of <> for the detector. + +No other detector property can be updated. ==== Authorization diff --git a/x-pack/docs/en/watcher/actions/slack.asciidoc b/x-pack/docs/en/watcher/actions/slack.asciidoc index 587d2ed7f8e59..05ee7b7b340d9 100644 --- a/x-pack/docs/en/watcher/actions/slack.asciidoc +++ b/x-pack/docs/en/watcher/actions/slack.asciidoc @@ -169,9 +169,9 @@ aggregation and the Slack action: more information, see <>. -| `proxy.host` | no | - | The proxy host to use (only in combination with `proxy.port`) +| `proxy.host` | no | The proxy host to use (only in combination with `proxy.port`) -| `proxy.port` | no | - | The proxy port to use (only in combination with `proxy.host`) +| `proxy.port` | no | The proxy port to use (only in combination with `proxy.host`) |====== [[configuring-slack]] diff --git a/x-pack/plugin/core/src/main/resources/security_audit_log.json b/x-pack/plugin/core/src/main/resources/security_audit_log.json index f5decbb4019be..75c25ff53e250 100644 --- a/x-pack/plugin/core/src/main/resources/security_audit_log.json +++ b/x-pack/plugin/core/src/main/resources/security_audit_log.json @@ -80,6 +80,9 @@ }, "rule": { "type": "keyword" + }, + "opaque_id": { + "type": "keyword" } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java new file mode 100644 index 0000000000000..4a2c7b97195eb --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.settings.Settings; + + +public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { + + private final boolean fipsModeEnabled; + + FIPS140JKSKeystoreBootstrapCheck(Settings settings) { + this.fipsModeEnabled = Security.FIPS_MODE_ENABLED.get(settings); + } + + /** + * Test if the node fails the check. + * + * @param context the bootstrap context + * @return the result of the bootstrap check + */ + @Override + public BootstrapCheckResult check(BootstrapContext context) { + + if (fipsModeEnabled) { + final Settings settings = context.settings; + Settings keystoreTypeSettings = settings.filter(k -> k.endsWith("keystore.type")) + .filter(k -> settings.get(k).equalsIgnoreCase("jks")); + if (keystoreTypeSettings.isEmpty() == false) { + return BootstrapCheckResult.failure("JKS Keystores cannot be used in a FIPS 140 compliant JVM. 
Please " + + "revisit [" + keystoreTypeSettings.toDelimitedString(',') + "] settings"); + } + // Default Keystore type is JKS if not explicitly set + Settings keystorePathSettings = settings.filter(k -> k.endsWith("keystore.path")) + .filter(k -> settings.hasValue(k.replace(".path", ".type")) == false); + if (keystorePathSettings.isEmpty() == false) { + return BootstrapCheckResult.failure("JKS Keystores cannot be used in a FIPS 140 compliant JVM. Please " + + "revisit [" + keystorePathSettings.toDelimitedString(',') + "] settings"); + } + + } + return BootstrapCheckResult.success(); + } + + @Override + public boolean alwaysEnforce() { + return fipsModeEnabled; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java new file mode 100644 index 0000000000000..57e12a211a38d --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.XPackSettings; + +import java.util.Locale; + +public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapCheck { + + private final boolean fipsModeEnabled; + + FIPS140PasswordHashingAlgorithmBootstrapCheck(final Settings settings) { + this.fipsModeEnabled = Security.FIPS_MODE_ENABLED.get(settings); + } + + /** + * Test if the node fails the check. + * + * @param context the bootstrap context + * @return the result of the bootstrap check + */ + @Override + public BootstrapCheckResult check(final BootstrapContext context) { + if (fipsModeEnabled) { + final String selectedAlgorithm = XPackSettings.PASSWORD_HASHING_ALGORITHM.get(context.settings); + if (selectedAlgorithm.toLowerCase(Locale.ROOT).startsWith("pbkdf2") == false) { + return BootstrapCheckResult.failure("Only PBKDF2 is allowed for password hashing in a FIPS-140 JVM. Please set the " + + "appropriate value for [ " + XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey() + " ] setting."); + } + } + return BootstrapCheckResult.success(); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java new file mode 100644 index 0000000000000..c766dd0ffaa2b --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; + +import java.io.IOException; +import java.io.UncheckedIOException; + +public class FIPS140SecureSettingsBootstrapCheck implements BootstrapCheck { + + private final boolean fipsModeEnabled; + private final Environment environment; + + FIPS140SecureSettingsBootstrapCheck(Settings settings, Environment environment) { + this.fipsModeEnabled = Security.FIPS_MODE_ENABLED.get(settings); + this.environment = environment; + } + + /** + * Test if the node fails the check. + * + * @param context the bootstrap context + * @return the result of the bootstrap check + */ + @Override + public BootstrapCheckResult check(BootstrapContext context) { + if (fipsModeEnabled) { + try (KeyStoreWrapper secureSettings = KeyStoreWrapper.load(environment.configFile())) { + if (secureSettings != null && secureSettings.getFormatVersion() < 3) { + return BootstrapCheckResult.failure("Secure settings store is not of the latest version. Please use " + + "bin/elasticsearch-keystore create to generate a new secure settings store and migrate the secure settings there."); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return BootstrapCheckResult.success(); + } + + @Override + public boolean alwaysEnforce() { + return fipsModeEnabled; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 2bbc0b6d5bc05..5e46d1659782a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -256,6 +256,8 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw ExtensiblePlugin { private static final Logger logger = Loggers.getLogger(Security.class); + static final Setting FIPS_MODE_ENABLED = + Setting.boolSetting("xpack.security.fips_mode.enabled", false, Property.NodeScope); public static final String NAME4 = XPackField.SECURITY + "4"; public static final Setting> USER_SETTING = @@ -311,6 +313,9 @@ public Security(Settings settings, final Path configPath) { new PkiRealmBootstrapCheck(getSslService()), new TLSLicenseBootstrapCheck(), new PasswordHashingAlgorithmBootstrapCheck(), + new FIPS140SecureSettingsBootstrapCheck(settings, env), + new FIPS140JKSKeystoreBootstrapCheck(settings), + new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings), new KerberosRealmBootstrapCheck(env))); checks.addAll(InternalRealms.getBootstrapChecks(settings, env)); this.bootstrapChecks = Collections.unmodifiableList(checks); @@ -613,6 +618,7 @@ public static List> getSettings(boolean transportClientMode, List lastClusterState = new AtomicReference<>(); + final boolean found = awaitSecurityAuditIndex(lastClusterState, QueryBuilders.matchQuery("principal", USER)); + + assertTrue("Did not find security audit index. 
Current cluster state:\n" + lastClusterState.get().toString(), found); + + SearchResponse searchResponse = client().prepareSearch(".security_audit_log*").setQuery( + QueryBuilders.matchQuery("principal", USER)).get(); + assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); + assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("principal"), is(USER)); + } + + public void testAuditTrailTemplateIsRecreatedAfterDelete() throws Exception { + // this is already "tested" by the test framework since we wipe the templates before and after, + // but lets be explicit about the behavior + awaitIndexTemplateCreation(); + + // delete the template + DeleteIndexTemplateResponse deleteResponse = client().admin().indices() + .prepareDeleteTemplate(IndexAuditTrail.INDEX_TEMPLATE_NAME).execute().actionGet(); + assertThat(deleteResponse.isAcknowledged(), is(true)); + awaitIndexTemplateCreation(); + } + + public void testOpaqueIdWorking() throws Exception { + Request request = new Request("GET", "/"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader(Task.X_OPAQUE_ID, "foo"); + options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()))); + request.setOptions(options); + Response response = getRestClient().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + final AtomicReference lastClusterState = new AtomicReference<>(); + final boolean found = awaitSecurityAuditIndex(lastClusterState, QueryBuilders.matchQuery("opaque_id", "foo")); + + assertTrue("Did not find security audit index. Current cluster state:\n" + lastClusterState.get().toString(), found); + + SearchResponse searchResponse = client().prepareSearch(".security_audit_log*").setQuery( + QueryBuilders.matchQuery("opaque_id", "foo")).get(); + assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); + + assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("opaque_id"), is("foo")); + } + + private boolean awaitSecurityAuditIndex(AtomicReference lastClusterState, + QueryBuilder query) throws InterruptedException { final AtomicBoolean indexExists = new AtomicBoolean(false); - final boolean found = awaitBusy(() -> { + return awaitBusy(() -> { if (indexExists.get() == false) { ClusterState state = client().admin().cluster().prepareState().get().getState(); lastClusterState.set(state); @@ -138,28 +187,9 @@ public void testIndexAuditTrailWorking() throws Exception { logger.info("refreshing audit indices"); client().admin().indices().prepareRefresh(".security_audit_log*").get(); logger.info("refreshed audit indices"); - return client().prepareSearch(".security_audit_log*").setQuery(QueryBuilders.matchQuery("principal", USER)) - .get().getHits().getTotalHits() > 0; + return client().prepareSearch(".security_audit_log*").setQuery(query) + .get().getHits().getTotalHits() > 0; }, 60L, TimeUnit.SECONDS); - - assertTrue("Did not find security audit index. 
Current cluster state:\n" + lastClusterState.get().toString(), found); - - SearchResponse searchResponse = client().prepareSearch(".security_audit_log*").setQuery( - QueryBuilders.matchQuery("principal", USER)).get(); - assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("principal"), is(USER)); - } - - public void testAuditTrailTemplateIsRecreatedAfterDelete() throws Exception { - // this is already "tested" by the test framework since we wipe the templates before and after, - // but lets be explicit about the behavior - awaitIndexTemplateCreation(); - - // delete the template - DeleteIndexTemplateResponse deleteResponse = client().admin().indices() - .prepareDeleteTemplate(IndexAuditTrail.INDEX_TEMPLATE_NAME).execute().actionGet(); - assertThat(deleteResponse.isAcknowledged(), is(true)); - awaitIndexTemplateCreation(); } private void awaitIndexTemplateCreation() throws InterruptedException {