Merge branch 'master' into 20640-auto-create-templates
elasticmachine authored Oct 19, 2020
2 parents ed692c7 + 3deebc2 commit e8617b6
Showing 76 changed files with 1,268 additions and 793 deletions.
@@ -28,6 +28,7 @@
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
@@ -236,6 +237,32 @@ public Cancellable createAsync(CreateSnapshotRequest createSnapshotRequest, Requ
CreateSnapshotResponse::fromXContent, listener, emptySet());
}

/**
* Clones a snapshot.
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
* API on elastic.co</a>
*/
public AcknowledgedResponse clone(CloneSnapshotRequest cloneSnapshotRequest, RequestOptions options)
throws IOException {
return restHighLevelClient.performRequestAndParseEntity(cloneSnapshotRequest, SnapshotRequestConverters::cloneSnapshot, options,
AcknowledgedResponse::fromXContent, emptySet());
}

/**
* Asynchronously clones a snapshot.
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
* API on elastic.co</a>
* @return cancellable that may be used to cancel the request
*/
public Cancellable cloneAsync(CloneSnapshotRequest cloneSnapshotRequest, RequestOptions options,
ActionListener<AcknowledgedResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(cloneSnapshotRequest,
SnapshotRequestConverters::cloneSnapshot, options,
AcknowledgedResponse::fromXContent, listener, emptySet());
}

/**
* Get snapshots.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
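
For context, a minimal usage sketch of the `clone`/`cloneAsync` methods added above — not part of the commit; it assumes an existing `RestHighLevelClient` named `client`, a registered repository `my_repository`, and a completed snapshot `source_snapshot` to copy from:

[source,java]
----
// Hypothetical example mirroring the integration test later in this diff; names are placeholders.
CloneSnapshotRequest cloneRequest =
    new CloneSnapshotRequest("my_repository", "source_snapshot", "target_snapshot", new String[]{"index_1"});

// Synchronous call; use cloneAsync(...) with an ActionListener for the non-blocking variant.
AcknowledgedResponse cloneResponse = client.snapshot().clone(cloneRequest, RequestOptions.DEFAULT);
assert cloneResponse.isAcknowledged();
----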
@@ -28,6 +28,7 @@
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
@@ -123,6 +124,21 @@ static Request createSnapshot(CreateSnapshotRequest createSnapshotRequest) throw
return request;
}

static Request cloneSnapshot(CloneSnapshotRequest cloneSnapshotRequest) throws IOException {
String endpoint = new RequestConverters.EndpointBuilder().addPathPart("_snapshot")
.addPathPart(cloneSnapshotRequest.repository())
.addPathPart(cloneSnapshotRequest.source())
.addPathPart("_clone")
.addPathPart(cloneSnapshotRequest.target())
.build();
Request request = new Request(HttpPut.METHOD_NAME, endpoint);
RequestConverters.Params params = new RequestConverters.Params();
params.withMasterTimeout(cloneSnapshotRequest.masterNodeTimeout());
request.addParameters(params.asMap());
request.setEntity(RequestConverters.createEntity(cloneSnapshotRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
return request;
}

static Request getSnapshots(GetSnapshotsRequest getSnapshotsRequest) {
RequestConverters.EndpointBuilder endpointBuilder = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot")
.addCommaSeparatedPathParts(getSnapshotsRequest.repositories());
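
The converter above maps a `CloneSnapshotRequest` onto `PUT /_snapshot/{repository}/{source-snapshot}/_clone/{target-snapshot}`, with the indices to clone carried in the request body. A hedged sketch of the equivalent call through the low-level `RestClient` — illustrative only; the client variable and the repository, snapshot, and index names are placeholders:

[source,java]
----
// Assumes an existing low-level RestClient named `lowLevelClient`.
Request request = new Request("PUT", "/_snapshot/my_repository/source_snapshot/_clone/target_snapshot");
request.setJsonEntity("{\"indices\": \"index_1\"}"); // indices to copy into the new snapshot
Response response = lowLevelClient.performRequest(request);
----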
@@ -28,6 +28,7 @@
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
@@ -350,6 +351,30 @@ public void testDeleteSnapshot() throws IOException {
assertTrue(response.isAcknowledged());
}

public void testCloneSnapshot() throws IOException {
String repository = "test_repository";
String snapshot = "source_snapshot";
String targetSnapshot = "target_snapshot";
final String testIndex = "test_idx";

createIndex(testIndex, Settings.EMPTY);
assertTrue("index [" + testIndex + "] should have been created", indexExists(testIndex));

AcknowledgedResponse putRepositoryResponse = createTestRepository(repository, FsRepository.TYPE, "{\"location\": \".\"}");
assertTrue(putRepositoryResponse.isAcknowledged());

CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(repository, snapshot);
createSnapshotRequest.waitForCompletion(true);

CreateSnapshotResponse createSnapshotResponse = createTestSnapshot(createSnapshotRequest);
assertEquals(RestStatus.OK, createSnapshotResponse.status());

CloneSnapshotRequest request = new CloneSnapshotRequest(repository, snapshot, targetSnapshot, new String[]{testIndex});
AcknowledgedResponse response = execute(request, highLevelClient().snapshot()::clone, highLevelClient().snapshot()::cloneAsync);

assertTrue(response.isAcknowledged());
}

private static Map<String, Object> randomUserMetadata() {
if (randomBoolean()) {
return null;
@@ -16,7 +16,7 @@ example, if the interval is a calendar day, `2020-01-03T07:00:01Z` is rounded to

[source,java]
----
bucket_key = Math.floor(value / interval) * interval)
bucket_key = Math.floor(value / interval) * interval
----
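
A quick worked example of the corrected rounding above, with hypothetical values that are not part of the docs change: a value of `7` with an interval of `5` falls into the bucket keyed `5`.

[source,java]
----
// Illustrative only: fixed-interval bucket key rounding.
double value = 7.0;
double interval = 5.0;
double bucketKey = Math.floor(value / interval) * interval; // 5.0
----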

[[calendar_and_fixed_intervals]]
4 changes: 2 additions & 2 deletions docs/reference/cluster/nodes-stats.asciidoc
@@ -163,7 +163,7 @@ is not `0`, a reason for the rejection or failure is included in the response.

`cluster_name`::
(string)
Name of the cluster. Based on the <<cluster.name>> setting.
Name of the cluster. Based on the <<cluster-name>> setting.

`nodes`::
(object)
@@ -186,7 +186,7 @@ since the {wikipedia}/Unix_time[Unix Epoch].

`name`::
(string)
Human-readable identifier for the node. Based on the <<node.name>> setting.
Human-readable identifier for the node. Based on the <<node-name>> setting.

`transport_address`::
(string)
2 changes: 1 addition & 1 deletion docs/reference/cluster/stats.asciidoc
@@ -66,7 +66,7 @@ is not `0`, a reason for the rejection or failure is included in the response.

`cluster_name`::
(string)
Name of the cluster, based on the <<cluster.name>> setting.
Name of the cluster, based on the <<cluster-name>> setting.

`cluster_uuid`::
(string)
2 changes: 1 addition & 1 deletion docs/reference/eql/detect-threats-with-eql.asciidoc
@@ -220,7 +220,7 @@ The response also includes other valuable information about how the

You now know that a `regsvr32.exe` process was used to register a potentially
malicious script, `RegSvr32.sct`. Next, see if `regsvr32.exe` later loads the
`scrob.dll` library.
`scrobj.dll` library.

Modify the previous EQL query as follows:

20 changes: 18 additions & 2 deletions docs/reference/eql/eql-search-api.asciidoc
@@ -56,9 +56,25 @@ To search all data streams and indices in a cluster, use
[[eql-search-api-query-params]]
==== {api-query-parms-title}

include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices]
`allow_no_indices`::
(Optional, boolean)
+
NOTE: This parameter's behavior differs from the `allow_no_indices` parameter
used in other <<multi-index,multi-target APIs>>.
+
If `false`, the request returns an error if any wildcard expression,
<<indices-aliases,index alias>>, or `_all` value targets only missing or closed
indices. This behavior applies even if the request targets other open indices.
For example, a request targeting `foo*,bar*` returns an error if an index
starts with `foo` but no index starts with `bar`.
+
Defaults to `false`.
If `true`, only requests that exclusively target missing or closed indices
return an error. For example, a request targeting `foo*,bar*` does not return an
error if an index starts with `foo` but no index starts with `bar`. However, a
request that targets only `bar*` still returns an error.
+
Defaults to `true`.


include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
+
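
As a hedged illustration of the `allow_no_indices` behavior described above — not part of the docs change; the index patterns, query, and client variable are made up — the parameter can be set explicitly on an EQL search issued through the low-level client:

[source,java]
----
// With allow_no_indices=false, this fails if either `foo*` or `bar*` matches no open index.
Request eqlSearch = new Request("GET", "/foo*,bar*/_eql/search");
eqlSearch.addParameter("allow_no_indices", "false");
eqlSearch.setJsonEntity("{\"query\": \"process where true\"}");
Response eqlResponse = lowLevelClient.performRequest(eqlSearch);
----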
@@ -92,7 +92,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-jobs]
--------------------------------------------------
POST _ml/anomaly_detectors/low_request_rate/_close
--------------------------------------------------
// TEST[skip:sometimes fails due to https://github.com/elastic/elasticsearch/pull/48583#issuecomment-552991325]
// TEST[skip:Kibana sample data]

When the job is closed, you receive the following results:

6 changes: 3 additions & 3 deletions docs/reference/modules/discovery/bootstrapping.asciidoc
@@ -15,7 +15,7 @@ The initial set of master-eligible nodes is defined in the
set to a list containing one of the following items for each master-eligible
node:

- The <<node.name,node name>> of the node.
- The <<node-name,node name>> of the node.
- The node's hostname if `node.name` is not set, because `node.name` defaults
to the node's hostname. You must use either the fully-qualified hostname or
the bare hostname <<modules-discovery-bootstrap-cluster-fqdns,depending on
@@ -44,7 +44,7 @@ WARNING: You must set `cluster.initial_master_nodes` to the same list of nodes
on each node on which it is set in order to be sure that only a single cluster
forms during bootstrapping and therefore to avoid the risk of data loss.

For a cluster with 3 master-eligible nodes (with <<node.name,node names>>
For a cluster with 3 master-eligible nodes (with <<node-name,node names>>
`master-a`, `master-b` and `master-c`) the configuration will look as follows:

[source,yaml]
@@ -97,7 +97,7 @@ match exactly.
[discrete]
==== Choosing a cluster name

The <<cluster.name,`cluster.name`>> setting enables you to create multiple
The <<cluster-name,`cluster.name`>> setting enables you to create multiple
clusters which are separated from each other. Nodes verify that they agree on
their cluster name when they first connect to each other, and Elasticsearch
will only form a cluster from nodes that all have the same cluster name. The
6 changes: 3 additions & 3 deletions docs/reference/modules/node.asciidoc
@@ -48,7 +48,7 @@ A node that has the `master` role (default), which makes it eligible to be
<<data-node,Data node>>::

A node that has the `data` role (default). Data nodes hold data and perform data
related operations such as CRUD, search, and aggregations. A node with the `data` role can fill any of the specialised data node roles.
related operations such as CRUD, search, and aggregations. A node with the `data` role can fill any of the specialised data node roles.

<<node-ingest-node,Ingest node>>::

@@ -439,6 +439,6 @@ The RPM and Debian distributions do this for you already.
More node settings can be found in <<settings>> and <<important-settings>>,
including:

* <<cluster.name,`cluster.name`>>
* <<node.name,`node.name`>>
* <<cluster-name,`cluster.name`>>
* <<node-name,`node.name`>>
* <<modules-network,network settings>>
10 changes: 10 additions & 0 deletions docs/reference/redirects.asciidoc
@@ -3,6 +3,16 @@

The following pages have moved or been deleted.

[role="exclude",id="node.name"]
=== Node name setting

See <<node-name,Node name setting>>.

[role="exclude",id="cluster.name"]
=== Cluster name setting

See <<cluster-name,Cluster name setting>>.

[role="exclude",id="ccr-remedy-follower-index"]
=== Leader index retaining operations for replication

@@ -22,7 +22,7 @@ is not `0`, a reason for the rejection or failure is included in the response.

`cluster_name`::
(string)
Name of the cluster. Based on the <<cluster.name>> setting.
Name of the cluster. Based on the <<cluster-name>> setting.

`nodes`::
(object)
9 changes: 6 additions & 3 deletions docs/reference/rest-api/common-parms.asciidoc
@@ -38,9 +38,12 @@ end::target-index-aliases[]

tag::allow-no-indices[]
`allow_no_indices`::
(Optional, boolean) If `false`, the request returns an error when a
wildcard expression, <<indices-aliases,index alias>>, or `_all` value targets
only missing or closed indices.
(Optional, boolean)
If `false`, the request returns an error if any wildcard expression,
<<indices-aliases,index alias>>, or `_all` value targets only missing or closed
indices. This behavior applies even if the request targets other open indices.
For example, a request targeting `foo*,bar*` returns an error if an index
starts with `foo` but no index starts with `bar`.
end::allow-no-indices[]

tag::allow-no-match-transforms1[]
4 changes: 2 additions & 2 deletions docs/reference/settings/audit-settings.asciidoc
@@ -74,7 +74,7 @@ audited in plain text when including the request body in audit events.
// tag::xpack-sa-lf-emit-node-name-tag[]
`xpack.security.audit.logfile.emit_node_name`::
(<<dynamic-cluster-setting,Dynamic>>)
Specifies whether to include the <<node.name,node name>> as a field in
Specifies whether to include the <<node-name,node name>> as a field in
each audit event. The default value is `false`.
// end::xpack-sa-lf-emit-node-name-tag[]

@@ -101,7 +101,7 @@ The default value is `false`.
Specifies whether to include the node id as a field in each audit event.
This is available for the new format only. That is to say, this information
does not exist in the `<clustername>_access.log` file.
Unlike <<node.name,node name>>, whose value might change if the administrator
Unlike <<node-name,node name>>, whose value might change if the administrator
changes the setting in the config file, the node id will persist across cluster
restarts and the administrator cannot change it.
The default value is `true`.
15 changes: 8 additions & 7 deletions docs/reference/setup/important-settings.asciidoc
@@ -7,14 +7,15 @@ settings which need to be considered before going into production.
The following settings *must* be considered before going to production:

* <<path-settings,Path settings>>
* <<cluster.name,Cluster name>>
* <<node.name,Node name>>
* <<network.host,Network host>>
* <<cluster-name,Cluster name setting>>
* <<node-name,Node name setting>>
* <<network.host,Network host settings>>
* <<discovery-settings,Discovery settings>>
* <<heap-size,Heap size>>
* <<heap-dump-path,Heap dump path>>
* <<gc-logging,GC logging>>
* <<es-tmpdir,Temp directory>>
* <<heap-size,Heap size settings>>
* <<heap-dump-path,JVM heap dump path setting>>
* <<gc-logging,GC logging settings>>
* <<es-tmpdir,Temporary directory settings>>
* <<error-file-path,JVM fatal error log setting>>

include::important-settings/path-settings.asciidoc[]

11 changes: 6 additions & 5 deletions docs/reference/setup/important-settings/cluster-name.asciidoc
@@ -1,14 +1,15 @@
[[cluster.name]]
=== `cluster.name`
[[cluster-name]]
[discrete]
=== Cluster name setting

A node can only join a cluster when it shares its `cluster.name` with all the
other nodes in the cluster. The default name is `elasticsearch`, but you should
change it to an appropriate name which describes the purpose of the cluster.
change it to an appropriate name that describes the purpose of the cluster.

[source,yaml]
--------------------------------------------------
cluster.name: logging-prod
--------------------------------------------------

Make sure that you don't reuse the same cluster names in different environments,
otherwise you might end up with nodes joining the wrong cluster.
IMPORTANT: Do not reuse the same cluster names in different environments.
Otherwise, nodes might join the wrong cluster.