Merge remote-tracking branch 'elastic/master' into build-flavor-type-leniency

* elastic/master:
  SQL: Fix deserialisation issue of TimeProcessor (elastic#40776)
  Improve GCS docs for using keystore (elastic#40605)
  Add Restore Operation to SnapshotResiliencyTests (elastic#40634)
  Small refactorings to analysis components (elastic#40745)
  SQL: Fix display size for DATE/DATETIME (elastic#40669)
  add HLRC protocol tests for transform state and stats (elastic#40766)
  Inline TransportReplAction#registerRequestHandlers (elastic#40762)
  remove experimental label from search_as_you_type documentation (elastic#40744)
  Remove some abstractions from `TransportReplicationAction` (elastic#40706)
  Upgrade to latest build scan plugin (elastic#40702)
  Use default memory lock setting in testing (elastic#40730)
  Add Bulk Delete Api to BlobStore (elastic#40322)
  Remove yaml skips older than 7.0 (elastic#40183)
  Docs: Move id in the java-api (elastic#40748)
jasontedor committed Apr 3, 2019
2 parents 82f2f2d + cfea348 commit 34daff9
Showing 187 changed files with 817 additions and 1,057 deletions.
2 changes: 1 addition & 1 deletion build.gradle
@@ -29,7 +29,7 @@ import org.gradle.util.DistributionLocator
import org.gradle.plugins.ide.eclipse.model.SourceFolder

plugins {
id 'com.gradle.build-scan' version '2.0.2'
id 'com.gradle.build-scan' version '2.2.1'
id 'base'
}
if (Boolean.valueOf(project.findProperty('org.elasticsearch.acceptScanTOS') ?: "false")) {
@@ -47,11 +47,11 @@ public abstract class IndexerJobStats {
private final long indexFailures;
private final long searchFailures;

public IndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations,
public IndexerJobStats(long numPages, long numInputDocuments, long numOutputDocuments, long numInvocations,
long indexTime, long searchTime, long indexTotal, long searchTotal, long indexFailures, long searchFailures) {
this.numPages = numPages;
this.numInputDocuments = numInputDocuments;
this.numOuputDocuments = numOuputDocuments;
this.numOuputDocuments = numOutputDocuments;
this.numInvocations = numInvocations;
this.indexTime = indexTime;
this.indexTotal = indexTotal;
@@ -29,25 +29,25 @@

public class DataFrameIndexerTransformStats extends IndexerJobStats {

public static final ConstructingObjectParser<DataFrameIndexerTransformStats, Void> PARSER = new ConstructingObjectParser<>(
public static final ConstructingObjectParser<DataFrameIndexerTransformStats, Void> LENIENT_PARSER = new ConstructingObjectParser<>(
NAME, true, args -> new DataFrameIndexerTransformStats((long) args[0], (long) args[1], (long) args[2],
(long) args[3], (long) args[4], (long) args[5], (long) args[6], (long) args[7], (long) args[8], (long) args[9]));

static {
PARSER.declareLong(constructorArg(), NUM_PAGES);
PARSER.declareLong(constructorArg(), NUM_INPUT_DOCUMENTS);
PARSER.declareLong(constructorArg(), NUM_OUTPUT_DOCUMENTS);
PARSER.declareLong(constructorArg(), NUM_INVOCATIONS);
PARSER.declareLong(constructorArg(), INDEX_TIME_IN_MS);
PARSER.declareLong(constructorArg(), SEARCH_TIME_IN_MS);
PARSER.declareLong(constructorArg(), INDEX_TOTAL);
PARSER.declareLong(constructorArg(), SEARCH_TOTAL);
PARSER.declareLong(constructorArg(), INDEX_FAILURES);
PARSER.declareLong(constructorArg(), SEARCH_FAILURES);
LENIENT_PARSER.declareLong(constructorArg(), NUM_PAGES);
LENIENT_PARSER.declareLong(constructorArg(), NUM_INPUT_DOCUMENTS);
LENIENT_PARSER.declareLong(constructorArg(), NUM_OUTPUT_DOCUMENTS);
LENIENT_PARSER.declareLong(constructorArg(), NUM_INVOCATIONS);
LENIENT_PARSER.declareLong(constructorArg(), INDEX_TIME_IN_MS);
LENIENT_PARSER.declareLong(constructorArg(), SEARCH_TIME_IN_MS);
LENIENT_PARSER.declareLong(constructorArg(), INDEX_TOTAL);
LENIENT_PARSER.declareLong(constructorArg(), SEARCH_TOTAL);
LENIENT_PARSER.declareLong(constructorArg(), INDEX_FAILURES);
LENIENT_PARSER.declareLong(constructorArg(), SEARCH_FAILURES);
}

public static DataFrameIndexerTransformStats fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
return LENIENT_PARSER.parse(parser, null);
}

public DataFrameIndexerTransformStats(long numPages, long numInputDocuments, long numOuputDocuments,
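For context on the `PARSER` to `LENIENT_PARSER` rename above: the boolean `true` passed to the `ConstructingObjectParser` constructor tells it to ignore unknown fields, so the new name makes that leniency explicit at every declaration and call site. Below is a minimal sketch of the same pattern, assuming the `org.elasticsearch.common.xcontent` package layout in use at the time of this commit; the `ExampleStats` class and its `pages`/`documents` fields are hypothetical and used only for illustration.

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

import java.io.IOException;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

// Hypothetical stats class, shown only to illustrate the lenient-parser pattern above.
public class ExampleStats {

    private static final ParseField NUM_PAGES = new ParseField("pages");
    private static final ParseField NUM_DOCS = new ParseField("documents");

    // The boolean `true` means "ignore unknown fields": input containing fields this
    // parser does not declare is still parsed instead of failing with an exception.
    static final ConstructingObjectParser<ExampleStats, Void> LENIENT_PARSER =
        new ConstructingObjectParser<>("example_stats", true,
            args -> new ExampleStats((long) args[0], (long) args[1]));

    static {
        LENIENT_PARSER.declareLong(constructorArg(), NUM_PAGES);
        LENIENT_PARSER.declareLong(constructorArg(), NUM_DOCS);
    }

    private final long numPages;
    private final long numDocs;

    ExampleStats(long numPages, long numDocs) {
        this.numPages = numPages;
        this.numDocs = numDocs;
    }

    public static ExampleStats fromXContent(XContentParser parser) throws IOException {
        return LENIENT_PARSER.parse(parser, null);
    }
}

Parsing a document that contains extra, undeclared fields then succeeds instead of throwing, which is the behaviour the rename is meant to signal.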
2 changes: 1 addition & 1 deletion docs/java-api/index.asciidoc
@@ -1,8 +1,8 @@
[[java-api]]
= Java API

include::../Versions.asciidoc[]

[[java-api]]
[preface]
== Preface

5 changes: 3 additions & 2 deletions docs/plugins/repository-gcs.asciidoc
@@ -88,8 +88,9 @@ A JSON service account file looks like this:
----
// NOTCONSOLE

To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must add a setting name of the form `gcs.client.NAME.credentials_file`, where `NAME`
is the name of the client configuration for the repository. The implicit client
To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must
add a `file` setting with the name `gcs.client.NAME.credentials_file` using the `add-file` subcommand.
`NAME` is the name of the client configuration for the repository. The implicit client
name is `default`, but a different client name can be specified in the
repository settings with the `client` key.

2 changes: 0 additions & 2 deletions docs/reference/mapping/types/search-as-you-type.asciidoc
@@ -1,8 +1,6 @@
[[search-as-you-type]]
=== Search as you type datatype

experimental[]

The `search_as_you_type` field type is a text-like field that is optimized to
provide out-of-the-box support for queries that serve an as-you-type completion
use case. It creates a series of subfields that are analyzed to index terms
4 changes: 4 additions & 0 deletions docs/reference/sql/functions/date-time.asciidoc
@@ -187,6 +187,10 @@ relative date/time filtering:
include-tagged::{sql-specs}/docs/docs.csv-spec[filterNow]
--------------------------------------------------

[IMPORTANT]
Currently, using a _precision_ greater than 3 doesn't make any difference to the output of the
function, as the maximum number of fractional second digits returned is 3 (milliseconds).

[[sql-functions-datetime-day]]
==== `DAY_OF_MONTH/DOM/DAY`

4 changes: 2 additions & 2 deletions docs/reference/sql/language/data-types.asciidoc
@@ -26,7 +26,7 @@ s|SQL precision
| <<keyword, `keyword`>> | keyword | VARCHAR | 32,766
| <<text, `text`>> | text | VARCHAR | 2,147,483,647
| <<binary, `binary`>> | binary | VARBINARY | 2,147,483,647
| <<date, `date`>> | datetime | TIMESTAMP | 24
| <<date, `date`>> | datetime | TIMESTAMP | 29
| <<ip, `ip`>> | ip | VARCHAR | 39

4+h| Complex types
@@ -66,7 +66,7 @@ s|SQL type
s|SQL precision


| date | 24
| date | 29
| time | 18
| interval_year | 7
| interval_month | 7
@@ -63,8 +63,8 @@ public void setup() {
Supplier<QueryShardContext> queryShardContext = () -> {
return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }, null);
};
parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), indexService.getIndexAnalyzers(),
indexService.xContentRegistry(), indexService.similarityService(), mapperRegistry, queryShardContext);
parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), indexService.xContentRegistry(),
indexService.similarityService(), mapperRegistry, queryShardContext);
}

@Override
@@ -23,6 +23,7 @@
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.ObjectMetadata;
@@ -56,6 +57,12 @@

class S3BlobContainer extends AbstractBlobContainer {

/**
* Maximum number of deletes in a {@link DeleteObjectsRequest}.
* @see <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html">S3 Documentation</a>.
*/
private static final int MAX_BULK_DELETES = 1000;

private final S3BlobStore blobStore;
private final String keyPath;

@@ -118,6 +125,51 @@ public void deleteBlob(String blobName) throws IOException {
deleteBlobIgnoringIfNotExists(blobName);
}

@Override
public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
if (blobNames.isEmpty()) {
return;
}
try (AmazonS3Reference clientReference = blobStore.clientReference()) {
// S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes
final List<DeleteObjectsRequest> deleteRequests = new ArrayList<>();
final List<String> partition = new ArrayList<>();
for (String blob : blobNames) {
partition.add(buildKey(blob));
if (partition.size() == MAX_BULK_DELETES) {
deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
partition.clear();
}
}
if (partition.isEmpty() == false) {
deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
}
SocketAccess.doPrivilegedVoid(() -> {
AmazonClientException aex = null;
for (DeleteObjectsRequest deleteRequest : deleteRequests) {
try {
clientReference.client().deleteObjects(deleteRequest);
} catch (AmazonClientException e) {
if (aex == null) {
aex = e;
} else {
aex.addSuppressed(e);
}
}
}
if (aex != null) {
throw aex;
}
});
} catch (final AmazonClientException e) {
throw new IOException("Exception when deleting blobs [" + blobNames + "]", e);
}
}

private static DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs) {
return new DeleteObjectsRequest(bucket).withKeys(blobs.toArray(Strings.EMPTY_ARRAY)).withQuiet(true);
}

@Override
public void deleteBlobIgnoringIfNotExists(String blobName) throws IOException {
try (AmazonS3Reference clientReference = blobStore.clientReference()) {
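The `deleteBlobsIgnoringIfNotExists` implementation above combines two patterns: it splits the incoming keys into batches of at most 1,000 (the S3 multi-object delete limit captured by `MAX_BULK_DELETES`), and it remembers the first failure while attaching later ones as suppressed exceptions, so a single failed request does not abort the remaining batches. Here is a self-contained sketch of that control flow in plain Java, without the AWS SDK; `BatchedDeleteSketch`, `deleteInBatches` and the `deleteBatch` callback (a stand-in for `clientReference.client().deleteObjects(request)`) are hypothetical names used only for illustration.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public final class BatchedDeleteSketch {

    // Mirrors the S3 limit of 1,000 keys per multi-object delete request.
    private static final int MAX_BULK_DELETES = 1000;

    /**
     * Splits {@code keys} into batches of at most {@code MAX_BULK_DELETES} entries and hands
     * each batch to {@code deleteBatch}. All batches are attempted; the first failure is kept
     * and later failures are attached to it as suppressed exceptions before it is rethrown.
     */
    static void deleteInBatches(List<String> keys, Consumer<List<String>> deleteBatch) throws IOException {
        final List<List<String>> batches = new ArrayList<>();
        List<String> current = new ArrayList<>();
        for (String key : keys) {
            current.add(key);
            if (current.size() == MAX_BULK_DELETES) {
                batches.add(current);
                current = new ArrayList<>();
            }
        }
        if (current.isEmpty() == false) {
            batches.add(current);
        }

        RuntimeException failure = null;
        for (List<String> batch : batches) {
            try {
                deleteBatch.accept(batch); // stand-in for a bulk DeleteObjectsRequest
            } catch (RuntimeException e) {
                if (failure == null) {
                    failure = e;
                } else {
                    failure.addSuppressed(e);
                }
            }
        }
        if (failure != null) {
            throw new IOException("Exception when deleting blobs", failure);
        }
    }
}

Building all batches up front, before any network call, also matches the structure of the original code, where every request is issued inside a single `SocketAccess.doPrivilegedVoid` block.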
@@ -324,7 +324,7 @@ private PathTrie<RequestHandler> defaultHandlers(final Map<String, Bucket> bucke
// Delete Multiple Objects
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
handlers.insert(nonAuthPath(HttpPost.METHOD_NAME, "/"), (request) -> {
final RequestHandler bulkDeleteHandler = request -> {
final List<String> deletes = new ArrayList<>();
final List<String> errors = new ArrayList<>();

@@ -344,7 +344,6 @@ private PathTrie<RequestHandler> defaultHandlers(final Map<String, Bucket> bucke
if (closingOffset != -1) {
offset = offset + startMarker.length();
final String objectName = requestBody.substring(offset, closingOffset);

boolean found = false;
for (Bucket bucket : buckets.values()) {
if (bucket.objects.containsKey(objectName)) {
@@ -369,7 +368,9 @@ private PathTrie<RequestHandler> defaultHandlers(final Map<String, Bucket> bucke
}
}
return newInternalError(request.getId(), "Something is wrong with this POST multiple deletes request");
});
};
handlers.insert(nonAuthPath(HttpPost.METHOD_NAME, "/"), bulkDeleteHandler);
handlers.insert(nonAuthPath(HttpPost.METHOD_NAME, "/{bucket}"), bulkDeleteHandler);

// non-authorized requests

@@ -158,11 +158,7 @@ public DeleteObjectsResult deleteObjects(DeleteObjectsRequest request) throws Sd

final List<DeleteObjectsResult.DeletedObject> deletions = new ArrayList<>();
for (DeleteObjectsRequest.KeyVersion key : request.getKeys()) {
if (blobs.remove(key.getKey()) == null) {
AmazonS3Exception exception = new AmazonS3Exception("[" + key + "] does not exist.");
exception.setStatusCode(404);
throw exception;
} else {
if (blobs.remove(key.getKey()) != null) {
DeleteObjectsResult.DeletedObject deletion = new DeleteObjectsResult.DeletedObject();
deletion.setKey(key.getKey());
deletions.add(deletion);
@@ -1,9 +1,7 @@
---
"Array of objects":

- skip:
version: " - 6.99.99"
reason: types are required in requests before 7.0.0


- do:
bulk:
@@ -29,9 +27,7 @@
---
"Empty _id":

- skip:
version: " - 6.99.99"
reason: types are required in requests before 7.0.0


- do:
bulk:
@@ -65,9 +61,7 @@
"empty action":

- skip:
version: " - 6.99.99"
features: headers
reason: types are required in requests before 7.0.0

- do:
catch: /Malformed action\/metadata line \[3\], expected FIELD_NAME but found \[END_OBJECT\]/
@@ -1,9 +1,7 @@
---
"List of strings":

- skip:
version: " - 6.99.99"
reason: types are required in requests before 7.0.0

- do:
bulk:
refresh: true
@@ -1,9 +1,7 @@
---
"One big string":

- skip:
version: " - 6.99.99"
reason: types are required in requests before 7.0.0

- do:
bulk:
refresh: true
@@ -1,9 +1,7 @@
---
"Source filtering":

- skip:
version: " - 6.99.99"
reason: types are required in requests before 7.0.0

- do:
index:
refresh: true
@@ -1,9 +1,7 @@
---
"refresh=true immediately makes changes are visible in search":

- skip:
version: " - 6.99.99"
reason: types are required in requests before 7.0.0

- do:
bulk:
refresh: true
@@ -21,9 +19,7 @@
---
"refresh=empty string immediately makes changes are visible in search":

- skip:
version: " - 6.99.99"
reason: types are required in requests before 7.0.0

- do:
bulk:
refresh: ""
@@ -42,9 +38,7 @@
---
"refresh=wait_for waits until changes are visible in search":

- skip:
version: " - 6.99.99"
reason: types are required in requests before 7.0.0

- do:
bulk:
refresh: wait_for
@@ -1,10 +1,6 @@
---
"bulk without types on an index that has types":

- skip:
version: " - 6.99.99"
reason: Typeless APIs were introduced in 7.0.0

- do:
indices.create: # not using include_type_name: false on purpose
include_type_name: true
@@ -1,10 +1,6 @@
---
"Compare And Swap Sequence Numbers":

- skip:
version: " - 6.6.99"
reason: cas operations with sequence numbers was added in 6.7

- do:
index:
index: test_1
@@ -1,8 +1,5 @@
---
"Test cat thread_pool output":
- skip:
version: " - 6.99.99"
reason: this API was changed in a backwards-incompatible fashion in 7.0.0 so we need to skip in a mixed cluster

- do:
cat.thread_pool: {}
@@ -94,9 +94,6 @@

---
"cluster health basic test, one index with wait for no initializing shards":
- skip:
version: " - 6.1.99"
reason: "wait_for_no_initializing_shards is introduced in 6.2.0"

- do:
indices.create:
@@ -7,9 +7,6 @@

---
"get cluster state returns cluster_uuid at the top level":
- skip:
version: " - 6.3.99"
reason: "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher"

- do:
cluster.state: