From ac4dfe6d3eb8546906f708929aec1f17394372b9 Mon Sep 17 00:00:00 2001 From: "Abhijit P. Pai" Date: Thu, 13 Jun 2019 23:46:38 +0530 Subject: [PATCH 01/12] Batch CRUD API --- .../src/Batch/BatchExecUtils.cs | 161 +++++ .../src/Batch/BatchExecutor.cs | 139 ++++ .../src/Batch/BatchSchemaProvider.cs | 58 ++ .../src/Batch/CosmosBatch.cs | 240 +++++++ .../src/Batch/CosmosBatchOperationResult.cs | 200 ++++++ .../src/Batch/CosmosBatchResponse.cs | 360 ++++++++++ .../src/Batch/HybridRowBatchSchemas.json | 132 ++++ .../src/Batch/ItemBatchOperation.cs | 357 ++++++++++ .../src/Batch/ServerBatchRequest.cs | 170 +++++ .../SinglePartitionKeyServerBatchRequest.cs | 60 ++ .../src/ClientResources.Designer.cs | 125 +++- .../src/ClientResources.resx | 30 + .../src/Microsoft.Azure.Cosmos.csproj | 170 +++-- .../src/Resource/Container/CosmosContainer.cs | 7 + .../Container/CosmosContainerCore.Items.cs | 5 + .../src/Resource/Scripts/CosmosScripts.cs | 2 +- .../Resource/Settings/IndexingDirective.cs | 17 + .../Batch/BatchSinglePartitionKeyTests.cs | 650 ++++++++++++++++++ .../Batch/CosmosBatchTestBase.cs | 631 +++++++++++++++++ ...icrosoft.Azure.Cosmos.EmulatorTests.csproj | 12 + .../Batch/BatchExecUtilsUnitTests.cs | 248 +++++++ .../Batch/BatchRequestPayloadReader.cs | 249 +++++++ .../Batch/BatchResponsePayloadWriter.cs | 112 +++ .../Batch/BatchSchemaTests.cs | 270 ++++++++ .../Batch/CosmosBatchUnitTests.cs | 538 +++++++++++++++ .../BatchUnitTests.cs | 34 - .../Microsoft.Azure.Cosmos.Tests.csproj | 13 +- 27 files changed, 4858 insertions(+), 132 deletions(-) create mode 100644 Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs create mode 100644 Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs create mode 100644 Microsoft.Azure.Cosmos/src/Batch/BatchSchemaProvider.cs create mode 100644 Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs create mode 100644 Microsoft.Azure.Cosmos/src/Batch/CosmosBatchOperationResult.cs create mode 100644 Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs create mode 100644 Microsoft.Azure.Cosmos/src/Batch/HybridRowBatchSchemas.json create mode 100644 Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs create mode 100644 Microsoft.Azure.Cosmos/src/Batch/ServerBatchRequest.cs create mode 100644 Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs create mode 100644 Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs create mode 100644 Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs create mode 100644 Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchExecUtilsUnitTests.cs create mode 100644 Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchRequestPayloadReader.cs create mode 100644 Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchResponsePayloadWriter.cs create mode 100644 Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs create mode 100644 Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs delete mode 100644 Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/BatchUnitTests.cs diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs new file mode 100644 index 0000000000..d1f41a14d2 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs @@ -0,0 +1,161 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft 
Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + using System; + using System.Collections.Generic; + using System.IO; + using System.Net; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Documents; + + /// + /// Util methods for batch requests. + /// + internal static class BatchExecUtils + { + /// + /// Converts a Stream to a Memory{byte} wrapping a byte array honoring a provided maximum length for the returned Memory. + /// + /// Stream to be converted to bytes. + /// Desired maximum length of the Memory{byte}. + /// to cancel the operation. + /// A Memory{byte} with length at most maximumLength. + /// Throws RequestEntityTooLargeException if the input stream has more bytes than maximumLength. + public static async Task> StreamToMemoryAsync(Stream stream, int maximumLength, CancellationToken cancellationToken) + { + if (stream.CanSeek) + { + if (stream.Length > maximumLength) + { + throw new RequestEntityTooLargeException(RMResources.RequestTooLarge); + } + + // Some derived implementations of MemoryStream (such as versions of RecyclableMemoryStream prior to 1.2.2 that we may be using) + // return an incorrect response from TryGetBuffer. Use TryGetBuffer only on the MemoryStream type and not derived types. + MemoryStream memStream = stream as MemoryStream; + if (memStream != null + && memStream.GetType() == typeof(MemoryStream) + && memStream.TryGetBuffer(out ArraySegment memBuffer)) + { + return memBuffer; + } + + byte[] bytes = new byte[stream.Length]; + int sum = 0; + int count; + while ((count = await stream.ReadAsync(bytes, sum, bytes.Length - sum, cancellationToken)) > 0) + { + sum += count; + } + + return bytes; + } + else + { + int bufferSize = 81920; // Using the same buffer size as the Stream.DefaultCopyBufferSize + byte[] buffer = new byte[bufferSize]; + + using (MemoryStream memoryStream = new MemoryStream(bufferSize)) // using bufferSize as initial capacity as well + { + int sum = 0; + int count; + while ((count = await stream.ReadAsync(buffer, 0, bufferSize, cancellationToken)) > 0) + { + sum += count; + if (sum > maximumLength) + { + throw new RequestEntityTooLargeException(RMResources.RequestTooLarge); + } + +#pragma warning disable VSTHRD103 // Call async methods when in an async method + memoryStream.Write(buffer, 0, count); +#pragma warning restore VSTHRD103 // Call async methods when in an async method + } + + return new Memory(memoryStream.GetBuffer(), 0, (int)memoryStream.Length); + } + } + } + + public static void GetServerRequestLimits(out int maxServerRequestBodyLength, out int maxServerRequestOperationCount) + { + maxServerRequestBodyLength = Constants.MaxDirectModeBatchRequestBodySizeInBytes; + maxServerRequestOperationCount = Constants.MaxOperationsInDirectModeBatchRequest; + } + + public static CosmosResponseMessage Validate( + IReadOnlyList operations, + RequestOptions batchOptions, + CosmosClient client, + int? 
maxOperationCount = null) + { + string errorMessage = null; + + if (operations.Count == 0) + { + errorMessage = ClientResources.BatchNoOperations; + } + + if (maxOperationCount.HasValue && operations.Count > maxOperationCount.Value) + { + errorMessage = ClientResources.BatchTooLarge; + } + + if (errorMessage == null && batchOptions != null) + { + if (batchOptions.IfMatchEtag != null || batchOptions.IfNoneMatchEtag != null) + { + errorMessage = ClientResources.BatchRequestOptionNotSupported; + } + } + + if (errorMessage == null) + { + foreach (ItemBatchOperation operation in operations) + { + if (operation.RequestOptions != null) + { + if (operation.RequestOptions.ConsistencyLevel.HasValue + || operation.RequestOptions.PreTriggers != null + || operation.RequestOptions.PostTriggers != null + || operation.RequestOptions.SessionToken != null) + { + errorMessage = ClientResources.BatchItemRequestOptionNotSupported; + } + + if (operation.RequestOptions.Properties != null + && (operation.RequestOptions.Properties.TryGetValue(WFConstants.BackendHeaders.EffectivePartitionKey, out object epkObj) + | operation.RequestOptions.Properties.TryGetValue(WFConstants.BackendHeaders.EffectivePartitionKeyString, out object epkStrObj))) + { + byte[] epk = epkObj as byte[]; + string epkStr = epkStrObj as string; + if (epk == null || epkStr == null) + { + errorMessage = string.Format( + ClientResources.EpkPropertiesPairingExpected, + WFConstants.BackendHeaders.EffectivePartitionKey, + WFConstants.BackendHeaders.EffectivePartitionKeyString); + } + + if (operation.PartitionKey != null) + { + errorMessage = ClientResources.PKAndEpkSetTogether; + } + } + } + } + } + + if (errorMessage != null) + { + return new CosmosResponseMessage(HttpStatusCode.BadRequest, errorMessage: errorMessage); + } + + return new CosmosResponseMessage(HttpStatusCode.OK); + } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs new file mode 100644 index 0000000000..b3374930da --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs @@ -0,0 +1,139 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + using System; + using System.Collections.Generic; + using System.Diagnostics; + using System.IO; + using System.Linq; + using System.Net; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Documents; + + internal sealed class BatchExecutor + { + private readonly CosmosContainerCore container; + + private readonly CosmosClient client; + + private readonly IReadOnlyList inputOperations; + + private readonly PartitionKey partitionKey; + + private readonly RequestOptions batchOptions; + + private readonly int maxServerRequestBodyLength; + + private readonly int maxServerRequestOperationCount; + + public BatchExecutor( + CosmosContainerCore container, + PartitionKey partitionKey, + IReadOnlyList operations, + RequestOptions batchOptions, + int maxServerRequestBodyLength, + int maxServerRequestOperationCount) + { + this.container = container; + this.client = this.container.ClientContext.Client; + this.inputOperations = operations; + this.partitionKey = partitionKey; + this.batchOptions = batchOptions; + this.maxServerRequestBodyLength = maxServerRequestBodyLength; + this.maxServerRequestOperationCount = maxServerRequestOperationCount; + } + + public async Task ExecuteAsync(CancellationToken cancellationToken) + { + CosmosResponseMessage validationResult = BatchExecUtils.Validate( + this.inputOperations, + this.batchOptions, + this.client, + this.maxServerRequestOperationCount); + + if (!validationResult.IsSuccessStatusCode) + { + return new CosmosBatchResponse( + validationResult.StatusCode, + validationResult.Headers.SubStatusCode, + validationResult.ErrorMessage, + this.inputOperations); + } + + SinglePartitionKeyServerBatchRequest serverRequest; + try + { + serverRequest = await SinglePartitionKeyServerBatchRequest.CreateAsync( + this.partitionKey, + new ArraySegment(this.inputOperations.ToArray()), + this.maxServerRequestBodyLength, + this.maxServerRequestOperationCount, + serializer: this.client.ClientOptions.CosmosSerializerWithWrapperOrDefault, + cancellationToken: cancellationToken); + } + catch (RequestEntityTooLargeException ex) + { + return new CosmosBatchResponse(ex.StatusCode ?? HttpStatusCode.RequestEntityTooLarge, ex.GetSubStatus(), ClientResources.BatchOperationTooLarge, this.inputOperations); + } + + if (serverRequest.Operations.Count != this.inputOperations.Count) + { + // todo: should this be PayloadTooLarge + return new CosmosBatchResponse(HttpStatusCode.RequestEntityTooLarge, SubStatusCodes.Unknown, ClientResources.BatchTooLarge, this.inputOperations); + } + + return await this.ExecuteServerRequestAsync(serverRequest, cancellationToken); + } + + /// + /// Makes a single batch request to the server. + /// + /// A server request with a set of operations on items. + /// representing request cancellation. + /// Response from the server or ServiceUnavailable response in case of exceptions. 
+ private async Task ExecuteServerRequestAsync(SinglePartitionKeyServerBatchRequest serverRequest, CancellationToken cancellationToken) + { + try + { + using (Stream serverRequestPayload = serverRequest.TransferBodyStream()) + { + Debug.Assert(serverRequestPayload != null, "Server request payload expected to be non-null"); + CosmosResponseMessage cosmosResponseMessage = await ExecUtils.ProcessResourceOperationAsync( + this.client, + this.container.LinkUri, + ResourceType.Document, + OperationType.Batch, + this.batchOptions, + this.container, + serverRequest.PartitionKey, + serverRequestPayload, + requestMessage => + { + requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchRequest, bool.TrueString); + requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchAtomic, bool.TrueString); + requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchOrdered, bool.TrueString); + }, + responseMessage => responseMessage, // response creator + cancellationToken); + + return await CosmosBatchResponse.FromResponseMessageAsync( + cosmosResponseMessage, + serverRequest, + this.client.ClientOptions.CosmosSerializerWithWrapperOrDefault); + } + } + catch (CosmosException ex) + { + return new CosmosBatchResponse( + HttpStatusCode.ServiceUnavailable, + SubStatusCodes.Unknown, + ex.Message, + serverRequest.Operations); + } + } + } +} diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchSchemaProvider.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchSchemaProvider.cs new file mode 100644 index 0000000000..3ccab72fc6 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchSchemaProvider.cs @@ -0,0 +1,58 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + using System.IO; + using System.Reflection; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.Layouts; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.Schemas; + + internal static class BatchSchemaProvider + { + static BatchSchemaProvider() + { + string json = BatchSchemaProvider.GetEmbeddedResource(@"Batch\HybridRowBatchSchemas.json"); + BatchSchemaProvider.BatchSchemaNamespace = Namespace.Parse(json); + BatchSchemaProvider.BatchLayoutResolver = new LayoutResolverNamespace(BatchSchemaProvider.BatchSchemaNamespace); + + BatchSchemaProvider.BatchOperationLayout = BatchSchemaProvider.BatchLayoutResolver.Resolve(BatchSchemaProvider.BatchSchemaNamespace.Schemas.Find(x => x.Name == "BatchOperation").SchemaId); + BatchSchemaProvider.BatchResultLayout = BatchSchemaProvider.BatchLayoutResolver.Resolve(BatchSchemaProvider.BatchSchemaNamespace.Schemas.Find(x => x.Name == "BatchResult").SchemaId); + } + + public static Namespace BatchSchemaNamespace { get; private set; } + + public static LayoutResolverNamespace BatchLayoutResolver { get; private set; } + + public static Layout BatchOperationLayout { get; private set; } + + public static Layout BatchResultLayout { get; private set; } + + private static string GetEmbeddedResource(string resourceName) + { + Assembly assembly = Assembly.GetAssembly(typeof(BatchSchemaProvider)); + + // Assumes BatchSchemaProvider is in the default namespace of the assembly. 
+ resourceName = BatchSchemaProvider.FormatResourceName(typeof(BatchSchemaProvider).Namespace, resourceName); + + using (Stream resourceStream = assembly.GetManifestResourceStream(resourceName)) + { + if (resourceStream == null) + { + return null; + } + + using (StreamReader reader = new StreamReader(resourceStream)) + { + return reader.ReadToEnd(); + } + } + } + + private static string FormatResourceName(string namespaceName, string resourceName) + { + return namespaceName + "." + resourceName.Replace(" ", "_").Replace("\\", ".").Replace("/", "."); + } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs b/Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs new file mode 100644 index 0000000000..a81c436218 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs @@ -0,0 +1,240 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + using System.Collections.Generic; + using System.IO; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Documents; + + /// + /// Represents a batch of requests to Cosmos DB. + /// + public class CosmosBatch + { + private readonly PartitionKey partitionKey; + + private readonly CosmosContainerCore container; + + private List operations; + + /// + /// Initializes a new instance of the class. + /// + /// Container that has items on which batch operations are to be performed. + /// The partition key for all items in the batch. . + internal CosmosBatch(CosmosContainerCore container, PartitionKey partitionKey) + { + this.container = container; + this.partitionKey = partitionKey; + this.operations = new List(); + } + + /// + /// Adds an operation to create an item into the batch. + /// + /// A JSON serializable object that must contain an id property. to implement a custom serializer. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + /// The type of item to be created. + public virtual CosmosBatch CreateItem(T item, ItemRequestOptions itemRequestOptions = null) + { + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Create, + operationIndex: this.operations.Count, + resource: item, + requestOptions: itemRequestOptions)); + + return this; + } + + /// + /// Adds an operation to create an item into the batch. + /// + /// + /// A containing the payload of the item. + /// The stream must have a UTF-8 encoded JSON object which contains an id property. + /// + /// (Optional) The options for the item request. . + /// The instance with the operation added. + public virtual CosmosBatch CreateItemStream(Stream resourceStream, ItemRequestOptions itemRequestOptions = null) + { + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Create, + operationIndex: this.operations.Count, + resourceStream: resourceStream, + requestOptions: itemRequestOptions)); + + return this; + } + + /// + /// Adds an operation to read an item into the batch. + /// + /// The cosmos item id. + /// (Optional) The options for the item request. . + /// The instance with the operation added. 
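// --- Illustrative usage sketch (not part of this patch) ---
// Shows how the fluent CosmosBatch API defined in this file is intended to be composed and
// executed. The CreateBatch factory method on the container and the ToDoActivity type are
// assumptions made for this example; everything else uses only the CosmosBatch and
// CosmosBatchResponse members added by this change.
internal static async Task ExecuteBatchExampleAsync(CosmosContainer container, ToDoActivity newActivity, ToDoActivity updatedActivity)
{
    CosmosBatch batch = container.CreateBatch(new PartitionKey("Seattle"))
        .CreateItem(newActivity)
        .ReadItem("existingActivityId")
        .ReplaceItem("otherActivityId", updatedActivity)
        .DeleteItem("obsoleteActivityId");

    using (CosmosBatchResponse response = await batch.ExecuteAsync())
    {
        if (response.IsSuccessStatusCode)
        {
            // Results are positional: index 1 corresponds to the ReadItem operation above.
            ToDoActivity readActivity = response.GetOperationResultAtIndex<ToDoActivity>(1).Resource;
        }
    }
}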
+ public virtual CosmosBatch ReadItem(string id, ItemRequestOptions itemRequestOptions = null) + { + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Read, + operationIndex: this.operations.Count, + id: id, + requestOptions: itemRequestOptions)); + + return this; + } + + /// + /// Adds an operation to upsert an item into the batch. + /// + /// A JSON serializable object that must contain an id property. to implement a custom serializer. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + /// The type of item to be created. + public virtual CosmosBatch UpsertItem(T item, ItemRequestOptions itemRequestOptions = null) + { + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Upsert, + operationIndex: this.operations.Count, + resource: item, + requestOptions: itemRequestOptions)); + + return this; + } + + /// + /// Adds an operation to upsert an item into the batch. + /// + /// + /// A containing the payload of the item. + /// The stream must have a UTF-8 encoded JSON object which contains an id property. + /// + /// (Optional) The options for the item request. . + /// The instance with the operation added. + public virtual CosmosBatch UpsertItemStream(Stream resourceStream, ItemRequestOptions itemRequestOptions = null) + { + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Upsert, + operationIndex: this.operations.Count, + resourceStream: resourceStream, + requestOptions: itemRequestOptions)); + + return this; + } + + /// + /// Adds an operation to replace an item into the batch. + /// + /// The cosmos item id. + /// A JSON serializable object that must contain an id property. to implement a custom serializer. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + /// The type of item to be created. + public virtual CosmosBatch ReplaceItem(string id, T item, ItemRequestOptions itemRequestOptions = null) + { + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Replace, + operationIndex: this.operations.Count, + id: id, + resource: item, + requestOptions: itemRequestOptions)); + + return this; + } + + /// + /// Adds an operation to replace an item into the batch. + /// + /// The cosmos item id. + /// + /// A containing the payload of the item. + /// The stream must have a UTF-8 encoded JSON object which contains an id property. + /// + /// (Optional) The options for the item request. . + /// The instance with the operation added. + public virtual CosmosBatch ReplaceItemStream(string id, Stream resourceStream, ItemRequestOptions itemRequestOptions = null) + { + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Replace, + operationIndex: this.operations.Count, + id: id, + resourceStream: resourceStream, + requestOptions: itemRequestOptions)); + + return this; + } + + /// + /// Adds an operation to delete an item into the batch. + /// + /// The cosmos item id. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + public virtual CosmosBatch DeleteItem(string id, ItemRequestOptions itemRequestOptions = null) + { + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Delete, + operationIndex: this.operations.Count, + id: id, + requestOptions: itemRequestOptions)); + + return this; + } + + /// + /// Executes the batch at the Azure Cosmos service as an asynchronous operation. 
+ /// + /// (Optional) representing request cancellation. + /// An awaitable which contains the completion status and results of each operation. + public virtual Task ExecuteAsync(CancellationToken cancellationToken = default(CancellationToken)) + { + return this.ExecuteAsync(requestOptions: null, cancellationToken: cancellationToken); + } + + /// + /// Adds an operation to patch an item into the batch. + /// + /// The cosmos item id. + /// A containing the patch specification. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + internal virtual CosmosBatch PatchItemStream(string id, Stream patchStream, ItemRequestOptions itemRequestOptions = null) + { + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Patch, + operationIndex: this.operations.Count, + id: id, + resourceStream: patchStream, + requestOptions: itemRequestOptions)); + + return this; + } + + /// + /// Executes the batch at the Azure Cosmos service as an asynchronous operation. + /// + /// Options that apply to the batch. + /// (Optional) representing request cancellation. + /// An awaitable which contains the completion status and results of each operation. + internal virtual Task ExecuteAsync(RequestOptions requestOptions, CancellationToken cancellationToken = default(CancellationToken)) + { + BatchExecUtils.GetServerRequestLimits(out int maxServerRequestBodyLength, out int maxServerRequestOperationCount); + return this.ExecuteAsync(maxServerRequestBodyLength, maxServerRequestOperationCount, requestOptions, cancellationToken); + } + + internal Task ExecuteAsync( + int maxServerRequestBodyLength, + int maxServerRequestOperationCount, + RequestOptions requestOptions = null, + CancellationToken cancellationToken = default(CancellationToken)) + { + BatchExecutor executor = new BatchExecutor(this.container, this.partitionKey, this.operations, requestOptions, maxServerRequestBodyLength, maxServerRequestOperationCount); + this.operations = new List(); + return executor.ExecuteAsync(cancellationToken); + } + } +} diff --git a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchOperationResult.cs b/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchOperationResult.cs new file mode 100644 index 0000000000..f137a58813 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchOperationResult.cs @@ -0,0 +1,200 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + using System; + using System.IO; + using System.Net; + using Microsoft.Azure.Cosmos.Serialization.HybridRow; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.IO; + using Microsoft.Azure.Documents; + + /// + /// Represents a result for a specific operation that was part of a batch request. + /// + public class CosmosBatchOperationResult + { + internal CosmosBatchOperationResult(HttpStatusCode statusCode) + { + this.StatusCode = statusCode; + } + + internal CosmosBatchOperationResult(CosmosBatchOperationResult other) + { + this.StatusCode = other.StatusCode; + this.SubStatusCode = other.SubStatusCode; + this.ETag = other.ETag; + this.ResourceStream = other.ResourceStream; + this.RetryAfter = other.RetryAfter; + } + + private CosmosBatchOperationResult() + { + } + + /// + /// Gets the completion status of the operation. 
+ /// + public virtual HttpStatusCode StatusCode { get; private set; } + + /// + /// Gets a value indicating whether the current operation completed successfully. + /// + public virtual bool IsSuccessStatusCode + { + get + { + int statusCodeInt = (int)this.StatusCode; + return statusCodeInt >= 200 && statusCodeInt <= 299; + } + } + + /// + /// Gets the entity tag associated with the resource. + /// + /// + /// The entity tag associated with the resource. + /// + /// + /// ETags are used for concurrency checking when updating resources. + /// + public virtual string ETag { get; internal set; } + + /// + /// Gets the content of the resource. + /// + /// + /// The content of the resource as a MemoryStream. + /// + public virtual MemoryStream ResourceStream { get; internal set; } + + /// + /// In case the operation is rate limited, indicates the time post which a retry can be attempted. + /// + public virtual TimeSpan RetryAfter { get; internal set; } + + /// + /// Gets detail on the completion status of the operation. + /// + internal virtual SubStatusCodes SubStatusCode { get; set; } + + internal static Result ReadOperationResult(Memory input, out CosmosBatchOperationResult batchOperationResult) + { + RowBuffer row = new RowBuffer(input.Length); + if (!row.ReadFrom( + input.Span, + HybridRowVersion.V1, + BatchSchemaProvider.BatchLayoutResolver)) + { + batchOperationResult = null; + return Result.Failure; + } + + RowReader reader = new RowReader(ref row); + Result result = CosmosBatchOperationResult.ReadOperationResult(ref reader, out batchOperationResult); + if (result != Result.Success) + { + return result; + } + + // Ensure the mandatory fields were populated + if (batchOperationResult.StatusCode == default(HttpStatusCode)) + { + return Result.Failure; + } + + return Result.Success; + } + + private static Result ReadOperationResult(ref RowReader reader, out CosmosBatchOperationResult batchOperationResult) + { + batchOperationResult = new CosmosBatchOperationResult(); + while (reader.Read()) + { + Result r; + switch (reader.Path) + { + case "statusCode": + r = reader.ReadInt32(out int statusCode); + if (r != Result.Success) + { + return r; + } + + batchOperationResult.StatusCode = (HttpStatusCode)statusCode; + break; + + case "subStatusCode": + r = reader.ReadInt32(out int subStatusCode); + if (r != Result.Success) + { + return r; + } + + batchOperationResult.SubStatusCode = (SubStatusCodes)subStatusCode; + break; + + case "eTag": + r = reader.ReadString(out string eTag); + if (r != Result.Success) + { + return r; + } + + batchOperationResult.ETag = eTag; + break; + + case "resourceBody": + r = reader.ReadBinary(out byte[] resourceBody); + if (r != Result.Success) + { + return r; + } + + batchOperationResult.ResourceStream = new MemoryStream( + buffer: resourceBody, index: 0, count: resourceBody.Length, writable: false, publiclyVisible: true); + break; + + case "retryAfterMilliseconds": + r = reader.ReadUInt32(out uint retryAfterMilliseconds); + if (r != Result.Success) + { + return r; + } + + batchOperationResult.RetryAfter = TimeSpan.FromMilliseconds(retryAfterMilliseconds); + break; + } + } + + return Result.Success; + } + } + + /// + /// Represents a result for a specific operation that is part of a batch. + /// + /// The type of the Resource which this class wraps. 
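// Illustrative sketch (not part of this patch): inspecting per-operation results on a
// CosmosBatchResponse obtained from CosmosBatch.ExecuteAsync. Only members defined by this
// change (Count, the indexer, IsSuccessStatusCode, StatusCode and RetryAfter) are used;
// the helper method name is hypothetical.
internal static void LogFailedOperations(CosmosBatchResponse response)
{
    for (int index = 0; index < response.Count; index++)
    {
        CosmosBatchOperationResult result = response[index];
        if (!result.IsSuccessStatusCode)
        {
            // When the batch is throttled, every result carries the server's RetryAfter hint.
            Console.WriteLine($"Operation {index} failed with {result.StatusCode}; retry after {result.RetryAfter}.");
        }
    }
}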
+#pragma warning disable SA1402 // File may only contain a single type + public class CosmosBatchOperationResult : CosmosBatchOperationResult +#pragma warning restore SA1402 // File may only contain a single type + { + /// + /// Initializes a new instance of the class. + /// + /// CosmosBatchOperationResult with stream resource. + /// Deserialized resource. + internal CosmosBatchOperationResult(CosmosBatchOperationResult result, T resource) + : base(result) + { + this.Resource = resource; + } + + /// + /// Gets the content of the resource. + /// + public virtual T Resource { get; set; } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs b/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs new file mode 100644 index 0000000000..e1b228a937 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs @@ -0,0 +1,360 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + using System; + using System.Collections; + using System.Collections.Generic; + using System.Net; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.Serialization.HybridRow; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.RecordIO; + using Microsoft.Azure.Documents; + + /// + /// Response of a batch request. + /// +#pragma warning disable CA1710 // Identifiers should have correct suffix + public class CosmosBatchResponse : IReadOnlyList, IDisposable +#pragma warning restore CA1710 // Identifiers should have correct suffix + { + private bool isDisposed; + + private List results; + + internal CosmosBatchResponse( + HttpStatusCode statusCode, + SubStatusCodes subStatusCode, + string errorMessage, + double requestCharge, + TimeSpan? retryAfter, + string activityId, + ServerBatchRequest serverRequest, + CosmosJsonSerializer serializer) + : this(statusCode, subStatusCode, errorMessage, requestCharge, retryAfter, activityId, serverRequest.Operations, serializer) + { + } + + /// + /// Initializes a new instance of the class. + /// This method is intended to be used only when a response from the server is not available. + /// + /// Indicates why the batch was not processed. + /// Provides further details about why the batch was not processed. + /// The reason for failure. + /// Operations that were to be executed. + internal CosmosBatchResponse( + HttpStatusCode statusCode, + SubStatusCodes subStatusCode, + string errorMessage, + IReadOnlyList operations) + : this(statusCode, + subStatusCode, + errorMessage, + requestCharge: 0, + retryAfter: null, + activityId: Guid.Empty.ToString(), + operations: operations, + serializer: null) + { + } + + /// + /// Initializes a new instance of the class. + /// + protected CosmosBatchResponse() + { + } + + private CosmosBatchResponse( + HttpStatusCode statusCode, + SubStatusCodes subStatusCode, + string errorMessage, + double requestCharge, + TimeSpan? retryAfter, + string activityId, + IReadOnlyList operations, + CosmosJsonSerializer serializer) + { + this.StatusCode = statusCode; + this.SubStatusCode = subStatusCode; + this.ErrorMessage = errorMessage; + this.Operations = operations; + this.Serializer = serializer; + this.RequestCharge = requestCharge; + this.RetryAfter = retryAfter; + this.ActivityId = activityId; + } + + /// + /// Gets the ActivityId that identifies the server request made to execute the batch. 
+ /// + public virtual string ActivityId { get; } + + /// + /// Gets the request charge for the batch request. + /// + /// + /// The request charge measured in request units. + /// + public virtual double RequestCharge { get; } + + /// + /// Gets the amount of time to wait before retrying this or any other request within Cosmos container or collection due to throttling. + /// + public virtual TimeSpan? RetryAfter { get; } + + /// + /// Gets the completion status code of the batch request. + /// + /// The request completion status code. + public virtual HttpStatusCode StatusCode { get; } + + /// + /// Gets the reason for failure of the batch request. + /// + /// The reason for failure, if any. + public virtual string ErrorMessage { get; } + + /// + /// Gets a value indicating whether the batch was processed. + /// + public virtual bool IsSuccessStatusCode + { + get + { + int statusCodeInt = (int)this.StatusCode; + return statusCodeInt >= 200 && statusCodeInt <= 299; + } + } + + /// + /// Gets the number of operation results. + /// + public virtual int Count => this.results?.Count ?? 0; + + internal virtual SubStatusCodes SubStatusCode { get; } + + internal virtual CosmosJsonSerializer Serializer { get; } + + internal IReadOnlyList Operations { get; set; } + + /// + /// Gets the result of the operation at the provided index in the batch. + /// + /// 0-based index of the operation in the batch whose result needs to be returned. + /// Result of operation at the provided index in the batch. + public virtual CosmosBatchOperationResult this[int index] + { + get + { + return this.results[index]; + } + } + + /// + /// Gets the result of the operation at the provided index in the batch - the returned result has a Resource of provided type. + /// + /// Type to which the Resource in the operation result needs to be deserialized to, when present. + /// 0-based index of the operation in the batch whose result needs to be returned. + /// Result of batch operation that contains a Resource deserialized to specified type. + public virtual CosmosBatchOperationResult GetOperationResultAtIndex(int index) + { + CosmosBatchOperationResult result = this.results[index]; + + T resource = default(T); + if (result.ResourceStream != null) + { + resource = this.Serializer.FromStream(result.ResourceStream); + } + + return new CosmosBatchOperationResult(result, resource); + } + + /// + /// Gets an enumerator over the operation results. + /// + /// Enumerator over the operation results. + public virtual IEnumerator GetEnumerator() + { + return this.results.GetEnumerator(); + } + + /// + /// Gets all the Activity IDs associated with the response. + /// + /// An enumerable that contains the Activity IDs. + public virtual IEnumerable GetActivityIds() + { + yield return this.ActivityId; + } + + /// + /// Disposes the current . 
+ /// + public void Dispose() + { + this.Dispose(true); + GC.SuppressFinalize(this); + } + + /// + IEnumerator IEnumerable.GetEnumerator() + { + return this.GetEnumerator(); + } + + internal static async Task FromResponseMessageAsync( + CosmosResponseMessage responseMessage, + ServerBatchRequest serverRequest, + CosmosJsonSerializer serializer) + { + using (responseMessage) + { + CosmosBatchResponse response = null; + if (responseMessage.IsSuccessStatusCode && responseMessage.Content != null) + { + response = await CosmosBatchResponse.PopulateFromContentAsync(responseMessage, serverRequest, serializer); + if (response == null) + { + // Convert any payload read failures as InternalServerError + response = new CosmosBatchResponse( + HttpStatusCode.InternalServerError, + SubStatusCodes.Unknown, + ClientResources.ServerResponseDeserializationFailure, + responseMessage.Headers.RequestCharge, + responseMessage.Headers.RetryAfter, + responseMessage.Headers.ActivityId, + serverRequest, + serializer); + } + } + else + { + response = new CosmosBatchResponse( + responseMessage.StatusCode, + responseMessage.Headers.SubStatusCode, + responseMessage.ErrorMessage, + responseMessage.Headers.RequestCharge, + responseMessage.Headers.RetryAfter, + responseMessage.Headers.ActivityId, + serverRequest, + serializer); + } + + if (response.results == null || response.results.Count != serverRequest.Operations.Count) + { + if (responseMessage.IsSuccessStatusCode) + { + // Server should be guaranteeing number of results equal to operations when + // batch request is successful - so fail as InternalServerError if this is not the case. + response = new CosmosBatchResponse( + HttpStatusCode.InternalServerError, + SubStatusCodes.Unknown, + ClientResources.InvalidServerResponse, + responseMessage.Headers.RequestCharge, + responseMessage.Headers.RetryAfter, + responseMessage.Headers.ActivityId, + serverRequest, + serializer); + } + + // When the overall response status code is TooManyRequests, propagate the RetryAfter into the individual operations. 
+ int retryAfterMilliseconds = 0; + + if ((int)responseMessage.StatusCode == (int)StatusCodes.TooManyRequests) + { + if (!responseMessage.Headers.TryGetValue(HttpConstants.HttpHeaders.RetryAfterInMilliseconds, out string retryAfter) || + retryAfter == null || + !int.TryParse(retryAfter, out retryAfterMilliseconds)) + { + retryAfterMilliseconds = 0; + } + } + + response.results = new List(); + for (int i = 0; i < serverRequest.Operations.Count; i++) + { + response.results.Add( + new CosmosBatchOperationResult(response.StatusCode) + { + SubStatusCode = response.SubStatusCode, + RetryAfter = TimeSpan.FromMilliseconds(retryAfterMilliseconds), + }); + } + } + + return response; + } + } + + internal static async Task PopulateFromContentAsync( + CosmosResponseMessage responseMessage, + ServerBatchRequest serverRequest, + CosmosJsonSerializer serializer) + { + List results = new List(); + + int resizerInitialCapacity = 81920; + if (responseMessage.Content.CanSeek) + { + resizerInitialCapacity = (int)responseMessage.Content.Length; + } + + Result res = await responseMessage.Content.ReadRecordIOAsync( + record => + { + Result r = CosmosBatchOperationResult.ReadOperationResult(record, out CosmosBatchOperationResult operationResult); + if (r != Result.Success) + { + return r; + } + + results.Add(operationResult); + return r; + }, + resizer: new MemorySpanResizer(resizerInitialCapacity)); + + if (res != Result.Success) + { + return null; + } + + CosmosBatchResponse response = new CosmosBatchResponse( + responseMessage.StatusCode, + responseMessage.Headers.SubStatusCode, + responseMessage.ErrorMessage, + responseMessage.Headers.RequestCharge, + responseMessage.Headers.RetryAfter, + responseMessage.Headers.ActivityId, + serverRequest, + serializer); + + response.results = results; + return response; + } + + /// + /// Disposes the disposable members held by this class. + /// + /// Indicates whether to dispose managed resources or not. 
+ protected virtual void Dispose(bool disposing) + { + if (disposing && !this.isDisposed) + { + this.isDisposed = true; + if (this.Operations != null) + { + foreach (ItemBatchOperation operation in this.Operations) + { + operation.Dispose(); + } + + this.Operations = null; + } + } + } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Batch/HybridRowBatchSchemas.json b/Microsoft.Azure.Cosmos/src/Batch/HybridRowBatchSchemas.json new file mode 100644 index 0000000000..995fb56e41 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/HybridRowBatchSchemas.json @@ -0,0 +1,132 @@ +{ + "name": "Microsoft.Azure.Cosmos.BatchApi", + "version": "v1", + "schemas": [ + { + "name": "BatchOperation", + "id": 2145473648, + "type": "schema", + "properties": [ + { + "path": "operationType", + "type": { + "type": "int32", + "storage": "fixed" + } + }, + { + "path": "resourceType", + "type": { + "type": "int32", + "storage": "fixed" + } + }, + { + "path": "partitionKey", + "type": { + "type": "utf8", + "storage": "variable" + } + }, + { + "path": "effectivePartitionKey", + "type": { + "type": "binary", + "storage": "variable" + } + }, + { + "path": "id", + "type": { + "type": "utf8", + "storage": "variable" + } + }, + { + "path": "binaryId", + "type": { + "type": "binary", + "storage": "variable" + } + }, + { + "path": "resourceBody", + "type": { + "type": "binary", + "storage": "variable" + } + }, + { + "path": "indexingDirective", + "type": { + "type": "utf8", + "storage": "sparse" + } + }, + { + "path": "ifMatch", + "type": { + "type": "utf8", + "storage": "sparse" + } + }, + { + "path": "ifNoneMatch", + "type": { + "type": "utf8", + "storage": "sparse" + } + }, + { + "path": "timeToLiveInSeconds", + "type": { + "type": "int32", + "storage": "sparse" + } + } + ] + }, + { + "name": "BatchResult", + "id": 2145473649, + "type": "schema", + "properties": [ + { + "path": "statusCode", + "type": { + "type": "int32", + "storage": "fixed" + } + }, + { + "path": "subStatusCode", + "type": { + "type": "int32", + "storage": "fixed" + } + }, + { + "path": "eTag", + "type": { + "type": "utf8", + "storage": "variable" + } + }, + { + "path": "resourceBody", + "type": { + "type": "binary", + "storage": "variable" + } + }, + { + "path": "retryAfterMilliseconds", + "type": { + "type": "uint32", + "storage": "sparse" + } + } + ] + } + ] +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs b/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs new file mode 100644 index 0000000000..8f7851f049 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs @@ -0,0 +1,357 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + using System; + using System.Diagnostics; + using System.IO; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.Serialization.HybridRow; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.IO; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.Layouts; + using Microsoft.Azure.Documents; + + /// + /// Represents an operation on an item which will be executed as part of a batch request + /// on a container. 
+ /// + internal class ItemBatchOperation : IDisposable + { +#pragma warning disable SA1401 // Fields should be private + protected Memory body; +#pragma warning restore SA1401 // Fields should be private + private bool isDisposed; + + public ItemBatchOperation( + OperationType operationType, + int operationIndex, + PartitionKey partitionKey, + string id = null, + Stream resourceStream = null, + ItemRequestOptions requestOptions = null) + { + this.OperationType = operationType; + this.OperationIndex = operationIndex; + this.PartitionKey = partitionKey; + this.ParsedPartitionKey = new Documents.PartitionKey(partitionKey.Value); + this.PartitionKeyJson = this.ParsedPartitionKey.ToString(); + this.Id = id; + this.ResourceStream = resourceStream; + this.RequestOptions = requestOptions; + } + + public ItemBatchOperation( + OperationType operationType, + int operationIndex, + string id = null, + Stream resourceStream = null, + ItemRequestOptions requestOptions = null) + { + this.OperationType = operationType; + this.OperationIndex = operationIndex; + this.Id = id; + this.ResourceStream = resourceStream; + this.RequestOptions = requestOptions; + } + + public PartitionKey PartitionKey { get; } + + public string Id { get; } + + public OperationType OperationType { get; } + + public Stream ResourceStream { get; protected set; } + + public ItemRequestOptions RequestOptions { get; } + + public int OperationIndex { get; } + + internal string PartitionKeyJson { get; } + + internal Documents.PartitionKey ParsedPartitionKey { get; set; } + + internal Memory ResourceBody + { + get + { + Debug.Assert( + this.ResourceStream == null || !this.body.IsEmpty, + "ResourceBody read without materialization of ResourceStream"); + + return this.body; + } + + set + { + this.body = value; + } + } + + /// + /// Disposes the current . 
+ /// + public void Dispose() + { + this.Dispose(true); + } + + internal static Result WriteOperation(ref RowWriter writer, TypeArgument typeArg, ItemBatchOperation operation) + { + Result r = writer.WriteInt32("operationType", (int)operation.OperationType); + if (r != Result.Success) + { + return r; + } + + r = writer.WriteInt32("resourceType", (int)ResourceType.Document); + if (r != Result.Success) + { + return r; + } + + if (operation.PartitionKeyJson != null) + { + r = writer.WriteString("partitionKey", operation.PartitionKeyJson); + if (r != Result.Success) + { + return r; + } + } + + if (operation.Id != null) + { + r = writer.WriteString("id", operation.Id); + if (r != Result.Success) + { + return r; + } + } + + if (!operation.ResourceBody.IsEmpty) + { + r = writer.WriteBinary("resourceBody", operation.ResourceBody.Span); + if (r != Result.Success) + { + return r; + } + } + + if (operation.RequestOptions != null) + { + ItemRequestOptions options = operation.RequestOptions; + if (options.IndexingDirective.HasValue) + { + string indexingDirectiveString = IndexingDirectiveStrings.FromIndexingDirective(options.IndexingDirective.Value); + r = writer.WriteString("indexingDirective", indexingDirectiveString); + if (r != Result.Success) + { + return r; + } + } + + if (options.IfMatchEtag != null) + { + r = writer.WriteString("ifMatch", options.IfMatchEtag); + if (r != Result.Success) + { + return r; + } + } + else if (options.IfNoneMatchEtag != null) + { + r = writer.WriteString("ifNoneMatch", options.IfNoneMatchEtag); + if (r != Result.Success) + { + return r; + } + } + + if (options.Properties != null) + { + if (options.Properties.TryGetValue(WFConstants.BackendHeaders.BinaryId, out object binaryIdObj)) + { + byte[] binaryId = binaryIdObj as byte[]; + if (binaryId != null) + { + r = writer.WriteBinary("binaryId", binaryId); + if (r != Result.Success) + { + return r; + } + } + } + + if (options.Properties.TryGetValue(WFConstants.BackendHeaders.EffectivePartitionKey, out object epkObj)) + { + byte[] epk = epkObj as byte[]; + if (epk != null) + { + r = writer.WriteBinary("effectivePartitionKey", epk); + if (r != Result.Success) + { + return r; + } + } + } + + if (options.Properties.TryGetValue(WFConstants.BackendHeaders.TimeToLiveInSeconds, out object ttlObj)) + { + string ttlStr = ttlObj as string; + if (ttlStr != null && int.TryParse(ttlStr, out int ttl)) + { + r = writer.WriteInt32("timeToLiveInSeconds", ttl); + if (r != Result.Success) + { + return r; + } + } + } + } + } + + return Result.Success; + } + + /// + /// Computes and returns an approximation for the length of this . + /// when serialized. + /// + /// An under-estimate of the length. 
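// Illustrative example (not part of this patch): for an operation with an 11-character
// partition key JSON such as ["Seattle"], a 5-character id and a 60-byte materialized body,
// the method below returns 11 + 5 + 60 = 76. Callers such as ServerBatchRequest treat this as
// an under-estimate and add a fixed per-operation overhead on top of it when sizing buffers.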
+ internal int GetApproximateSerializedLength() + { + int length = 0; + + if (this.PartitionKeyJson != null) + { + length += this.PartitionKeyJson.Length; + } + + if (this.Id != null) + { + length += this.Id.Length; + } + + length += this.body.Length; + + if (this.RequestOptions != null) + { + if (this.RequestOptions.IfMatchEtag != null) + { + length += this.RequestOptions.IfMatchEtag.Length; + } + + if (this.RequestOptions.IfNoneMatchEtag != null) + { + length += this.RequestOptions.IfNoneMatchEtag.Length; + } + + if (this.RequestOptions.IndexingDirective.HasValue) + { + length += 7; // "Default", "Include", "Exclude" are possible values + } + + if (this.RequestOptions.Properties != null) + { + if (this.RequestOptions.Properties.TryGetValue(WFConstants.BackendHeaders.BinaryId, out object binaryIdObj)) + { + byte[] binaryId = binaryIdObj as byte[]; + if (binaryId != null) + { + length += binaryId.Length; + } + } + + if (this.RequestOptions.Properties.TryGetValue(WFConstants.BackendHeaders.EffectivePartitionKey, out object epkObj)) + { + byte[] epk = epkObj as byte[]; + if (epk != null) + { + length += epk.Length; + } + } + } + } + + return length; + } + + /// + /// Materializes the operation's resource into a Memory{byte} wrapping a byte array. + /// + /// Serializer to serialize user provided objects to JSON. + /// for cancellation. + internal virtual async Task MaterializeResourceAsync(CosmosJsonSerializer serializer, CancellationToken cancellationToken) + { + if (this.body.IsEmpty && this.ResourceStream != null) + { + this.body = await BatchExecUtils.StreamToMemoryAsync(this.ResourceStream, Constants.MaxResourceSizeInBytes, cancellationToken); + } + } + + /// + /// Disposes the disposable members held by this class. + /// + /// Indicates whether to dispose managed resources or not. + protected virtual void Dispose(bool disposing) + { + if (disposing && !this.isDisposed) + { + this.isDisposed = true; + if (this.ResourceStream != null) + { + this.ResourceStream.Dispose(); + this.ResourceStream = null; + } + } + } + } + +#pragma warning disable SA1402 // File may only contain a single type + internal class ItemBatchOperation : ItemBatchOperation +#pragma warning restore SA1402 // File may only contain a single type + { + public ItemBatchOperation( + OperationType operationType, + int operationIndex, + PartitionKey partitionKey, + T resource, + string id = null, + ItemRequestOptions requestOptions = null) + : base(operationType, operationIndex, partitionKey: partitionKey, id: id, requestOptions: requestOptions) + { + this.Resource = resource; + } + + public ItemBatchOperation( + OperationType operationType, + int operationIndex, + T resource, + string id = null, + ItemRequestOptions requestOptions = null) + : base(operationType, operationIndex, id: id, requestOptions: requestOptions) + { + this.Resource = resource; + } + + public T Resource { get; private set; } + + /// + /// Materializes the operation's resource into a Memory{byte} wrapping a byte array. + /// + /// Serializer to serialize user provided objects to JSON. + /// for cancellation. 
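// Note (not part of this patch): for a typed operation created via CosmosBatch.CreateItem(item),
// the override below serializes the Resource with the configured CosmosJsonSerializer the first
// time the server request body is built; operations created from a caller-provided Stream use
// the base implementation instead, which buffers that stream into memory.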
+ internal override Task MaterializeResourceAsync(CosmosJsonSerializer serializer, CancellationToken cancellationToken) + { + if (this.body.IsEmpty && this.Resource != null) + { + this.ResourceStream = serializer.ToStream(this.Resource); + return base.MaterializeResourceAsync(serializer, cancellationToken); + } + + return Task.FromResult(true); + } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Batch/ServerBatchRequest.cs b/Microsoft.Azure.Cosmos/src/Batch/ServerBatchRequest.cs new file mode 100644 index 0000000000..974a4c7f73 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/ServerBatchRequest.cs @@ -0,0 +1,170 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ +namespace Microsoft.Azure.Cosmos +{ + using System; + using System.Collections.Generic; + using System.Diagnostics; + using System.IO; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.Serialization.HybridRow; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.IO; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.RecordIO; + using Microsoft.Azure.Documents; + +#pragma warning disable CA1001 // Types that own disposable fields should be disposable + internal abstract class ServerBatchRequest +#pragma warning restore CA1001 // Types that own disposable fields should be disposable + { + private readonly int maxBodyLength; + + private readonly int maxOperationCount; + + private readonly CosmosJsonSerializer serializer; + + private ArraySegment operations; + + private MemorySpanResizer operationResizableWriteBuffer; + + private MemoryStream bodyStream; + + private long bodyStreamPositionBeforeWritingCurrentRecord; + + private bool shouldDeleteLastWrittenRecord; + + private int lastWrittenOperationIndex; + + /// + /// Initializes a new instance of the class. + /// + /// Maximum length allowed for the request body. + /// Maximum number of operations allowed in the request. + /// Serializer to serialize user provided objects to JSON. + protected ServerBatchRequest(int maxBodyLength, int maxOperationCount, CosmosJsonSerializer serializer) + { + this.maxBodyLength = maxBodyLength; + this.maxOperationCount = maxOperationCount; + this.serializer = serializer; + } + + public IReadOnlyList Operations => this.operations; + + /// + /// Returns the body Stream. + /// Caller is responsible for disposing it after use. + /// + /// Body stream. + public MemoryStream TransferBodyStream() + { + MemoryStream bodyStream = this.bodyStream; + this.bodyStream = null; + return bodyStream; + } + + /// + /// Adds as many operations as possible from the provided list of operations + /// in the list order while having the body stream not exceed maxBodySize. + /// + /// Operations to be added; read-only. + /// representing request cancellation. + /// Whether to stop adding operations to the request once there is non-continuity in the operation indexes. 
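// Illustrative walk-through (not part of this patch) of the packing policy implemented below:
// with maxBodyLength = 100 and three operations whose approximate serialized lengths are 40, 40
// and 40, materialization stops at the third operation because the running estimate (120) exceeds
// the limit; the RecordIO write pass then trims any trailing record that pushes the actual body
// length over maxBodyLength, and the shorter Operations list lets the caller detect which
// operations were left out of this request.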
+ protected async Task CreateBodyStreamAsync( + ArraySegment operations, + CancellationToken cancellationToken, + bool ensureContinuousOperationIndexes = false) + { + int estimatedMaxOperationLength = 0; + int approximateTotalLength = 0; + + int previousOperationIndex = -1; + int materializedCount = 0; + foreach (ItemBatchOperation operation in operations) + { + if (ensureContinuousOperationIndexes && previousOperationIndex != -1 && operation.OperationIndex != previousOperationIndex + 1) + { + break; + } + + await operation.MaterializeResourceAsync(this.serializer, cancellationToken); + materializedCount++; + + previousOperationIndex = operation.OperationIndex; + + int currentLength = operation.GetApproximateSerializedLength(); + estimatedMaxOperationLength = Math.Max(currentLength, estimatedMaxOperationLength); + + approximateTotalLength += currentLength; + if (approximateTotalLength > this.maxBodyLength) + { + break; + } + + if (materializedCount == this.maxOperationCount) + { + break; + } + } + + this.operations = new ArraySegment(operations.Array, operations.Offset, materializedCount); + + const int operationSerializationOverheadOverEstimateInBytes = 200; + this.bodyStream = new MemoryStream(approximateTotalLength + (operationSerializationOverheadOverEstimateInBytes * materializedCount)); + this.operationResizableWriteBuffer = new MemorySpanResizer(estimatedMaxOperationLength + operationSerializationOverheadOverEstimateInBytes); + + Result r = await this.bodyStream.WriteRecordIOAsync(default(Segment), this.WriteOperation); + Debug.Assert(r == Result.Success, "Failed to serialize batch request"); + + this.bodyStream.Position = 0; + + if (this.shouldDeleteLastWrittenRecord) + { + this.bodyStream.SetLength(this.bodyStreamPositionBeforeWritingCurrentRecord); + this.operations = new ArraySegment(operations.Array, operations.Offset, this.lastWrittenOperationIndex); + } + else + { + this.operations = new ArraySegment(operations.Array, operations.Offset, this.lastWrittenOperationIndex + 1); + } + + if (this.operations.Count == 0) + { + throw new RequestEntityTooLargeException(RMResources.RequestTooLarge); + } + } + + private Result WriteOperation(long index, out ReadOnlyMemory buffer) + { + if (this.bodyStream.Length > this.maxBodyLength) + { + this.shouldDeleteLastWrittenRecord = true; + buffer = default(ReadOnlyMemory); + return Result.Success; + } + + this.bodyStreamPositionBeforeWritingCurrentRecord = this.bodyStream.Length; + + if (index >= this.operations.Count) + { + buffer = default(ReadOnlyMemory); + return Result.Success; + } + + ItemBatchOperation operation = this.operations.Array[this.operations.Offset + (int)index]; + + RowBuffer row = new RowBuffer(this.operationResizableWriteBuffer.Memory.Length, this.operationResizableWriteBuffer); + row.InitLayout(HybridRowVersion.V1, BatchSchemaProvider.BatchOperationLayout, BatchSchemaProvider.BatchLayoutResolver); + Result r = RowWriter.WriteBuffer(ref row, operation, ItemBatchOperation.WriteOperation); + if (r != Result.Success) + { + buffer = null; + return r; + } + + this.lastWrittenOperationIndex = (int)index; + buffer = this.operationResizableWriteBuffer.Memory.Slice(0, row.Length); + return Result.Success; + } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs b/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs new file mode 100644 index 0000000000..06df013ff3 --- /dev/null +++ 
b/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs @@ -0,0 +1,60 @@ +// ------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +// ------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + using System; + using System.Threading; + using System.Threading.Tasks; + + internal sealed class SinglePartitionKeyServerBatchRequest : ServerBatchRequest + { + /// + /// Initializes a new instance of the class. + /// Single partition key server request. + /// + /// Partition key that applies to all operations in this request. + /// Maximum length allowed for the request body. + /// Maximum number of operations allowed in the request. + /// Serializer to serialize user provided objects to JSON. + private SinglePartitionKeyServerBatchRequest( + PartitionKey partitionKey, + int maxBodyLength, + int maxOperationCount, + CosmosJsonSerializer serializer) + : base(maxBodyLength, maxOperationCount, serializer) + { + this.PartitionKey = partitionKey; + } + + /// + /// PartitionKey that applies to all operations in this request. + /// + public PartitionKey PartitionKey { get; } + + /// + /// Creates an instance of . + /// The body of the request is populated with operations till it reaches the provided maxBodyLength. + /// + /// Partition key of the request. + /// Operations to be added into this batch request. + /// Desired maximum length of the request body. + /// Maximum number of operations allowed in the request. + /// Serializer to serialize user provided objects to JSON. + /// representing request cancellation. + /// A newly created instance of . + public static async Task CreateAsync( + PartitionKey partitionKey, + ArraySegment operations, + int maxBodyLength, + int maxOperationCount, + CosmosJsonSerializer serializer, + CancellationToken cancellationToken) + { + SinglePartitionKeyServerBatchRequest request = new SinglePartitionKeyServerBatchRequest(partitionKey, maxBodyLength, maxOperationCount, serializer); + await request.CreateBodyStreamAsync(operations, cancellationToken); + return request; + } + } +} diff --git a/Microsoft.Azure.Cosmos/src/ClientResources.Designer.cs b/Microsoft.Azure.Cosmos/src/ClientResources.Designer.cs index fd8a71b438..2c6810f738 100644 --- a/Microsoft.Azure.Cosmos/src/ClientResources.Designer.cs +++ b/Microsoft.Azure.Cosmos/src/ClientResources.Designer.cs @@ -10,9 +10,8 @@ namespace Microsoft.Azure.Cosmos { using System; - using System.Reflection; - - + + /// /// A strongly-typed resource class, for looking up localized strings, etc. /// @@ -20,7 +19,7 @@ namespace Microsoft.Azure.Cosmos { // class via a tool like ResGen or Visual Studio. // To add or remove a member, edit your .ResX file then rerun ResGen // with the /str option, or rebuild your VS project. 
- [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "15.0.0.0")] [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] internal class ClientResources { @@ -40,8 +39,7 @@ internal ClientResources() { internal static global::System.Resources.ResourceManager ResourceManager { get { if (object.ReferenceEquals(resourceMan, null)) { - global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Microsoft.Azure.Cosmos.ClientResources", typeof(ClientResources).GetTypeInfo().Assembly); - + global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Microsoft.Azure.Cosmos.ClientResources", typeof(ClientResources).Assembly); resourceMan = temp; } return resourceMan; @@ -62,6 +60,15 @@ internal ClientResources() { } } + /// + /// Looks up a localized string similar to Sorry, we are currently experiencing high demand in this region, and cannot fulfill your request at this time. We work continuously to bring more and more capacity online, and encourage you to try again shortly. Please do not hesitate to contact us via Azure support at any time or for any reason using this link http://aka.ms/azuresupport.. + /// + internal static string AllServicePoolsEmpty { + get { + return ResourceManager.GetString("AllServicePoolsEmpty", resourceCulture); + } + } + /// /// Looks up a localized string similar to The client does not have any valid token for the requested resource {0}.. /// @@ -197,6 +204,51 @@ internal static string BadSession { } } + /// + /// Looks up a localized string similar to One or more provided request options are not supported on items that are part of a batch request.. + /// + internal static string BatchItemRequestOptionNotSupported { + get { + return ResourceManager.GetString("BatchItemRequestOptionNotSupported", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to The batch request did not have any operations to be executed.. + /// + internal static string BatchNoOperations { + get { + return ResourceManager.GetString("BatchNoOperations", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to One or more batch operations are larger than the allowed limit.. + /// + internal static string BatchOperationTooLarge { + get { + return ResourceManager.GetString("BatchOperationTooLarge", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to One or more request options provided on the batch request are not supported.. + /// + internal static string BatchRequestOptionNotSupported { + get { + return ResourceManager.GetString("BatchRequestOptionNotSupported", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to This batch request cannot be executed as it is larger than the allowed limit. Please reduce the number of operations in the batch and try again.. + /// + internal static string BatchTooLarge { + get { + return ResourceManager.GetString("BatchTooLarge", resourceCulture); + } + } + /// /// Looks up a localized string similar to Binary operator '{0}' is not supported.. 
/// @@ -224,6 +276,15 @@ internal static string ConstructorInvocationNotSupported { } } + /// + /// Looks up a localized string similar to Expected valid byte[] value for {0} and string value for {1} when either property is set.. + /// + internal static string EpkPropertiesPairingExpected { + get { + return ResourceManager.GetString("EpkPropertiesPairingExpected", resourceCulture); + } + } + /// /// Looks up a localized string similar to Expected a static IQueryable or IEnumerable extension method, received an instance method.. /// @@ -286,20 +347,27 @@ internal static string InvalidRangeError { return ResourceManager.GetString("InvalidRangeError", resourceCulture); } } - + /// - /// Looks up a localized string similar to The count value provided for a Skip expression must be an integer.. + /// Looks up a localized string similar to Received an invalid response from the server.. /// - internal static string InvalidSkipValue - { - get - { + internal static string InvalidServerResponse { + get { + return ResourceManager.GetString("InvalidServerResponse", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to The count value provided for a Skip expression must be a non-negative integer.. + /// + internal static string InvalidSkipValue { + get { return ResourceManager.GetString("InvalidSkipValue", resourceCulture); } } - + /// - /// Looks up a localized string similar to The count value provided for a Take expression must be an integer.. + /// Looks up a localized string similar to The count value provided for a Take expression must be a non-negative integer.. /// internal static string InvalidTakeValue { get { @@ -406,6 +474,15 @@ internal static string PathExpressionsOnly { } } + /// + /// Looks up a localized string similar to Partition key and effective partition key may not both be set.. + /// + internal static string PKAndEpkSetTogether { + get { + return ResourceManager.GetString("PKAndEpkSetTogether", resourceCulture); + } + } + /// /// Looks up a localized string similar to A containing range for {0} doesn't exist in the partition map.. /// @@ -415,6 +492,15 @@ internal static string RangeNotFoundError { } } + /// + /// Looks up a localized string similar to Failed to deserialize response returned by server.. + /// + internal static string ServerResponseDeserializationFailure { + get { + return ResourceManager.GetString("ServerResponseDeserializationFailure", resourceCulture); + } + } + /// /// Looks up a localized string similar to The right hand side of string.CompareTo() comparison must be constant '0'. /// @@ -486,16 +572,5 @@ internal static string ValueAndAnonymousTypesAndGeometryOnly { return ResourceManager.GetString("ValueAndAnonymousTypesAndGeometryOnly", resourceCulture); } } - - /// - /// Looks up a localized string similar to Sorry, we are currently experiencing high demand in this region, and cannot fulfill your request at this time. We work continuously to bring more and more capacity online, and encourage you to try again shortly. Please do not hesitate to contact us via Azure support at any time or for any reason using this link http://aka.ms/azuresupport.. 
- /// - internal static string AllServicePoolsEmpty - { - get - { - return ResourceManager.GetString("AllServicePoolsEmpty", resourceCulture); - } - } } } diff --git a/Microsoft.Azure.Cosmos/src/ClientResources.resx b/Microsoft.Azure.Cosmos/src/ClientResources.resx index b337c51af5..146cefa808 100644 --- a/Microsoft.Azure.Cosmos/src/ClientResources.resx +++ b/Microsoft.Azure.Cosmos/src/ClientResources.resx @@ -117,6 +117,9 @@ System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 + + Sorry, we are currently experiencing high demand in this region, and cannot fulfill your request at this time. We work continuously to bring more and more capacity online, and encourage you to try again shortly. Please do not hesitate to contact us via Azure support at any time or for any reason using this link http://aka.ms/azuresupport. + The client does not have any valid token for the requested resource {0}. @@ -162,6 +165,21 @@ Session object retrieved from client with endpoint {0} cannot be used on a client initialized to endpoint {1}. + + One or more provided request options are not supported on items that are part of a batch request. + + + The batch request did not have any operations to be executed. + + + One or more batch operations are larger than the allowed limit. + + + One or more request options provided on the batch request are not supported. + + + This batch request cannot be executed as it is larger than the allowed limit. Please reduce the number of operations in the batch and try again. + Binary operator '{0}' is not supported. @@ -171,6 +189,9 @@ Constructor invocation is not supported. + + Expected valid byte[] value for {0} and string value for {1} when either property is set. + Expected a static IQueryable or IEnumerable extension method, received an instance method. @@ -192,6 +213,9 @@ Range low value must be less than or equal the high value. + + Received an invalid response from the server. + The count value provided for a Skip expression must be a non-negative integer. @@ -231,9 +255,15 @@ Only path expressions are supported for SelectMany. + + Partition key and effective partition key may not both be set. + A containing range for {0} doesn't exist in the partition map. + + Failed to deserialize response returned by server. + The right hand side of string.CompareTo() comparison must be constant '0' diff --git a/Microsoft.Azure.Cosmos/src/Microsoft.Azure.Cosmos.csproj b/Microsoft.Azure.Cosmos/src/Microsoft.Azure.Cosmos.csproj index 97db0928e3..8bfe6571f5 100644 --- a/Microsoft.Azure.Cosmos/src/Microsoft.Azure.Cosmos.csproj +++ b/Microsoft.Azure.Cosmos/src/Microsoft.Azure.Cosmos.csproj @@ -1,71 +1,99 @@ - - - - Microsoft Corporation - Microsoft(R) Azure Cosmos - This client library enables client applications to connect to Azure Cosmos via the SQL API. Azure Cosmos is a globally distributed, multi-model database service. For more information, refer to http://azure.microsoft.com/services/cosmos-db/. - © Microsoft Corporation. All rights reserved. 
- en-US - 3.0.0.17-preview - 3.0.0.31-preview - $(ClientVersion)-nightly$(CurrentDate) - $(ClientVersion) - $(VersionPrefix) - Microsoft - netstandard2.0 - true - true - Microsoft.Azure.Cosmos.Client - Microsoft Azure Cosmos DB Client library - Microsoft.Azure.Cosmos - microsoft;azure;cosmos;cosmosdb;documentdb;docdb;nosql;azureofficial;dotnetcore;netcore;netstandard - The change log for this SDK is made available at https://github.com/Azure/azure-cosmos-dotnet-v3/blob/master/changelog.md at the time of release. - https://aka.ms/netcoregaeula - https://github.com/Azure/azure-cosmos-dotnet-v3 - http://go.microsoft.com/fwlink/?LinkID=288890 - true - true - false - true - AnyCPU - External - Product - portable - false - false - $([System.DateTime]::Now.ToString(yyyyMMdd)) - Microsoft.Azure.Cosmos - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - $(DefineConstants);DOCDBCLIENT;NETSTANDARD20 - $(DefineConstants);SignAssembly - - + + + + Microsoft Corporation + Microsoft(R) Azure Cosmos + This client library enables client applications to connect to Azure Cosmos via the SQL API. Azure Cosmos is a globally distributed, multi-model database service. For more information, refer to http://azure.microsoft.com/services/cosmos-db/. + © Microsoft Corporation. All rights reserved. + en-US + 3.0.0.17-preview + 3.0.0.31-preview + $(ClientVersion)-nightly$(CurrentDate) + $(ClientVersion) + $(VersionPrefix) + Microsoft + netstandard2.0 + true + true + Microsoft.Azure.Cosmos.Client + Microsoft Azure Cosmos DB Client library + Microsoft.Azure.Cosmos + microsoft;azure;cosmos;cosmosdb;documentdb;docdb;nosql;azureofficial;dotnetcore;netcore;netstandard + The change log for this SDK is made available at https://github.com/Azure/azure-cosmos-dotnet-v3/blob/master/changelog.md at the time of release. + https://aka.ms/netcoregaeula + https://github.com/Azure/azure-cosmos-dotnet-v3 + http://go.microsoft.com/fwlink/?LinkID=288890 + true + true + false + true + AnyCPU + External + Product + portable + false + false + $([System.DateTime]::Now.ToString(yyyyMMdd)) + Microsoft.Azure.Cosmos + + + + + Never + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + C:\Users\abpai\.nuget\packages\microsoft.azure.cosmos.direct.myget\3.0.0.33-preview\runtimes\any\lib\netstandard2.0\Microsoft.Azure.Cosmos.Core.dll + + + C:\Users\abpai\.nuget\packages\microsoft.azure.cosmos.direct.myget\3.0.0.33-preview\runtimes\any\lib\netstandard2.0\Microsoft.Azure.Cosmos.Serialization.HybridRow.dll + + + + + + True + True + ClientResources.resx + + + + + + ResXFileCodeGenerator + ClientResources.Designer.cs + + + + + $(DefineConstants);DOCDBCLIENT;NETSTANDARD20 + $(DefineConstants);SignAssembly + + diff --git a/Microsoft.Azure.Cosmos/src/Resource/Container/CosmosContainer.cs b/Microsoft.Azure.Cosmos/src/Resource/Container/CosmosContainer.cs index e0f5137fe0..1499bf3250 100644 --- a/Microsoft.Azure.Cosmos/src/Resource/Container/CosmosContainer.cs +++ b/Microsoft.Azure.Cosmos/src/Resource/Container/CosmosContainer.cs @@ -1111,5 +1111,12 @@ public abstract ChangeFeedProcessorBuilder CreateChangeFeedEstimatorBuilder( string processorName, Func estimationDelegate, TimeSpan? estimationPeriod = null); + + /// + /// Initializes a new instance of the class. + /// + /// The partition key for all items in the batch. . 
+ /// An instance of + public abstract CosmosBatch CreateBatch(PartitionKey partitionKey); } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Resource/Container/CosmosContainerCore.Items.cs b/Microsoft.Azure.Cosmos/src/Resource/Container/CosmosContainerCore.Items.cs index c88b76afcd..e305c5db7f 100644 --- a/Microsoft.Azure.Cosmos/src/Resource/Container/CosmosContainerCore.Items.cs +++ b/Microsoft.Azure.Cosmos/src/Resource/Container/CosmosContainerCore.Items.cs @@ -448,6 +448,11 @@ public override ChangeFeedProcessorBuilder CreateChangeFeedEstimatorBuilder( applyBuilderConfiguration: changeFeedEstimatorCore.ApplyBuildConfiguration); } + public override CosmosBatch CreateBatch(PartitionKey partitionKey) + { + return new CosmosBatch(this, partitionKey); + } + internal FeedIterator GetStandByFeedIterator( string continuationToken = null, int? maxItemCount = null, diff --git a/Microsoft.Azure.Cosmos/src/Resource/Scripts/CosmosScripts.cs b/Microsoft.Azure.Cosmos/src/Resource/Scripts/CosmosScripts.cs index c10519499a..6ed61574c5 100644 --- a/Microsoft.Azure.Cosmos/src/Resource/Scripts/CosmosScripts.cs +++ b/Microsoft.Azure.Cosmos/src/Resource/Scripts/CosmosScripts.cs @@ -385,7 +385,7 @@ public abstract Task ExecuteStoredProcedureStreamAsync( /// TriggerType = TriggerType.Pre /// }); /// - /// CosmosItemRequestOptions options = new CosmosItemRequestOptions() + /// ItemRequestOptions options = new ItemRequestOptions() /// { /// PreTriggers = new List() { cosmosTrigger.Id }, /// }; diff --git a/Microsoft.Azure.Cosmos/src/Resource/Settings/IndexingDirective.cs b/Microsoft.Azure.Cosmos/src/Resource/Settings/IndexingDirective.cs index c5dd4e49a3..afeafb8a64 100644 --- a/Microsoft.Azure.Cosmos/src/Resource/Settings/IndexingDirective.cs +++ b/Microsoft.Azure.Cosmos/src/Resource/Settings/IndexingDirective.cs @@ -3,6 +3,8 @@ //------------------------------------------------------------ namespace Microsoft.Azure.Cosmos { + using System; + /// /// Specifies whether or not the resource in the Azure Cosmos DB database is to be indexed. /// @@ -31,5 +33,20 @@ internal static class IndexingDirectiveStrings public static readonly string Default = IndexingDirective.Default.ToString(); public static readonly string Include = IndexingDirective.Include.ToString(); public static readonly string Exclude = IndexingDirective.Exclude.ToString(); + + public static string FromIndexingDirective(IndexingDirective directive) + { + switch (directive) + { + case IndexingDirective.Default: + return IndexingDirectiveStrings.Default; + case IndexingDirective.Exclude: + return IndexingDirectiveStrings.Exclude; + case IndexingDirective.Include: + return IndexingDirectiveStrings.Include; + default: + throw new ArgumentException(string.Format("Missing indexing directive string for {0}", directive)); + } + } } } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs new file mode 100644 index 0000000000..8e457b19dc --- /dev/null +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs @@ -0,0 +1,650 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.SDK.EmulatorTests +{ + using System; + using System.Net; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.Fluent; + using Microsoft.Azure.Documents; + using Microsoft.VisualStudio.TestTools.UnitTesting; + + [TestClass] + public class BatchSinglePartitionKeyTests : CosmosBatchTestBase + { + [ClassInitialize] + public static void ClassInitialize(TestContext context) + { + CosmosBatchTestBase.ClassInit(context); + } + + [ClassCleanup] + public static void ClassCleanup() + { + CosmosBatchTestBase.ClassClean(); + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch CRUD with default options at all levels (client/batch/operation) and all operations expected to pass works")] + public async Task BatchCrudAsync() + { + await this.RunCrudAsync(isStream: false, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.JsonContainer); + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch CRUD with JSON stream operation resource bodies with default options and all operations expected to pass works")] + public async Task BatchCrudStreamAsync() + { + await this.RunCrudAsync(isStream: true, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.JsonContainer); + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch CRUD with HybridRow stream operation resource bodies and EPK with default options and all operations expected to pass works")] + public async Task BatchCrudHybridRowStreamWithEpkAsync() + { + await this.RunCrudAsync(isStream: true, isSchematized: true, useEpk: true, container: CosmosBatchTestBase.SchematizedContainer); + } + + [TestMethod] + [Owner("rakkuma")] + [Description("Verify batch CRUD with default options at all levels (client/batch/operation) and all operations expected to pass works in gateway mode")] + public async Task BatchCrudGatewayAsync() + { + await this.RunCrudAsync(isStream: false, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.GatewayJsonContainer); + } + + [TestMethod] + [Owner("rakkuma")] + [Description("Verify batch CRUD with JSON stream operation resource bodies with default options and all operations expected to pass works in gateway mode")] + public async Task BatchCrudStreamGatewayAsync() + { + await this.RunCrudAsync(isStream: true, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.GatewayJsonContainer); + } + + [TestMethod] + [Owner("rakkuma")] + [Description("Verify batch CRUD with default options at all levels (client/batch/operation) and all operations expected to pass works in shared throughput ")] + public async Task BatchCrudSharedThroughputAsync() + { + await this.RunCrudAsync(isStream: false, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.SharedThroughputContainer); + } + + [TestMethod] + [Owner("rakkuma")] + [Description("Verify batch CRUD with JSON stream operation resource bodies with default options and all operations expected to pass works in shared throughput")] + public async Task BatchCrudSharedThroughputStreamAsync() + { + await this.RunCrudAsync(isStream: true, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.SharedThroughputContainer); + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch with multiple operations on the same entity works")] + public async Task BatchOrderedAsync() + { + CosmosContainer container = CosmosBatchTestBase.JsonContainer; + await 
this.CreateJsonTestDocsAsync(container); + + TestDoc firstDoc = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + + TestDoc replaceDoc = this.GetTestDocCopy(firstDoc); + replaceDoc.Cost += 20; + + CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + .CreateItem(firstDoc) + .ReplaceItem(replaceDoc.Id, replaceDoc) + .ExecuteAsync(); + + BatchSinglePartitionKeyTests.VerifyBatchProcessed(batchResponse, numberOfOperations: 2); + + Assert.AreEqual(HttpStatusCode.Created, batchResponse[0].StatusCode); + Assert.AreEqual(HttpStatusCode.OK, batchResponse[1].StatusCode); + + // Ensure that the replace overwrote the doc from the first operation + await CosmosBatchTestBase.VerifyByReadAsync(container, replaceDoc); + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify eTags passed to batch operations or returned in batch results flow as expected")] + public async Task BatchItemETagAsync() + { + CosmosContainer container = CosmosBatchTestBase.JsonContainer; + await this.CreateJsonTestDocsAsync(container); + { + TestDoc testDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + + TestDoc testDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingA); + testDocToReplace.Cost++; + + ItemResponse readResponse = await CosmosBatchTestBase.JsonContainer.ReadItemAsync( + CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1), + this.TestDocPk1ExistingA.Id); + + ItemRequestOptions firstReplaceOptions = new ItemRequestOptions() + { + IfMatchEtag = readResponse.ETag + }; + + CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + .CreateItem(testDocToCreate) + .ReplaceItem(testDocToReplace.Id, testDocToReplace, itemRequestOptions: firstReplaceOptions) + .ExecuteAsync(); + + BatchSinglePartitionKeyTests.VerifyBatchProcessed(batchResponse, numberOfOperations: 2); + + Assert.AreEqual(HttpStatusCode.Created, batchResponse[0].StatusCode); + Assert.AreEqual(HttpStatusCode.OK, batchResponse[1].StatusCode); + + await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToCreate, eTag: batchResponse[0].ETag); + await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToReplace, eTag: batchResponse[1].ETag); + } + + { + TestDoc testDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingB); + testDocToReplace.Cost++; + + ItemRequestOptions replaceOptions = new ItemRequestOptions() + { + IfMatchEtag = CosmosBatchTestBase.Random.Next().ToString() + }; + + CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + .ReplaceItem(testDocToReplace.Id, testDocToReplace, itemRequestOptions: replaceOptions) + .ExecuteAsync(); + + BatchSinglePartitionKeyTests.VerifyBatchProcessed( + batchResponse, + numberOfOperations: 1, + expectedStatusCode: (HttpStatusCode)StatusCodes.MultiStatus); + + Assert.AreEqual(HttpStatusCode.PreconditionFailed, batchResponse[0].StatusCode); + + // ensure the document was not updated + await CosmosBatchTestBase.VerifyByReadAsync(container, this.TestDocPk1ExistingB); + } + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify TTL passed to binary passthrough batch operations flow as expected")] + public async Task BatchItemTimeToLiveAsync() + { + // Verify with schematized containers where we are allowed to send TTL as a header + const bool isSchematized = true; + const bool isStream = true; + CosmosContainer container = 
CosmosBatchTestBase.SchematizedContainer; + await this.CreateSchematizedTestDocsAsync(container); + { + TestDoc testDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + TestDoc anotherTestDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + + TestDoc testDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingA); + testDocToReplace.Cost++; + + const int ttlInSeconds = 3; + const int infiniteTtl = -1; + + TestDoc testDocToUpsert = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1, ttlInSeconds: ttlInSeconds); + testDocToUpsert.Cost++; + + CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + .CreateItemStream( + CosmosBatchTestBase.TestDocToStream(testDocToCreate, isSchematized), + CosmosBatchTestBase.GetItemRequestOptions(testDocToCreate, isSchematized, ttlInSeconds: ttlInSeconds)) + .CreateItemStream( + CosmosBatchTestBase.TestDocToStream(anotherTestDocToCreate, isSchematized), + CosmosBatchTestBase.GetItemRequestOptions(anotherTestDocToCreate, isSchematized)) + .ReplaceItemStream( + CosmosBatchTestBase.GetId(testDocToReplace, isSchematized), + CosmosBatchTestBase.TestDocToStream(testDocToReplace, isSchematized), + CosmosBatchTestBase.GetItemRequestOptions(testDocToReplace, isSchematized, ttlInSeconds: ttlInSeconds)) + .UpsertItemStream( + CosmosBatchTestBase.TestDocToStream(testDocToUpsert, isSchematized), + CosmosBatchTestBase.GetItemRequestOptions(testDocToUpsert, isSchematized, ttlInSeconds: infiniteTtl)) + .ExecuteAsync(CosmosBatchTestBase.GetUpdatedBatchRequestOptions(isSchematized: true)); + + BatchSinglePartitionKeyTests.VerifyBatchProcessed(batchResponse, numberOfOperations: 4); + + Assert.AreEqual(HttpStatusCode.Created, batchResponse[0].StatusCode); + Assert.AreEqual(HttpStatusCode.Created, batchResponse[1].StatusCode); + Assert.AreEqual(HttpStatusCode.OK, batchResponse[2].StatusCode); + Assert.AreEqual(HttpStatusCode.OK, batchResponse[3].StatusCode); + + // wait for TTL to expire + await Task.Delay(TimeSpan.FromSeconds(ttlInSeconds + 1)); + + await CosmosBatchTestBase.VerifyNotFoundAsync(container, testDocToCreate, isSchematized); + await CosmosBatchTestBase.VerifyByReadAsync(container, anotherTestDocToCreate, isStream, isSchematized); + await CosmosBatchTestBase.VerifyNotFoundAsync(container, testDocToReplace, isSchematized); + await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToUpsert, isStream, isSchematized); + } + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchLargerThanServerRequestAsync() + { + CosmosContainer container = CosmosBatchTestBase.JsonContainer; + const int operationCount = 20; + int appxDocSize = Constants.MaxDirectModeBatchRequestBodySizeInBytes / operationCount; + + // Increase the doc size by a bit so all docs won't fit in one server request. 
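+            // A rough, illustrative sketch of the arithmetic (the actual limit is whatever
+            // Constants.MaxDirectModeBatchRequestBodySizeInBytes is in this SDK version): if the
+            // body limit were about 2 MB and operationCount is 20, each doc starts at roughly
+            // 2 MB / 20 = ~100 KB; scaling each doc by 1.05 below makes the 20 docs sum to
+            // roughly 2.1 MB, so together with per-operation serialization overhead the batch
+            // is guaranteed to exceed the server request body limit.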
+ appxDocSize = (int)(appxDocSize * 1.05); + { + CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + for (int i = 0; i < operationCount; i++) + { + TestDoc doc = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1, appxDocSize); + batch.CreateItem(doc); + } + + CosmosBatchResponse batchResponse = await batch.ExecuteAsync(); + + Assert.AreEqual(HttpStatusCode.RequestEntityTooLarge, batchResponse.StatusCode); + } + + // Validate the server enforces this as well + { + CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + for (int i = 0; i < operationCount; i++) + { + TestDoc doc = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1, appxDocSize); + batch.CreateItem(doc); + } + + CosmosBatchResponse batchResponse = await batch.ExecuteAsync( + maxServerRequestBodyLength: int.MaxValue, + maxServerRequestOperationCount: int.MaxValue); + + Assert.AreEqual(HttpStatusCode.RequestEntityTooLarge, batchResponse.StatusCode); + } + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchWithTooManyOperationsAsync() + { + CosmosContainer container = CosmosBatchTestBase.JsonContainer; + await this.CreateJsonTestDocsAsync(container); + + const int operationCount = Constants.MaxOperationsInDirectModeBatchRequest + 1; + + // Validate SDK enforces this + { + CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + for (int i = 0; i < operationCount; i++) + { + batch.ReadItem(this.TestDocPk1ExistingA.Id); + } + + CosmosBatchResponse batchResponse = await batch.ExecuteAsync(); + + Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); + } + + // Validate the server enforces this as well + { + CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + for (int i = 0; i < operationCount; i++) + { + batch.ReadItem(this.TestDocPk1ExistingA.Id); + } + + CosmosBatchResponse batchResponse = await batch.ExecuteAsync( + maxServerRequestBodyLength: int.MaxValue, + maxServerRequestOperationCount: int.MaxValue); + + Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); + } + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchServerResponseTooLargeAsync() + { + CosmosContainer container = CosmosBatchTestBase.JsonContainer; + const int operationCount = 10; + int appxDocSizeInBytes = 1 * 1024 * 1024; + + TestDoc doc = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1, appxDocSizeInBytes); + + CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + for (int i = 0; i < operationCount; i++) + { + batch.ReadItem(doc.Id); + } + + CosmosBatchResponse batchResponse = await batch.ExecuteAsync(); + + BatchSinglePartitionKeyTests.VerifyBatchProcessed( + batchResponse, + numberOfOperations: operationCount, + expectedStatusCode: (HttpStatusCode)StatusCodes.MultiStatus); + + Assert.AreEqual((int)StatusCodes.FailedDependency, (int)batchResponse[0].StatusCode); + Assert.AreEqual(HttpStatusCode.RequestEntityTooLarge, batchResponse[operationCount - 1].StatusCode); + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchReadsOnlyAsync() + { + CosmosContainer container = CosmosBatchTestBase.JsonContainer; + await this.CreateJsonTestDocsAsync(container); + + CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + .ReadItem(this.TestDocPk1ExistingA.Id) + 
.ReadItem(this.TestDocPk1ExistingB.Id) + .ReadItem(this.TestDocPk1ExistingC.Id) + .ExecuteAsync(); + + BatchSinglePartitionKeyTests.VerifyBatchProcessed(batchResponse, numberOfOperations: 3); + + Assert.AreEqual(HttpStatusCode.OK, batchResponse[0].StatusCode); + Assert.AreEqual(HttpStatusCode.OK, batchResponse[1].StatusCode); + Assert.AreEqual(HttpStatusCode.OK, batchResponse[2].StatusCode); + + Assert.AreEqual(this.TestDocPk1ExistingA, batchResponse.GetOperationResultAtIndex(0).Resource); + Assert.AreEqual(this.TestDocPk1ExistingB, batchResponse.GetOperationResultAtIndex(1).Resource); + Assert.AreEqual(this.TestDocPk1ExistingC, batchResponse.GetOperationResultAtIndex(2).Resource); + } + + private async Task RunCrudAsync(bool isStream, bool isSchematized, bool useEpk, CosmosContainer container, RequestOptions batchOptions = null) + { + if (isSchematized) + { + await this.CreateSchematizedTestDocsAsync(container); + + batchOptions = CosmosBatchTestBase.GetUpdatedBatchRequestOptions(batchOptions, isSchematized, useEpk, this.PartitionKey1); + } + else + { + await this.CreateJsonTestDocsAsync(container); + } + + TestDoc testDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + + TestDoc testDocToUpsert = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + + TestDoc anotherTestDocToUpsert = this.GetTestDocCopy(this.TestDocPk1ExistingA); + anotherTestDocToUpsert.Cost++; + + TestDoc testDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingB); + testDocToReplace.Cost++; + + // We run CRUD operations where all are expected to return HTTP 2xx. + CosmosBatchResponse batchResponse; + if (!isStream) + { + batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + .CreateItem(testDocToCreate) + .ReadItem(this.TestDocPk1ExistingC.Id) + .ReplaceItem(testDocToReplace.Id, testDocToReplace) + .UpsertItem(testDocToUpsert) + .UpsertItem(anotherTestDocToUpsert) + .DeleteItem(this.TestDocPk1ExistingD.Id) + .ExecuteAsync(batchOptions); + } + else + { + batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1, useEpk)) + .CreateItemStream( + CosmosBatchTestBase.TestDocToStream(testDocToCreate, isSchematized), + CosmosBatchTestBase.GetItemRequestOptions(testDocToCreate, isSchematized)) + .ReadItem( + CosmosBatchTestBase.GetId(this.TestDocPk1ExistingC, isSchematized), + CosmosBatchTestBase.GetItemRequestOptions(this.TestDocPk1ExistingC, isSchematized)) + .ReplaceItemStream( + CosmosBatchTestBase.GetId(testDocToReplace, isSchematized), + CosmosBatchTestBase.TestDocToStream(testDocToReplace, isSchematized), + CosmosBatchTestBase.GetItemRequestOptions(testDocToReplace, isSchematized)) + .UpsertItemStream( + CosmosBatchTestBase.TestDocToStream(testDocToUpsert, isSchematized), + CosmosBatchTestBase.GetItemRequestOptions(testDocToUpsert, isSchematized)) + .UpsertItemStream( + CosmosBatchTestBase.TestDocToStream(anotherTestDocToUpsert, isSchematized), + CosmosBatchTestBase.GetItemRequestOptions(anotherTestDocToUpsert, isSchematized)) + .DeleteItem( + CosmosBatchTestBase.GetId(this.TestDocPk1ExistingD, isSchematized), + CosmosBatchTestBase.GetItemRequestOptions(this.TestDocPk1ExistingD, isSchematized)) + .ExecuteAsync(batchOptions); + } + + BatchSinglePartitionKeyTests.VerifyBatchProcessed(batchResponse, numberOfOperations: 6); + + Assert.AreEqual(HttpStatusCode.Created, batchResponse[0].StatusCode); + Assert.AreEqual(HttpStatusCode.OK, batchResponse[1].StatusCode); + Assert.AreEqual(HttpStatusCode.OK, 
batchResponse[2].StatusCode); + Assert.AreEqual(HttpStatusCode.Created, batchResponse[3].StatusCode); + Assert.AreEqual(HttpStatusCode.OK, batchResponse[4].StatusCode); + Assert.AreEqual(HttpStatusCode.NoContent, batchResponse[5].StatusCode); + + if (!isStream) + { + Assert.AreEqual(this.TestDocPk1ExistingC, batchResponse.GetOperationResultAtIndex(1).Resource); + } + else + { + Assert.AreEqual(this.TestDocPk1ExistingC, CosmosBatchTestBase.StreamToTestDoc(batchResponse[1].ResourceStream, isSchematized)); + } + + await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToCreate, isStream, isSchematized, useEpk); + await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToReplace, isStream, isSchematized, useEpk); + await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToUpsert, isStream, isSchematized, useEpk); + await CosmosBatchTestBase.VerifyByReadAsync(container, anotherTestDocToUpsert, isStream, isSchematized, useEpk); + await CosmosBatchTestBase.VerifyNotFoundAsync(container, this.TestDocPk1ExistingD, isSchematized, useEpk); + + return batchResponse; + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch with a large set of read operations that is expected to be rate limited.")] + public async Task BatchRateLimitingAsync() + { + CosmosContainer containerWithDefaultRetryPolicy = CosmosBatchTestBase.LowThroughputJsonContainer; + + await this.CreateJsonTestDocsAsync(containerWithDefaultRetryPolicy); + CosmosClient clientWithNoThrottleRetry = new CosmosClientBuilder( + CosmosBatchTestBase.Client.ClientOptions.EndPoint.ToString(), + CosmosBatchTestBase.Client.ClientOptions.AccountKey.Key) + .WithThrottlingRetryOptions( + maxRetryWaitTimeOnThrottledRequests: default(TimeSpan), + maxRetryAttemptsOnThrottledRequests: 0) + .Build(); + + CosmosContainer containerWithNoThrottleRetry = + clientWithNoThrottleRetry.GetContainer(CosmosBatchTestBase.Database.Id, CosmosBatchTestBase.LowThroughputJsonContainer.Id); + + // The second batch started should be rate limited by the backend in admission control. + { + CosmosBatchResponse[] batchResponses = await this.RunTwoLargeBatchesAsync(containerWithNoThrottleRetry); + + Assert.AreEqual(HttpStatusCode.OK, batchResponses[0].StatusCode); + Assert.AreEqual((int)StatusCodes.TooManyRequests, (int)batchResponses[1].StatusCode); + Assert.AreEqual(3200, (int)batchResponses[1].SubStatusCode); + } + + // The default retry policy around throttling should ensure the second batch also succeeds. 
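+            // For contrast with clientWithNoThrottleRetry above, a client that tolerates more
+            // throttling could be configured as sketched below (illustrative values only;
+            // endpoint and authKey stand in for the account endpoint and key, while
+            // containerWithDefaultRetryPolicy simply relies on the SDK defaults):
+            //
+            //   CosmosClient clientWithGenerousRetries = new CosmosClientBuilder(endpoint, authKey)
+            //       .WithThrottlingRetryOptions(
+            //           maxRetryWaitTimeOnThrottledRequests: TimeSpan.FromSeconds(60),
+            //           maxRetryAttemptsOnThrottledRequests: 9)
+            //       .Build();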
+ { + CosmosBatchResponse[] batchResponses = await this.RunTwoLargeBatchesAsync(containerWithDefaultRetryPolicy); + + Assert.AreEqual(HttpStatusCode.OK, batchResponses[0].StatusCode); + Assert.AreEqual(HttpStatusCode.OK, batchResponses[1].StatusCode); + } + } + + private async Task RunTwoLargeBatchesAsync(CosmosContainer container) + { + CosmosBatch batch1 = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + CosmosBatch batch2 = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + + for (int i = 0; i < Constants.MaxOperationsInDirectModeBatchRequest; i++) + { + batch1.CreateItem(BatchSinglePartitionKeyTests.PopulateTestDoc(this.PartitionKey1)); + batch2.CreateItem(BatchSinglePartitionKeyTests.PopulateTestDoc(this.PartitionKey1)); + } + + Task batch1Task = batch1.ExecuteAsync(); + await Task.Delay(50); + Task batch2Task = batch2.ExecuteAsync(); + + CosmosBatchResponse[] batchResponses = await Task.WhenAll(batch1Task, batch2Task); + return batchResponses; + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch with a create operation having a conflict rolls back prior operations")] + public async Task BatchWithCreateConflictAsync() + { + await this.RunBatchWithCreateConflictAsync(CosmosBatchTestBase.JsonContainer); + } + + [TestMethod] + [Owner("rakkuma")] + [Description("Verify batch with a create operation having a conflict rolls back prior operations in gateway mode")] + public async Task BatchWithCreateConflictGatewayAsync() + { + await this.RunBatchWithCreateConflictAsync(CosmosBatchTestBase.GatewayJsonContainer); + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch with a create operation having a conflict rolls back prior operations in shared throughput")] + public async Task BatchWithCreateConflictSharedThroughputAsync() + { + await this.RunBatchWithCreateConflictAsync(CosmosBatchTestBase.SharedThroughputContainer); + } + + private async Task RunBatchWithCreateConflictAsync(CosmosContainer container) + { + await this.CreateJsonTestDocsAsync(container); + + // try to create a doc with id that already exists (should return a Conflict) + TestDoc conflictingTestDocToCreate = this.GetTestDocCopy(this.TestDocPk1ExistingA); + conflictingTestDocToCreate.Cost++; + + await this.RunWithErrorAsync( + container, + batch => batch.CreateItem(conflictingTestDocToCreate), + HttpStatusCode.Conflict); + + // make sure the conflicted doc hasn't changed + await CosmosBatchTestBase.VerifyByReadAsync(container, this.TestDocPk1ExistingA); + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch with an invalid create operation rolls back prior operations")] + public async Task BatchWithInvalidCreateAsync() + { + CosmosContainer container = CosmosBatchTestBase.JsonContainer; + + // partition key mismatch between doc and and value passed in to the operation + await this.RunWithErrorAsync( + container, + batch => batch.CreateItem(CosmosBatchTestBase.PopulateTestDoc(partitionKey: Guid.NewGuid().ToString())), + HttpStatusCode.BadRequest); + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch with a read operation on a non-existent entity rolls back prior operations")] + public async Task BatchWithReadOfNonExistentEntityAsync() + { + CosmosContainer container = CosmosBatchTestBase.JsonContainer; + await this.RunWithErrorAsync( + container, + batch => batch.ReadItem(Guid.NewGuid().ToString()), + HttpStatusCode.NotFound); + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch with a 
replace operation on a stale entity rolls back prior operations")] + public async Task BatchWithReplaceOfStaleEntityAsync() + { + CosmosContainer container = CosmosBatchTestBase.JsonContainer; + await this.CreateJsonTestDocsAsync(container); + + TestDoc staleTestDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingA); + staleTestDocToReplace.Cost++; + ItemRequestOptions staleReplaceOptions = new ItemRequestOptions() + { + IfMatchEtag = Guid.NewGuid().ToString() + }; + + await this.RunWithErrorAsync( + container, + batch => batch.ReplaceItem(staleTestDocToReplace.Id, staleTestDocToReplace, staleReplaceOptions), + HttpStatusCode.PreconditionFailed); + + // make sure the stale doc hasn't changed + await CosmosBatchTestBase.VerifyByReadAsync(container, this.TestDocPk1ExistingA); + } + + [TestMethod] + [Owner("abpai")] + [Description("Verify batch with a delete operation on a non-existent entity rolls back prior operations")] + public async Task BatchWithDeleteOfNonExistentEntityAsync() + { + CosmosContainer container = CosmosBatchTestBase.JsonContainer; + await this.RunWithErrorAsync( + container, + batch => batch.DeleteItem(Guid.NewGuid().ToString()), + HttpStatusCode.NotFound); + } + + private async Task RunWithErrorAsync( + CosmosContainer container, + Action appendOperation, + HttpStatusCode expectedFailedOperationStatusCode) + { + TestDoc testDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + TestDoc anotherTestDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + + CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + .CreateItem(testDocToCreate); + + appendOperation(batch); + + CosmosBatchResponse batchResponse = await batch + .CreateItem(anotherTestDocToCreate) + .ExecuteAsync(); + + BatchSinglePartitionKeyTests.VerifyBatchProcessed( + batchResponse, + numberOfOperations: 3, + expectedStatusCode: (HttpStatusCode)StatusCodes.MultiStatus); + + Assert.AreEqual((HttpStatusCode)StatusCodes.FailedDependency, batchResponse[0].StatusCode); + Assert.AreEqual(expectedFailedOperationStatusCode, batchResponse[1].StatusCode); + Assert.AreEqual((HttpStatusCode)StatusCodes.FailedDependency, batchResponse[2].StatusCode); + + await CosmosBatchTestBase.VerifyNotFoundAsync(container, testDocToCreate); + await CosmosBatchTestBase.VerifyNotFoundAsync(container, anotherTestDocToCreate); + return container; + } + + private static void VerifyBatchProcessed(CosmosBatchResponse batchResponse, int numberOfOperations, HttpStatusCode expectedStatusCode = HttpStatusCode.OK) + { + Assert.IsNotNull(batchResponse); + Assert.AreEqual( + expectedStatusCode, + batchResponse.StatusCode, + string.Format("Batch server response had StatusCode {0} instead of {1} expected and had ErrorMessage {2}", + batchResponse.StatusCode, + expectedStatusCode, + batchResponse.ErrorMessage)); + + Assert.AreEqual(numberOfOperations, batchResponse.Count); + } + } +} diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs new file mode 100644 index 0000000000..d2e4ec9f9d --- /dev/null +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs @@ -0,0 +1,631 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.SDK.EmulatorTests +{ + using System; + using System.Collections.Generic; + using System.IO; + using System.Net; + using System.Text; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.Serialization.HybridRow; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.IO; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.Layouts; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.Schemas; + using Microsoft.Azure.Documents; + using Microsoft.VisualStudio.TestTools.UnitTesting; + using Newtonsoft.Json; + + public class CosmosBatchTestBase + { + protected static CosmosClient Client { get; set; } + + protected static CosmosClient GatewayClient { get; set; } + + protected static CosmosDatabase Database { get; set; } + + protected static CosmosDatabase SharedThroughputDatabase { get; set; } + + protected static CosmosDatabase GatewayDatabase { get; set; } + + protected static CosmosContainer JsonContainer { get; set; } + + protected static CosmosContainer GatewayJsonContainer { get; set; } + + protected static CosmosContainer LowThroughputJsonContainer { get; set; } + + protected static CosmosContainer GatewayLowThroughputJsonContainer { get; set; } + + protected static CosmosContainer SchematizedContainer { get; set; } + + protected static CosmosContainer GatewaySchematizedContainer { get; set; } + + protected static CosmosContainer SharedThroughputContainer { get; set; } + + internal static PartitionKeyDefinition PartitionKeyDefinition { get; set; } + + protected static Random Random { get; set; } = new Random(); + + protected static LayoutResolverNamespace LayoutResolver { get; set; } + + protected static Layout TestDocLayout { get; set; } + + protected object PartitionKey1 { get; set; } = "TBD1"; + + // Documents in PartitionKey1 + protected TestDoc TestDocPk1ExistingA { get; set; } + + // Documents in PartitionKey1 + protected TestDoc TestDocPk1ExistingB { get; set; } + + // Documents in PartitionKey1 + protected TestDoc TestDocPk1ExistingC { get; set; } + + // Documents in PartitionKey1 + protected TestDoc TestDocPk1ExistingD { get; set; } + + public static void ClassInit(TestContext context) + { + InitializeDirectContainers(); + InitializeGatewayContainers(); + InitializeSharedThroughputContainer(); + } + + private static void InitializeDirectContainers() + { + CosmosBatchTestBase.Client = TestCommon.CreateCosmosClient(); + CosmosBatchTestBase.Database = CosmosBatchTestBase.Client.CreateDatabaseAsync(Guid.NewGuid().ToString()) + .GetAwaiter().GetResult().Database; + + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + partitionKeyDefinition.Paths.Add("/Status"); + + CosmosBatchTestBase.LowThroughputJsonContainer = CosmosBatchTestBase.Database.CreateContainerAsync( + new CosmosContainerSettings() + { + Id = Guid.NewGuid().ToString(), + PartitionKey = partitionKeyDefinition + }, + requestUnitsPerSecond: 400).GetAwaiter().GetResult().Container; + + CosmosBatchTestBase.PartitionKeyDefinition = ((CosmosContainerCore)CosmosBatchTestBase.LowThroughputJsonContainer).GetPartitionKeyDefinitionAsync(CancellationToken.None).GetAwaiter().GetResult(); + + // Create a container with at least 2 physical partitions for effective cross-partition testing + CosmosBatchTestBase.JsonContainer = CosmosBatchTestBase.Database.CreateContainerAsync( + new CosmosContainerSettings() + { + Id = Guid.NewGuid().ToString(), + PartitionKey = 
CosmosBatchTestBase.PartitionKeyDefinition + }, + requestUnitsPerSecond: 12000).GetAwaiter().GetResult().Container; + + Serialization.HybridRow.Schemas.Schema testSchema = TestDoc.GetSchema(); + Namespace testNamespace = new Namespace() + { + Name = "Test", + Version = SchemaLanguageVersion.V1, + Schemas = new List() + { + testSchema + } + }; + + CosmosBatchTestBase.LayoutResolver = new LayoutResolverNamespace(testNamespace); + CosmosBatchTestBase.TestDocLayout = CosmosBatchTestBase.LayoutResolver.Resolve(testSchema.SchemaId); + + BatchContainerSettings schematizedContainerSettings = new BatchContainerSettings() + { + Id = Guid.NewGuid().ToString(), + PartitionKey = CosmosBatchTestBase.PartitionKeyDefinition, + DefaultTimeToLive = (int)TimeSpan.FromDays(1).TotalSeconds // allow for TTL testing + }; + + SchemaPolicy schemaPolicy = new SchemaPolicy() + { + TableSchema = testNamespace, + }; + + schematizedContainerSettings.SchemaPolicy = schemaPolicy; + + CosmosBatchTestBase.SchematizedContainer = CosmosBatchTestBase.Database.CreateContainerAsync( + schematizedContainerSettings, + requestUnitsPerSecond: 12000).GetAwaiter().GetResult().Container; + } + + private static void InitializeGatewayContainers() + { + CosmosBatchTestBase.GatewayClient = TestCommon.CreateCosmosClient(useGateway: true); + CosmosBatchTestBase.GatewayDatabase = GatewayClient.GetDatabase(CosmosBatchTestBase.Database.Id); + + CosmosBatchTestBase.GatewayLowThroughputJsonContainer = CosmosBatchTestBase.GatewayDatabase.GetContainer(CosmosBatchTestBase.LowThroughputJsonContainer.Id); + CosmosBatchTestBase.GatewayJsonContainer = CosmosBatchTestBase.GatewayDatabase.GetContainer(CosmosBatchTestBase.JsonContainer.Id); + CosmosBatchTestBase.GatewaySchematizedContainer = CosmosBatchTestBase.GatewayDatabase.GetContainer(CosmosBatchTestBase.SchematizedContainer.Id); + } + + private static void InitializeSharedThroughputContainer() + { + CosmosClient client = TestCommon.CreateCosmosClient(); + CosmosDatabase db = client.CreateDatabaseAsync(string.Format("Shared_{0}", Guid.NewGuid().ToString("N")), requestUnitsPerSecond: 20000).GetAwaiter().GetResult().Database; + + for (int index = 0; index < 5; index++) + { + ContainerResponse containerResponse = db.CreateContainerAsync( + new CosmosContainerSettings + { + Id = Guid.NewGuid().ToString(), + PartitionKey = CosmosBatchTestBase.PartitionKeyDefinition + }) + .GetAwaiter().GetResult(); + + Assert.AreEqual(true, bool.Parse(containerResponse.Headers.Get(WFConstants.BackendHeaders.ShareThroughput))); + + if (index == 2) + { + CosmosBatchTestBase.SharedThroughputContainer = containerResponse.Container; + } + } + + CosmosBatchTestBase.SharedThroughputDatabase = db; + } + + public static void ClassClean() + { + if (CosmosBatchTestBase.Client == null) + { + return; + } + + if (CosmosBatchTestBase.Database != null) + { + CosmosBatchTestBase.Database.DeleteAsync().GetAwaiter().GetResult(); + } + + if (CosmosBatchTestBase.SharedThroughputDatabase != null) + { + CosmosBatchTestBase.SharedThroughputDatabase.DeleteAsync().GetAwaiter().GetResult(); + } + + CosmosBatchTestBase.Client.Dispose(); + } + + protected virtual async Task CreateJsonTestDocsAsync(CosmosContainer container) + { + this.TestDocPk1ExistingA = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingB = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingC = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, 
this.PartitionKey1); + this.TestDocPk1ExistingD = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); + } + + protected virtual async Task CreateSchematizedTestDocsAsync(CosmosContainer container) + { + this.TestDocPk1ExistingA = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingB = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingC = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingD = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); + } + + protected static TestDoc PopulateTestDoc(object partitionKey, int minDesiredSize = 20) + { + string description = new string('x', minDesiredSize); + return new TestDoc() + { + Id = Guid.NewGuid().ToString(), + Cost = CosmosBatchTestBase.Random.Next(), + Description = description, + Status = partitionKey.ToString() + }; + } + + protected TestDoc GetTestDocCopy(TestDoc testDoc) + { + return new TestDoc() + { + Id = testDoc.Id, + Cost = testDoc.Cost, + Description = testDoc.Description, + Status = testDoc.Status + }; + } + + protected static Stream TestDocToStream(TestDoc testDoc, bool isSchematized) + { + if (isSchematized) + { + return testDoc.ToHybridRowStream(); + } + else + { + CosmosJsonSerializerCore serializer = new CosmosJsonSerializerCore(); + return serializer.ToStream(testDoc); + } + } + + protected static TestDoc StreamToTestDoc(Stream stream, bool isSchematized) + { + if (isSchematized) + { + return TestDoc.FromHybridRowStream(stream); + } + else + { + CosmosJsonSerializerCore serializer = new CosmosJsonSerializerCore(); + return serializer.FromStream(stream); + } + } + + protected static async Task VerifyByReadAsync(CosmosContainer container, TestDoc doc, bool isStream = false, bool isSchematized = false, bool useEpk = false, string eTag = null) + { + Cosmos.PartitionKey partitionKey = CosmosBatchTestBase.GetPartitionKey(doc.Status, useEpk); + + if (isStream) + { + string id = CosmosBatchTestBase.GetId(doc, isSchematized); + ItemRequestOptions requestOptions = CosmosBatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk, isPartOfBatch: false); + CosmosResponseMessage response = await container.ReadItemStreamAsync(partitionKey, id, requestOptions); + + Assert.AreEqual(HttpStatusCode.OK, response.StatusCode); + Assert.AreEqual(doc, CosmosBatchTestBase.StreamToTestDoc(response.Content, isSchematized)); + + if (eTag != null) + { + Assert.AreEqual(eTag, response.Headers.ETag); + } + } + else + { + ItemResponse response = await container.ReadItemAsync(partitionKey, doc.Id); + + Assert.AreEqual(HttpStatusCode.OK, response.StatusCode); + Assert.AreEqual(doc, response.Resource); + + if (eTag != null) + { + Assert.AreEqual(eTag, response.Headers.ETag); + } + } + } + + protected static async Task VerifyNotFoundAsync(CosmosContainer container, TestDoc doc, bool isSchematized = false, bool useEpk = false) + { + string id = CosmosBatchTestBase.GetId(doc, isSchematized); + Cosmos.PartitionKey partitionKey = CosmosBatchTestBase.GetPartitionKey(doc.Status, useEpk); + ItemRequestOptions requestOptions = CosmosBatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk, isPartOfBatch: false); + + CosmosResponseMessage response = await container.ReadItemStreamAsync(partitionKey, id, requestOptions); + + Assert.AreEqual(HttpStatusCode.NotFound, response.StatusCode); + } + + protected static 
RequestOptions GetUpdatedBatchRequestOptions( + RequestOptions batchOptions = null, + bool isSchematized = false, + bool useEpk = false, + object partitionKey = null) + { + if (isSchematized) + { + if (batchOptions == null) + { + batchOptions = new RequestOptions(); + } + + if (batchOptions.Properties == null) + { + batchOptions.Properties = new Dictionary(); + } + + batchOptions.Properties.Add(WFConstants.BackendHeaders.BinaryPassthroughRequest, bool.TrueString); + + if (useEpk) + { + string epk = new Microsoft.Azure.Documents.PartitionKey(partitionKey) + .InternalKey + .GetEffectivePartitionKeyString(CosmosBatchTestBase.PartitionKeyDefinition); + + batchOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKeyString, epk); + batchOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, CosmosBatchTestBase.HexStringToBytes(epk)); + } + } + + return batchOptions; + } + + protected static Cosmos.PartitionKey GetPartitionKey(object partitionKey, bool useEpk = false) + { + return useEpk ? null : new Cosmos.PartitionKey(partitionKey); + } + + protected static string GetId(TestDoc doc, bool isSchematized) + { + if (isSchematized) + { + return "cdbBinaryIdRequest"; + } + + return doc.Id; + } + + + protected static ItemRequestOptions GetItemRequestOptions(TestDoc doc, bool isSchematized, bool useEpk = false, bool isPartOfBatch = true, int? ttlInSeconds = null) + { + ItemRequestOptions requestOptions = null; + if (isSchematized) + { + requestOptions = new ItemRequestOptions() + { + Properties = new Dictionary + { + { WFConstants.BackendHeaders.BinaryId, Encoding.UTF8.GetBytes(doc.Id) } + } + }; + + if (ttlInSeconds.HasValue) + { + requestOptions.Properties.Add(WFConstants.BackendHeaders.TimeToLiveInSeconds, ttlInSeconds.Value.ToString()); + } + + if (useEpk) + { + string epk = new Microsoft.Azure.Documents.PartitionKey(doc.Status) + .InternalKey + .GetEffectivePartitionKeyString(CosmosBatchTestBase.PartitionKeyDefinition); + + requestOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKeyString, epk); + requestOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, CosmosBatchTestBase.HexStringToBytes(epk)); + } + + if (!isPartOfBatch) + { + requestOptions.Properties.Add(WFConstants.BackendHeaders.BinaryPassthroughRequest, bool.TrueString); + } + } + + return requestOptions; + } + + protected static async Task CreateJsonTestDocAsync(CosmosContainer container, object partitionKey, int minDesiredSize = 20) + { + TestDoc doc = CosmosBatchTestBase.PopulateTestDoc(partitionKey, minDesiredSize); + ItemResponse createResponse = await container.CreateItemAsync(doc, CosmosBatchTestBase.GetPartitionKey(partitionKey)); + Assert.AreEqual(HttpStatusCode.Created, createResponse.StatusCode); + return doc; + } + + protected static async Task CreateSchematizedTestDocAsync(CosmosContainer container, object partitionKey, int? 
ttlInSeconds = null) + { + TestDoc doc = CosmosBatchTestBase.PopulateTestDoc(partitionKey); + CosmosResponseMessage createResponse = await container.CreateItemStreamAsync( + CosmosBatchTestBase.GetPartitionKey(partitionKey), + doc.ToHybridRowStream(), + CosmosBatchTestBase.GetItemRequestOptions(doc, isSchematized: true, isPartOfBatch: false, ttlInSeconds: ttlInSeconds)); + Assert.AreEqual( + HttpStatusCode.Created, + createResponse.StatusCode); + return doc; + } + + protected static byte[] HexStringToBytes(string input) + { + byte[] bytes = new byte[input.Length / 2]; + for (int i = 0; i < input.Length; i += 2) + { + bytes[i / 2] = Convert.ToByte(input.Substring(i, 2), 16); + } + + return bytes; + } + +#pragma warning disable CA1034 + public class TestDoc +#pragma warning restore CA1034 + { + [JsonProperty("id")] + public string Id { get; set; } + + public int Cost { get; set; } + + public string Description { get; set; } + + public string Status { get; set; } + + public override bool Equals(object obj) + { + return obj is TestDoc doc + && this.Id == doc.Id + && this.Cost == doc.Cost + && this.Description == doc.Description + && this.Status == doc.Status; + } + + public override int GetHashCode() + { + int hashCode = 1652434776; + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(this.Id); + hashCode = (hashCode * -1521134295) + this.Cost.GetHashCode(); + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(this.Description); + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(this.Status); + return hashCode; + } + + public override string ToString() + { + return JsonConvert.SerializeObject(this); + } + + public static TestDoc FromHybridRowStream(Stream stream) + { + uint length = 0; + using (BinaryReader binaryReader = new BinaryReader(stream, Encoding.Default, leaveOpen: true)) + { + TestDoc.SkipBinaryField(binaryReader); // binaryId + TestDoc.SkipBinaryField(binaryReader); // EPK + + binaryReader.ReadByte(); + length = binaryReader.ReadUInt32(); + } + + RowBuffer row = new RowBuffer((int)length); + Assert.IsTrue(row.ReadFrom(stream, (int)length, HybridRowVersion.V1, CosmosBatchTestBase.LayoutResolver)); + RowReader reader = new RowReader(ref row); + + TestDoc testDoc = new TestDoc(); + while (reader.Read()) + { + Result r; + switch (reader.Path) + { + case "Id": + r = reader.ReadString(out string id); + Assert.AreEqual(Result.Success, r); + testDoc.Id = id; + break; + + case "Cost": + r = reader.ReadInt32(out int cost); + Assert.AreEqual(Result.Success, r); + testDoc.Cost = cost; + break; + + case "Status": + r = reader.ReadString(out string status); + Assert.AreEqual(Result.Success, r); + testDoc.Status = status; + break; + + case "Description": + r = reader.ReadString(out string description); + Assert.AreEqual(Result.Success, r); + testDoc.Description = description; + break; + } + } + + return testDoc; + } + + public MemoryStream ToHybridRowStream() + { + RowBuffer row = new RowBuffer(80000); + row.InitLayout(HybridRowVersion.V1, CosmosBatchTestBase.TestDocLayout, CosmosBatchTestBase.LayoutResolver); + Result r = RowWriter.WriteBuffer(ref row, this, TestDoc.WriteDoc); + Assert.AreEqual(Result.Success, r); + MemoryStream output = new MemoryStream(row.Length); + row.WriteTo(output); + output.Position = 0; + return output; + } + + public static Serialization.HybridRow.Schemas.Schema GetSchema() + { + return new Serialization.HybridRow.Schemas.Schema() + { + SchemaId = new SchemaId(-1), + Name = "TestDoc", + Type = 
TypeKind.Schema, + Properties = new List() + { + new Property() + { + Path = "Id", + PropertyType = new PrimitivePropertyType() + { + Type = TypeKind.Utf8, + Storage = StorageKind.Variable + } + }, + new Property() + { + Path = "Cost", + PropertyType = new PrimitivePropertyType() + { + Type = TypeKind.Int32, + Storage = StorageKind.Fixed + } + }, + new Property() + { + Path = "Status", + PropertyType = new PrimitivePropertyType() + { + Type = TypeKind.Utf8, + Storage = StorageKind.Variable + } + }, + new Property() + { + Path = "Description", + PropertyType = new PrimitivePropertyType() + { + Type = TypeKind.Utf8, + Storage = StorageKind.Variable + } + } + }, + PartitionKeys = new List() + { + new Serialization.HybridRow.Schemas.PartitionKey() + { + Path = "Status" + } + }, + Options = new SchemaOptions() + { + DisallowUnschematized = true + } + }; + } + + private static Result WriteDoc(ref RowWriter writer, TypeArgument typeArg, TestDoc doc) + { + Result r = writer.WriteString("Id", doc.Id); + if (r != Result.Success) + { + return r; + } + + r = writer.WriteInt32("Cost", doc.Cost); + if (r != Result.Success) + { + return r; + } + + r = writer.WriteString("Status", doc.Status); + if (r != Result.Success) + { + return r; + } + + r = writer.WriteString("Description", doc.Description); + if (r != Result.Success) + { + return r; + } + + return Result.Success; + } + + private static void SkipBinaryField(BinaryReader binaryReader) + { + binaryReader.ReadByte(); + uint length = binaryReader.ReadUInt32(); + binaryReader.ReadBytes((int)length); + } + } + + private class BatchContainerSettings : CosmosContainerSettings + { + [JsonProperty("schemaPolicy")] + public SchemaPolicy SchemaPolicy { get; set; } + } + + private class SchemaPolicy + { + public Namespace TableSchema { get; set; } + + public Namespace TypeSchema { get; set; } + } + } +} diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Microsoft.Azure.Cosmos.EmulatorTests.csproj b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Microsoft.Azure.Cosmos.EmulatorTests.csproj index 2ed4dc0674..9c70a42770 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Microsoft.Azure.Cosmos.EmulatorTests.csproj +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Microsoft.Azure.Cosmos.EmulatorTests.csproj @@ -16,6 +16,7 @@ + @@ -235,6 +236,17 @@ PreserveNewest + + + + + + C:\Users\abpai\.nuget\packages\microsoft.azure.cosmos.direct.myget\3.0.0.33-preview\runtimes\any\lib\netstandard2.0\Microsoft.Azure.Cosmos.Core.dll + + + C:\Users\abpai\.nuget\packages\microsoft.azure.cosmos.direct.myget\3.0.0.33-preview\runtimes\any\lib\netstandard2.0\Microsoft.Azure.Cosmos.Serialization.HybridRow.dll + + false diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchExecUtilsUnitTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchExecUtilsUnitTests.cs new file mode 100644 index 0000000000..4e658c7af2 --- /dev/null +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchExecUtilsUnitTests.cs @@ -0,0 +1,248 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Tests +{ + using System; + using System.IO; + using System.Linq; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Documents; + using Microsoft.VisualStudio.TestTools.UnitTesting; + + [TestClass] + public class BatchExecUtilsUnitTests + { + private Random random = new Random(); + + [TestMethod] + [Owner("abpai")] + public async Task StreamToBytesAsyncSeekableAsync() + { + const int bytesLength = 10; + byte[] bytes = new byte[bytesLength]; + this.random.NextBytes(bytes); + const int maximumLength = 100; + { + Stream stream = new MemoryStream(bytes); + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength, CancellationToken.None); + Assert.IsTrue(actual.Span.SequenceEqual(bytes)); + } + + { + Stream stream = new MemoryStream(bytes, 2, 5, writable: false, publiclyVisible: true); + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength, CancellationToken.None); + Assert.IsTrue(actual.Span.SequenceEqual(bytes.Skip(2).Take(5).ToArray())); + } + + { + Stream stream = new MemoryStream(bytes, 2, 5, writable: false, publiclyVisible: false); + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength, CancellationToken.None); + Assert.IsTrue(actual.Span.SequenceEqual(bytes.Skip(2).Take(5).ToArray())); + } + + { + Stream stream = new MemoryStream(bytes.Length * 2); + await stream.WriteAsync(bytes, 0, bytes.Length); + stream.Position = 0; + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength, CancellationToken.None); + Assert.IsTrue(actual.Span.SequenceEqual(bytes)); + } + + { + Stream stream = new TestSeekableStream(bytes, maxLengthToReturnPerRead: 3); + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength, CancellationToken.None); + Assert.IsTrue(actual.Span.SequenceEqual(bytes)); + } + } + + [TestMethod] + [Owner("abpai")] + public async Task StreamToBytesAsyncSeekableMaximumLengthAsync() + { + byte[] bytes = new byte[10]; + this.random.NextBytes(bytes); + + Stream stream = new MemoryStream(bytes); + { + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength: 100, cancellationToken: CancellationToken.None); + Assert.IsTrue(actual.Span.SequenceEqual(bytes)); + } + + { + stream.Position = 0; + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength: 10, cancellationToken: CancellationToken.None); + Assert.IsTrue(actual.Span.SequenceEqual(bytes)); + } + + { + stream.Position = 0; + try + { + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength: 9, cancellationToken: CancellationToken.None); + Assert.Fail("Expected " + nameof(RequestEntityTooLargeException)); + } + catch (RequestEntityTooLargeException) + { + } + } + } + + + [TestMethod] + [Owner("abpai")] + public async Task StreamToBytesAsyncNonSeekableAsync() + { + byte[] bytes = new byte[10]; + this.random.NextBytes(bytes); + TestNonSeekableStream stream = new TestNonSeekableStream(bytes, maxLengthToReturnPerRead: 3); + { + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength: 100, cancellationToken: CancellationToken.None); + Assert.IsTrue(actual.Span.SequenceEqual(bytes)); + } + + { + stream.Reset(); + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength: 10, cancellationToken: CancellationToken.None); + Assert.IsTrue(actual.Span.SequenceEqual(bytes)); + } + + { + 
stream.Reset(); + try + { + Memory actual = await BatchExecUtils.StreamToMemoryAsync(stream, maximumLength: 9, cancellationToken: CancellationToken.None); + Assert.Fail("Expected " + nameof(RequestEntityTooLargeException)); + } + catch (RequestEntityTooLargeException) + { + } + } + } + + /// + /// Seekable stream that is not a derived class of MemoryStream for testing. + /// Caller controls max count actually set into the buffer during Read() + /// to simulate Socket like read. + /// + private class TestSeekableStream : Stream + { + private readonly int maxLengthToReturnPerRead; + + private readonly MemoryStream memoryStream; + + public override bool CanRead => true; + + public override bool CanSeek => true; + + public override bool CanWrite => true; + + public override long Length => this.memoryStream.Length; + + public override long Position + { + get => this.memoryStream.Position; + set => this.memoryStream.Position = value; + } + + public TestSeekableStream(byte[] bytes, int maxLengthToReturnPerRead) + { + this.memoryStream = new MemoryStream(bytes); + this.maxLengthToReturnPerRead = maxLengthToReturnPerRead; + } + + public override void Flush() + { + this.memoryStream.Flush(); + } + + public override int Read(byte[] buffer, int offset, int count) + { + count = Math.Min(count, this.maxLengthToReturnPerRead); + return this.memoryStream.Read(buffer, offset, count); + } + + public override long Seek(long offset, SeekOrigin origin) + { + return this.memoryStream.Seek(offset, origin); + } + + public override void SetLength(long value) + { + this.memoryStream.SetLength(value); + } + + public override void Write(byte[] buffer, int offset, int count) + { + this.memoryStream.Write(buffer, offset, count); + } + } + + /// + /// Non-seekable stream to test Read() where count actually set into the buffer can be controlled to simulate Socket like read. 
+ /// + private class TestNonSeekableStream : Stream + { + private byte[] data; + + private int currentIndex; + + private readonly int maxLengthToReturnPerRead; + + public TestNonSeekableStream(byte[] data, int maxLengthToReturnPerRead) + { + this.data = data; + this.maxLengthToReturnPerRead = maxLengthToReturnPerRead; + } + + public void Reset() + { + this.currentIndex = 0; + } + + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + int copyCount = Math.Min(count, Math.Min(this.data.Length - this.currentIndex, this.maxLengthToReturnPerRead)); + for (int i = 0; i < copyCount; i++) + { + buffer[offset + i] = this.data[this.currentIndex + i]; + } + + this.currentIndex += copyCount; + return copyCount; + } + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } + } +} diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchRequestPayloadReader.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchRequestPayloadReader.cs new file mode 100644 index 0000000000..42604bdf0a --- /dev/null +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchRequestPayloadReader.cs @@ -0,0 +1,249 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Tests +{ + using System; + using System.Collections.Generic; + using System.IO; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.Serialization.HybridRow; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.IO; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.RecordIO; + using Microsoft.Azure.Documents; + using Microsoft.VisualStudio.TestTools.UnitTesting; + + internal class BatchRequestPayloadReader + { + private List operations = new List(); + + internal async Task> ReadPayloadAsync(Stream payload) + { + await payload.ReadRecordIOAsync( + record => + { + + Result r = this.ReadOperation(record, out ItemBatchOperation operation); + if (r != Result.Success) + { + return r; + } + + this.operations.Add(operation); + return r; + }, + resizer: new MemorySpanResizer((int)payload.Length)); + + return this.operations; + } + + private Result ReadOperation(Memory input, out ItemBatchOperation operation) + { + RowBuffer row = new RowBuffer(input.Length); + if (!row.ReadFrom(input.Span, HybridRowVersion.V1, BatchSchemaProvider.BatchLayoutResolver)) + { + operation = null; + return Result.Failure; + } + + RowReader reader = new RowReader(ref row); + return BatchRequestPayloadReader.ReadOperation(ref reader, this.operations.Count, out operation); + } + + private static Result ReadOperation(ref RowReader reader, int operationIndex, out ItemBatchOperation operation) + { + operation = null; + + OperationType operationType = OperationType.Invalid; + string partitionKeyJson = null; + byte[] effectivePartitionKey = null; + string id = null; + byte[] binaryId = null; + byte[] resourceBody = null; + Cosmos.IndexingDirective? indexingDirective = null; + string ifMatch = null; + string ifNoneMatch = null; + int? 
ttlInSeconds = null; + + while (reader.Read()) + { + Result r; + switch (reader.Path) + { + case "operationType": + r = reader.ReadInt32(out int operationTypeInt); + if (r != Result.Success) + { + return r; + } + + operationType = (OperationType)operationTypeInt; + break; + + case "resourceType": + r = reader.ReadInt32(out int resourceType); + if (r != Result.Success) + { + return r; + } + + Assert.AreEqual(ResourceType.Document, (ResourceType)resourceType); + break; + + case "partitionKey": + r = reader.ReadString(out partitionKeyJson); + if (r != Result.Success) + { + return r; + } + + break; + + case "effectivePartitionKey": + r = reader.ReadBinary(out effectivePartitionKey); + if (r != Result.Success) + { + return r; + } + + break; + + case "id": + r = reader.ReadString(out id); + if (r != Result.Success) + { + return r; + } + + break; + + case "binaryId": + r = reader.ReadBinary(out binaryId); + if (r != Result.Success) + { + return r; + } + + break; + + case "resourceBody": + r = reader.ReadBinary(out resourceBody); + if (r != Result.Success) + { + return r; + } + + break; + + case "indexingDirective": + r = reader.ReadString(out string indexingDirectiveStr); + if (r != Result.Success) + { + return r; + } + + if (!Enum.TryParse(indexingDirectiveStr, out Cosmos.IndexingDirective indexingDirectiveEnum)) + { + return Result.Failure; + } + + indexingDirective = indexingDirectiveEnum; + + break; + + case "ifMatch": + r = reader.ReadString(out ifMatch); + if (r != Result.Success) + { + return r; + } + + break; + + case "ifNoneMatch": + r = reader.ReadString(out ifNoneMatch); + if (r != Result.Success) + { + return r; + } + + break; + + case "timeToLiveInSeconds": + r = reader.ReadInt32(out int ttl); + if (r != Result.Success) + { + return r; + } + + ttlInSeconds = ttl; + break; + } + } + + // Ensure the mandatory fields were populated + if (operationType == OperationType.Invalid) + { + return Result.Failure; + } + + ItemRequestOptions requestOptions = null; + if (indexingDirective.HasValue || ifMatch != null || ifNoneMatch != null || binaryId != null || effectivePartitionKey != null || ttlInSeconds.HasValue) + { + requestOptions = new ItemRequestOptions(); + if (indexingDirective.HasValue) + { + requestOptions.IndexingDirective = indexingDirective; + } + + if (ifMatch != null) + { + requestOptions.IfMatchEtag = ifMatch; + } + else if (ifNoneMatch != null) + { + requestOptions.IfNoneMatchEtag = ifNoneMatch; + } + + if (binaryId != null || effectivePartitionKey != null || ttlInSeconds.HasValue) + { + requestOptions.Properties = new Dictionary(); + + if (binaryId != null) + { + requestOptions.Properties.Add(WFConstants.BackendHeaders.BinaryId, binaryId); + } + + if (effectivePartitionKey != null) + { + requestOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, effectivePartitionKey); + } + + if (ttlInSeconds.HasValue) + { + requestOptions.Properties.Add(WFConstants.BackendHeaders.TimeToLiveInSeconds, ttlInSeconds.ToString()); + } + } + } + + Documents.PartitionKey parsedPartitionKey = null; + if (partitionKeyJson != null) + { + parsedPartitionKey = Documents.PartitionKey.FromJsonString(partitionKeyJson); + } + + operation = new ItemBatchOperation( + operationType: operationType, + operationIndex: operationIndex, + id: id, + requestOptions: requestOptions) + { + ParsedPartitionKey = parsedPartitionKey, + ResourceBody = resourceBody + }; + + return Result.Success; + } + } +} diff --git 
a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchResponsePayloadWriter.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchResponsePayloadWriter.cs new file mode 100644 index 0000000000..5f18953896 --- /dev/null +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchResponsePayloadWriter.cs @@ -0,0 +1,112 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Tests +{ + using System; + using System.Collections.Generic; + using System.IO; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.Serialization.HybridRow; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.IO; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.Layouts; + using Microsoft.Azure.Cosmos.Serialization.HybridRow.RecordIO; + using Microsoft.Azure.Documents; + using Microsoft.VisualStudio.TestTools.UnitTesting; + + internal class BatchResponsePayloadWriter + { + private List results; + + public BatchResponsePayloadWriter(List results) + { + this.results = results; + } + + internal async Task GeneratePayloadAsync() + { + MemoryStream responseStream = new MemoryStream(); + Assert.AreEqual(Result.Success, await responseStream.WriteRecordIOAsync(default(Segment), this.WriteOperationResult)); + responseStream.Position = 0; + return responseStream; + } + + private Result WriteOperationResult(long index, out ReadOnlyMemory buffer) + { + if (index >= this.results.Count) + { + buffer = ReadOnlyMemory.Empty; + return Result.Success; + } + + RowBuffer row = new RowBuffer(2 * 1024); + row.InitLayout(HybridRowVersion.V1, BatchSchemaProvider.BatchResultLayout, BatchSchemaProvider.BatchLayoutResolver); + Result r = RowWriter.WriteBuffer(ref row, this.results[(int)index], BatchResponsePayloadWriter.WriteResult); + if (r != Result.Success) + { + buffer = null; + return r; + } + + MemoryStream output = new MemoryStream(row.Length); + row.WriteTo(output); + buffer = new Memory(output.GetBuffer(), 0, (int)output.Length); + return r; + } + + private static Result WriteResult(ref RowWriter writer, TypeArgument typeArg, CosmosBatchOperationResult result) + { + Result r = writer.WriteInt32("statusCode", (int)result.StatusCode); + if (r != Result.Success) + { + return r; + } + + if (result.SubStatusCode != SubStatusCodes.Unknown) + { + r = writer.WriteInt32("subStatusCode", (int)result.SubStatusCode); + if (r != Result.Success) + { + return r; + } + } + + if (result.ETag != null) + { + r = writer.WriteString("eTag", result.ETag); + if (r != Result.Success) + { + return r; + } + } + + if (result.ResourceStream != null) + { + r = writer.WriteBinary("resourceBody", BatchResponsePayloadWriter.StreamToBytes(result.ResourceStream)); + if (r != Result.Success) + { + return r; + } + } + + if (result.RetryAfter != null) + { + r = writer.WriteUInt32("retryAfterMilliseconds", (uint)result.RetryAfter.TotalMilliseconds); + if (r != Result.Success) + { + return r; + } + } + + return Result.Success; + } + + private static byte[] StreamToBytes(MemoryStream stream) + { + byte[] bytes = new byte[stream.Length]; + stream.Read(bytes, 0, bytes.Length); + return bytes; + } + } +} diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs new file mode 100644 index 0000000000..1148dd54bb --- 
/dev/null +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs @@ -0,0 +1,270 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Tests +{ + using System; + using System.Collections.Generic; + using System.IO; + using System.Linq; + using System.Net; + using System.Text; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Documents; + using Microsoft.VisualStudio.TestTools.UnitTesting; + + [TestClass] + public class BatchSchemaTests + { + [TestMethod] + [Owner("abpai")] + public async Task BatchRequestSerializationAsync() + { + const int maxBodySize = 5 * 1024; + const int maxOperationCount = 10; + const string partitionKey1 = "pk1"; + + ItemBatchOperation[] operations = new ItemBatchOperation[] + { + new ItemBatchOperation( + operationType: OperationType.Create, + operationIndex: 0) + { + ResourceBody = new byte[] { 0x41, 0x42 } + }, + new ItemBatchOperation( + id: "id2", + operationType: OperationType.Replace, + operationIndex: 1, + requestOptions: new ItemRequestOptions() + { + IfMatchEtag = "theCondition" + }) + }; + + ServerBatchRequest batchRequest = await SinglePartitionKeyServerBatchRequest.CreateAsync( + new Cosmos.PartitionKey(partitionKey1), + new ArraySegment(operations), + maxBodySize, + maxOperationCount, + serializer: new CosmosJsonSerializerCore(), + cancellationToken: CancellationToken.None); + + Assert.AreEqual(2, batchRequest.Operations.Count); + + using (MemoryStream payload = batchRequest.TransferBodyStream()) + { + Assert.IsNotNull(payload); + + List readOperations = await new BatchRequestPayloadReader().ReadPayloadAsync(payload); + Assert.AreEqual(2, readOperations.Count); + ItemBatchOperationEqualityComparer comparer = new ItemBatchOperationEqualityComparer(); + Assert.IsTrue(comparer.Equals(operations[0], readOperations[0])); + Assert.IsTrue(comparer.Equals(operations[1], readOperations[1])); + } + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchRequestSerializationFillAsync() + { + const int maxBodySize = 5 * 1024; + const int maxOperationCount = 10; + const int operationBodySize = 2 * 1024; + const string partitionKey1 = "pk1"; + const string id = "random"; + ItemBatchOperation[] operations = new ItemBatchOperation[] + { + new ItemBatchOperation( + operationType: OperationType.Replace, + id: id, + operationIndex: 0) + { + ResourceBody = Encoding.UTF8.GetBytes(new string('w', operationBodySize)) + }, + new ItemBatchOperation( + operationType: OperationType.Create, + operationIndex: 1) + { + ResourceBody = Encoding.UTF8.GetBytes(new string('x', operationBodySize)) + }, + new ItemBatchOperation( + operationType: OperationType.Upsert, + operationIndex: 2) + { + ResourceBody = Encoding.UTF8.GetBytes(new string('y', operationBodySize)) + }, + new ItemBatchOperation( + operationType: OperationType.Create, + operationIndex: 3) + { + ResourceBody = Encoding.UTF8.GetBytes(new string('z', operationBodySize)) + } + }; + ServerBatchRequest batchRequest = await SinglePartitionKeyServerBatchRequest.CreateAsync( + new Cosmos.PartitionKey(partitionKey1), + new ArraySegment(operations), + maxBodySize, + maxOperationCount, + serializer: new CosmosJsonSerializerCore(), + cancellationToken: CancellationToken.None); + + Assert.AreEqual(2, batchRequest.Operations.Count); + + using (MemoryStream payload = 
batchRequest.TransferBodyStream()) + { + Assert.IsNotNull(payload); + + List readOperations = await new BatchRequestPayloadReader().ReadPayloadAsync(payload); + Assert.AreEqual(2, readOperations.Count); + + ItemBatchOperationEqualityComparer comparer = new ItemBatchOperationEqualityComparer(); + Assert.IsTrue(comparer.Equals(operations[0], readOperations[0])); + Assert.IsTrue(comparer.Equals(operations[1], readOperations[1])); + } + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchResponseDeserializationAsync() + { + List results = new List(); + + results.Add(new CosmosBatchOperationResult(HttpStatusCode.Conflict)); + + results.Add( + new CosmosBatchOperationResult(HttpStatusCode.OK) + { + ResourceStream = new MemoryStream(new byte[] { 0x41, 0x42 }, index: 0, count: 2, writable: false, publiclyVisible: true), + ETag = "1234" + }); + + results.Add( + new CosmosBatchOperationResult((HttpStatusCode)StatusCodes.TooManyRequests) + { + RetryAfter = TimeSpan.FromMilliseconds(360) + }); + + MemoryStream responseContent = await new BatchResponsePayloadWriter(results).GeneratePayloadAsync(); + + CosmosJsonSerializer serializer = new CosmosJsonSerializerCore(); + SinglePartitionKeyServerBatchRequest batchResponse = await SinglePartitionKeyServerBatchRequest.CreateAsync( + partitionKey: null, + operations: new ArraySegment( + new ItemBatchOperation[] + { + new ItemBatchOperation(OperationType.Read, operationIndex: 0, id: "someId") + }), + maxBodyLength: 100, + maxOperationCount: 1, + serializer: serializer, + cancellationToken: CancellationToken.None); + CosmosBatchResponse batchresponse = await CosmosBatchResponse.PopulateFromContentAsync( + new CosmosResponseMessage(HttpStatusCode.OK) { Content = responseContent }, + batchResponse, + serializer); + + Assert.IsNotNull(batchresponse); + Assert.IsTrue(batchresponse.IsSuccessStatusCode, "batchresponse.IsSuccessStatusCode" + batchresponse.StatusCode); + Assert.AreEqual(3, batchresponse.Count); + + CosmosBatchOperationResultEqualityComparer comparer = new CosmosBatchOperationResultEqualityComparer(); + Assert.IsTrue(comparer.Equals(results[0], batchresponse[0])); + Assert.IsTrue(comparer.Equals(results[1], batchresponse[1])); + Assert.IsTrue(comparer.Equals(results[2], batchresponse[2])); + } + + private class ItemBatchOperationEqualityComparer : IEqualityComparer + { + public bool Equals(ItemBatchOperation x, ItemBatchOperation y) + { + return x.Id == y.Id + && x.OperationType == y.OperationType + && x.OperationIndex == y.OperationIndex + && this.Equals(x.RequestOptions, y.RequestOptions) + && x.ResourceBody.Span.SequenceEqual(y.ResourceBody.Span); + } + + private bool Equals(ItemRequestOptions x, ItemRequestOptions y) + { + if (x == null && y == null) + { + return true; + } + else if (x != null && y != null) + { + CosmosRequestMessage xMessage = new CosmosRequestMessage(); + CosmosRequestMessage yMessage = new CosmosRequestMessage(); + x.FillRequestOptions(xMessage); + y.FillRequestOptions(yMessage); + + foreach (string headerName in xMessage.Headers) + { + if (xMessage.Headers[headerName] != yMessage.Headers[headerName]) + { + return false; + } + } + + return true; + } + + return false; + } + + public int GetHashCode(ItemBatchOperation obj) + { + int hashCode = 1660235553; + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(obj.Id); + hashCode = (hashCode * -1521134295) + obj.OperationType.GetHashCode(); + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(obj.RequestOptions); + hashCode = 
(hashCode * -1521134295) + obj.OperationIndex.GetHashCode(); + hashCode = (hashCode * -1521134295) + EqualityComparer>.Default.GetHashCode(obj.ResourceBody); + return hashCode; + } + } + + private class CosmosBatchOperationResultEqualityComparer : IEqualityComparer + { + public bool Equals(CosmosBatchOperationResult x, CosmosBatchOperationResult y) + { + return x.StatusCode == y.StatusCode + && x.SubStatusCode == y.SubStatusCode + && x.ETag == y.ETag + && x.RetryAfter == y.RetryAfter + && this.Equals(x.ResourceStream, y.ResourceStream); + } + + private bool Equals(MemoryStream x, MemoryStream y) + { + if (x == null && y == null) + { + return true; + } + else if (x != null && y != null) + { + if (x.Length != y.Length) + { + return false; + } + + return x.GetBuffer().SequenceEqual(y.GetBuffer()); + } + + return false; + } + + public int GetHashCode(CosmosBatchOperationResult obj) + { + int hashCode = 1176625765; + hashCode = (hashCode * -1521134295) + obj.StatusCode.GetHashCode(); + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(obj.ETag); + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(obj.RetryAfter); + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(obj.SubStatusCode); + return hashCode; + } + } + } +} diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs new file mode 100644 index 0000000000..587bddab98 --- /dev/null +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs @@ -0,0 +1,538 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Tests +{ + using System; + using System.Collections.Generic; + using System.IO; + using System.Linq; + using System.Net; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.Client.Core.Tests; + using Microsoft.Azure.Documents; + using Microsoft.VisualStudio.TestTools.UnitTesting; + using Moq; + using Newtonsoft.Json; + + [TestClass] + public class CosmosBatchUnitTests + { + private const string DatabaseId = "mockDatabase"; + + private const string ContainerId = "mockContainer"; + + private const string PartitionKey1 = "somePartKey"; + + [TestMethod] + [Owner("abpai")] + public async Task BatchInvalidOptionsAsync() + { + CosmosContainer container = CosmosBatchUnitTests.GetCosmosContainer(); + + List badBatchOptionsList = new List() + { + new RequestOptions() + { + IfMatchEtag = "cond", + } + }; + + foreach (RequestOptions batchOptions in badBatchOptionsList) + { + CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) + .ReadItem("someId") + .ExecuteAsync(batchOptions); + + Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); + } + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchInvalidItemOptionsAsync() + { + CosmosContainer container = CosmosBatchUnitTests.GetCosmosContainer(); + + List badItemOptionsList = new List() + { + new ItemRequestOptions() + { + ConsistencyLevel = Microsoft.Azure.Cosmos.ConsistencyLevel.Strong + }, + new ItemRequestOptions() + { + PreTriggers = new List() { "pre" } + }, + new ItemRequestOptions() + { + PostTriggers = new List() { "post" } + }, + new ItemRequestOptions() + { + SessionToken = "sess" + }, + new ItemRequestOptions() + { + Properties = new Dictionary + { + // EPK without string representation + { WFConstants.BackendHeaders.EffectivePartitionKey, new byte[1] { 0x41 } } + } + }, + new ItemRequestOptions() + { + Properties = new Dictionary + { + // EPK string without corresponding byte representation + { WFConstants.BackendHeaders.EffectivePartitionKeyString, "epk" } + } + } + }; + + foreach (ItemRequestOptions itemOptions in badItemOptionsList) + { + CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) + .ReplaceItem("someId", new TestItem("repl"), itemOptions) + .ExecuteAsync(); + + Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); + } + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchNoOperationsAsync() + { + CosmosContainer container = CosmosBatchUnitTests.GetCosmosContainer(); + CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) + .ExecuteAsync(); + + Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchCrudRequestAsync() + { + Random random = new Random(); + + TestItem createItem = new TestItem("create"); + byte[] createStreamContent = new byte[20]; + random.NextBytes(createStreamContent); + byte[] createStreamBinaryId = new byte[20]; + random.NextBytes(createStreamBinaryId); + int createTtl = 45; + ItemRequestOptions createRequestOptions = new ItemRequestOptions() + { + Properties = new Dictionary() + { + { WFConstants.BackendHeaders.BinaryId, createStreamBinaryId }, + { WFConstants.BackendHeaders.TimeToLiveInSeconds, createTtl.ToString() }, + }, + 
IndexingDirective = Microsoft.Azure.Cosmos.IndexingDirective.Exclude + }; + + string readId = Guid.NewGuid().ToString(); + byte[] readStreamBinaryId = new byte[20]; + random.NextBytes(readStreamBinaryId); + ItemRequestOptions readRequestOptions = new ItemRequestOptions() + { + Properties = new Dictionary() + { + { WFConstants.BackendHeaders.BinaryId, readStreamBinaryId } + }, + IfNoneMatchEtag = "readCondition" + }; + + TestItem replaceItem = new TestItem("repl"); + byte[] replaceStreamContent = new byte[20]; + random.NextBytes(replaceStreamContent); + const string replaceStreamId = "replStream"; + byte[] replaceStreamBinaryId = new byte[20]; + random.NextBytes(replaceStreamBinaryId); + ItemRequestOptions replaceRequestOptions = new ItemRequestOptions() + { + Properties = new Dictionary() + { + { WFConstants.BackendHeaders.BinaryId, replaceStreamBinaryId } + }, + IfMatchEtag = "replCondition", + IndexingDirective = Microsoft.Azure.Cosmos.IndexingDirective.Exclude + }; + + TestItem upsertItem = new TestItem("upsert"); + byte[] upsertStreamContent = new byte[20]; + random.NextBytes(upsertStreamContent); + byte[] upsertStreamBinaryId = new byte[20]; + random.NextBytes(upsertStreamBinaryId); + ItemRequestOptions upsertRequestOptions = new ItemRequestOptions() + { + Properties = new Dictionary() + { + { WFConstants.BackendHeaders.BinaryId, upsertStreamBinaryId } + }, + IfMatchEtag = "upsertCondition", + IndexingDirective = Microsoft.Azure.Cosmos.IndexingDirective.Exclude + }; + + string deleteId = Guid.NewGuid().ToString(); + byte[] deleteStreamBinaryId = new byte[20]; + random.NextBytes(deleteStreamBinaryId); + ItemRequestOptions deleteRequestOptions = new ItemRequestOptions() + { + Properties = new Dictionary() + { + { WFConstants.BackendHeaders.BinaryId, deleteStreamBinaryId } + }, + IfNoneMatchEtag = "delCondition" + }; + + CosmosJsonSerializerCore jsonSerializer = new CosmosJsonSerializerCore(); + BatchTestHandler testHandler = new BatchTestHandler((request, operations) => + { + Assert.AreEqual(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1).ToString(), request.Headers.PartitionKey); + Assert.AreEqual(bool.TrueString, request.Headers[HttpConstants.HttpHeaders.IsBatchAtomic]); + Assert.AreEqual(bool.TrueString, request.Headers[HttpConstants.HttpHeaders.IsBatchOrdered]); + Assert.IsFalse(request.Headers.TryGetValue(HttpConstants.HttpHeaders.ShouldBatchContinueOnError, out string unused)); + + Assert.AreEqual(16, operations.Count); + + int operationIndex = 0; + + // run the loop twice, once for operations without item request options, and one for with item request options + for (int loopCount = 0; loopCount < 2; loopCount++) + { + bool hasItemRequestOptions = loopCount == 1; + + ItemBatchOperation operation = operations[operationIndex++]; + Assert.AreEqual(OperationType.Create, operation.OperationType); + Assert.IsNull(operation.Id); + Assert.AreEqual(createItem, CosmosBatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); + CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? createRequestOptions : null, operation.RequestOptions); + + operation = operations[operationIndex++]; + Assert.AreEqual(OperationType.Read, operation.OperationType); + Assert.AreEqual(readId, operation.Id); + CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? 
readRequestOptions : null, operation.RequestOptions); + + operation = operations[operationIndex++]; + Assert.AreEqual(OperationType.Replace, operation.OperationType); + Assert.AreEqual(replaceItem.Id, operation.Id); + Assert.AreEqual(replaceItem, CosmosBatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); + CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? replaceRequestOptions : null, operation.RequestOptions); + + operation = operations[operationIndex++]; + Assert.AreEqual(OperationType.Upsert, operation.OperationType); + Assert.IsNull(operation.Id); + Assert.AreEqual(upsertItem, CosmosBatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); + CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? upsertRequestOptions : null, operation.RequestOptions); + + operation = operations[operationIndex++]; + Assert.AreEqual(OperationType.Delete, operation.OperationType); + Assert.AreEqual(deleteId, operation.Id); + CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? deleteRequestOptions : null, operation.RequestOptions); + + operation = operations[operationIndex++]; + Assert.AreEqual(OperationType.Create, operation.OperationType); + Assert.IsNull(operation.Id); + Assert.IsTrue(operation.ResourceBody.Span.SequenceEqual(createStreamContent)); + CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? createRequestOptions : null, operation.RequestOptions); + + operation = operations[operationIndex++]; + Assert.AreEqual(OperationType.Replace, operation.OperationType); + Assert.AreEqual(replaceStreamId, operation.Id); + Assert.IsTrue(operation.ResourceBody.Span.SequenceEqual(replaceStreamContent)); + CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? replaceRequestOptions : null, operation.RequestOptions); + + operation = operations[operationIndex++]; + Assert.AreEqual(OperationType.Upsert, operation.OperationType); + Assert.IsNull(operation.Id); + Assert.IsTrue(operation.ResourceBody.Span.SequenceEqual(upsertStreamContent)); + CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? 
upsertRequestOptions : null, operation.RequestOptions); + } + + return Task.FromResult(new CosmosResponseMessage(HttpStatusCode.OK)); + }); + + CosmosContainer container = CosmosBatchUnitTests.GetCosmosContainer(testHandler); + + CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) + .CreateItem(createItem) + .ReadItem(readId) + .ReplaceItem(replaceItem.Id, replaceItem) + .UpsertItem(upsertItem) + .DeleteItem(deleteId) + + // stream + .CreateItemStream(new MemoryStream(createStreamContent)) + .ReplaceItemStream(replaceStreamId, new MemoryStream(replaceStreamContent)) + .UpsertItemStream(new MemoryStream(upsertStreamContent)) + + // regular with options + .CreateItem(createItem, createRequestOptions) + .ReadItem(readId, readRequestOptions) + .ReplaceItem(replaceItem.Id, replaceItem, replaceRequestOptions) + .UpsertItem(upsertItem, upsertRequestOptions) + .DeleteItem(deleteId, deleteRequestOptions) + + // stream with options + .CreateItemStream(new MemoryStream(createStreamContent), createRequestOptions) + .ReplaceItemStream(replaceStreamId, new MemoryStream(replaceStreamContent), replaceRequestOptions) + .UpsertItemStream(new MemoryStream(upsertStreamContent), upsertRequestOptions) + .ExecuteAsync(); + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchSingleServerResponseAsync() + { + List<CosmosBatchOperationResult> expectedResults = new List<CosmosBatchOperationResult>(); + CosmosJsonSerializerCore jsonSerializer = new CosmosJsonSerializerCore(); + TestItem testItem = new TestItem("tst"); + + Stream itemStream = jsonSerializer.ToStream(testItem); + MemoryStream resourceStream = itemStream as MemoryStream; + if (resourceStream == null) + { + resourceStream = new MemoryStream(); + await itemStream.CopyToAsync(resourceStream); + resourceStream.Position = 0; + } + + expectedResults.Add( + new CosmosBatchOperationResult(HttpStatusCode.OK) + { + ETag = "theETag", + SubStatusCode = (SubStatusCodes)1100, + ResourceStream = resourceStream + }); + expectedResults.Add(new CosmosBatchOperationResult(HttpStatusCode.Conflict)); + + double requestCharge = 3.6; + + TestHandler testHandler = new TestHandler(async (request, cancellationToken) => + { + CosmosResponseMessage responseMessage = new CosmosResponseMessage(HttpStatusCode.OK, requestMessage: null, errorMessage: null) + { + Content = await new BatchResponsePayloadWriter(expectedResults).GeneratePayloadAsync() + }; + + responseMessage.Headers.RequestCharge = requestCharge; + return responseMessage; + }); + + CosmosContainer container = CosmosBatchUnitTests.GetCosmosContainer(testHandler); + + CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) + .ReadItem("id1") + .ReadItem("id2") + .ExecuteAsync(); + + Assert.AreEqual(HttpStatusCode.OK, batchResponse.StatusCode); + Assert.AreEqual(requestCharge, batchResponse.RequestCharge); + + CosmosBatchOperationResult<TestItem> result0 = batchResponse.GetOperationResultAtIndex<TestItem>(0); + Assert.AreEqual(expectedResults[0].StatusCode, result0.StatusCode); + Assert.AreEqual(expectedResults[0].SubStatusCode, result0.SubStatusCode); + Assert.AreEqual(expectedResults[0].ETag, result0.ETag); + Assert.AreEqual(testItem, result0.Resource); + + Assert.AreEqual(expectedResults[1].StatusCode, batchResponse[1].StatusCode); + Assert.AreEqual(SubStatusCodes.Unknown, batchResponse[1].SubStatusCode); + Assert.IsNull(batchResponse[1].ETag); + Assert.IsNull(batchResponse[1].ResourceStream); + } + + /// + /// Test to make sure IsFeedRequest is true for Batch operation + /// + 
[TestMethod] + public void BatchIsFeedRequest() + { + Assert.IsTrue(GatewayStoreClient.IsFeedRequest(OperationType.Batch)); + } + + /// + /// Test to make sure IsWriteOperation is true for batch operation + /// + [TestMethod] + public void BatchIsWriteOperation() + { + Assert.IsTrue(OperationType.Batch.IsWriteOperation()); + } + + private static async Task GetBatchResponseMessageAsync(List operations, int rateLimitedOperationCount = 0) + { + CosmosBatchOperationResult okOperationResult = new CosmosBatchOperationResult(HttpStatusCode.OK); + CosmosBatchOperationResult rateLimitedOperationResult = new CosmosBatchOperationResult((HttpStatusCode)StatusCodes.TooManyRequests); + + List resultsFromServer = new List(); + for (int operationIndex = 0; operationIndex < operations.Count - rateLimitedOperationCount; operationIndex++) + { + resultsFromServer.Add(okOperationResult); + } + + for (int index = 0; index < rateLimitedOperationCount; index++) + { + resultsFromServer.Add(rateLimitedOperationResult); + } + + HttpStatusCode batchStatus = rateLimitedOperationCount > 0 ? (HttpStatusCode)StatusCodes.MultiStatus : HttpStatusCode.OK; + + return new CosmosResponseMessage(batchStatus, requestMessage: null, errorMessage: null) + { + Content = await new BatchResponsePayloadWriter(resultsFromServer).GeneratePayloadAsync() + }; + } + + private static void VerifyItemRequestOptionsAreEqual(ItemRequestOptions expected, ItemRequestOptions actual) + { + if (expected != null) + { + Assert.AreEqual(expected.IfMatchEtag, actual.IfMatchEtag); + Assert.AreEqual(expected.IfNoneMatchEtag, actual.IfNoneMatchEtag); + + if (expected.IndexingDirective.HasValue) + { + Assert.AreEqual(expected.IndexingDirective.Value, actual.IndexingDirective.Value); + } + else + { + Assert.IsTrue(!actual.IndexingDirective.HasValue); + } + + if (expected.Properties != null) + { + Assert.IsNotNull(actual.Properties); + if (expected.Properties.TryGetValue(WFConstants.BackendHeaders.BinaryId, out object expectedBinaryIdObj)) + { + byte[] expectedBinaryId = expectedBinaryIdObj as byte[]; + Assert.IsTrue(actual.Properties.TryGetValue(WFConstants.BackendHeaders.BinaryId, out object actualBinaryIdObj)); + byte[] actualBinaryId = actualBinaryIdObj as byte[]; + CollectionAssert.AreEqual(expectedBinaryId, actualBinaryId); + } + + if (expected.Properties.TryGetValue(WFConstants.BackendHeaders.EffectivePartitionKey, out object expectedEpkObj)) + { + byte[] expectedEpk = expectedEpkObj as byte[]; + Assert.IsTrue(actual.Properties.TryGetValue(WFConstants.BackendHeaders.EffectivePartitionKey, out object actualEpkObj)); + byte[] actualEpk = actualEpkObj as byte[]; + CollectionAssert.AreEqual(expectedEpk, actualEpk); + } + + if (expected.Properties.TryGetValue(WFConstants.BackendHeaders.TimeToLiveInSeconds, out object expectedTtlObj)) + { + string expectedTtlStr = expectedTtlObj as string; + Assert.IsTrue(actual.Properties.TryGetValue(WFConstants.BackendHeaders.TimeToLiveInSeconds, out object actualTtlObj)); + Assert.AreEqual(expectedTtlStr, actualTtlObj as string); + } + } + } + else + { + Assert.IsNull(actual); + } + } + + private static CosmosContainer GetCosmosContainer(TestHandler testHandler = null) + { + CosmosClient client = MockCosmosUtil.CreateMockCosmosClient((builder) => builder.AddCustomHandlers(testHandler)); + CosmosDatabaseCore database = new CosmosDatabaseCore(client.ClientContext, CosmosBatchUnitTests.DatabaseId); + CosmosContainerCore cosmosContainerCore = new CosmosContainerCore(client.ClientContext, database, 
CosmosBatchUnitTests.ContainerId); + return cosmosContainerCore; + } + + private static CosmosContainerCore GetMockedContainer(string containerName = null) + { + Mock mockedContainer = MockCosmosUtil.CreateMockContainer(containerName: containerName); + mockedContainer.Setup(c => c.ClientContext).Returns(CosmosBatchUnitTests.GetMockedClientContext()); + return mockedContainer.Object; + } + + private static CosmosClientContext GetMockedClientContext() + { + Mock mockContext = new Mock(); + mockContext.Setup(x => x.ClientOptions).Returns(MockCosmosUtil.GetDefaultConfiguration()); + mockContext.Setup(x => x.DocumentClient).Returns(new MockDocumentClient()); + mockContext.Setup(x => x.CosmosSerializer).Returns(new CosmosJsonSerializerCore()); + return mockContext.Object; + } + + private static TestItem Deserialize(Memory body, CosmosJsonSerializer serializer) + { + return serializer.FromStream(new MemoryStream(body.Span.ToArray())); + } + + private class BatchTestHandler : TestHandler + { + private readonly Func, Task> func; + + public BatchTestHandler(Func, Task> func) + { + this.func = func; + } + + public List>> Received { get; } = new List>>(); + + public override async Task SendAsync( + CosmosRequestMessage request, CancellationToken cancellationToken) + { + BatchTestHandler.VerifyServerRequestProperties(request); + List operations = await new BatchRequestPayloadReader().ReadPayloadAsync(request.Content); + + this.Received.Add(new Tuple>(request, operations)); + return await this.func(request, operations); + } + + private static void VerifyServerRequestProperties(CosmosRequestMessage request) + { + Assert.AreEqual(OperationType.Batch, request.OperationType); + Assert.AreEqual(ResourceType.Document, request.ResourceType); + Assert.AreEqual(HttpConstants.HttpMethods.Post, request.Method.ToString()); + + Uri expectedRequestUri = new Uri( + string.Format( + "dbs/{0}/colls/{1}", + CosmosBatchUnitTests.DatabaseId, + CosmosBatchUnitTests.ContainerId), + UriKind.Relative); + Assert.AreEqual(expectedRequestUri, request.RequestUri); + } + } + + private class TestItem + { + [JsonProperty("id")] + public string Id { get; set; } + + public string Attr { get; set; } + + public TestItem(string attr) + { + this.Id = Guid.NewGuid().ToString(); + this.Attr = attr; + } + + public override bool Equals(object obj) + { + TestItem other = obj as TestItem; + if (other == null) + { + return false; + } + + return this.Id == other.Id && this.Attr == other.Attr; + } + + public override int GetHashCode() + { + int hashCode = -2138196334; + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(this.Id); + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(this.Attr); + return hashCode; + } + } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/BatchUnitTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/BatchUnitTests.cs deleted file mode 100644 index 9ddcd09049..0000000000 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/BatchUnitTests.cs +++ /dev/null @@ -1,34 +0,0 @@ -//------------------------------------------------------------ -// Copyright (c) Microsoft Corporation. All rights reserved. -//------------------------------------------------------------ - -namespace Microsoft.Azure.Cosmos -{ - using Microsoft.Azure.Documents; - using Microsoft.VisualStudio.TestTools.UnitTesting; - - /// - /// Test cases for Batch. 
More test cases to come after hybrid row support is enabled - /// - [TestClass] - public class BatchUnitTests - { - /// - /// Test to make sure IsFeedRequest is true for Batch operation - /// - [TestMethod] - public void TestIsFeedRequestForBatchOperation() - { - Assert.IsTrue(GatewayStoreClient.IsFeedRequest(OperationType.Batch)); - } - - /// - /// Test to make sure IsWriteOperation is true for batch operation - /// - [TestMethod] - public void BatchIsWriteOperation() - { - Assert.IsTrue(OperationType.Batch.IsWriteOperation()); - } - } -} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Microsoft.Azure.Cosmos.Tests.csproj b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Microsoft.Azure.Cosmos.Tests.csproj index 34a206c01b..b96bdb595b 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Microsoft.Azure.Cosmos.Tests.csproj +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Microsoft.Azure.Cosmos.Tests.csproj @@ -22,7 +22,7 @@ - + @@ -130,6 +130,17 @@ PreserveNewest + + + + + + C:\Users\abpai\.nuget\packages\microsoft.azure.cosmos.direct.myget\3.0.0.33-preview\runtimes\any\lib\netstandard2.0\Microsoft.Azure.Cosmos.Core.dll + + + C:\Users\abpai\.nuget\packages\microsoft.azure.cosmos.direct.myget\3.0.0.33-preview\runtimes\any\lib\netstandard2.0\Microsoft.Azure.Cosmos.Serialization.HybridRow.dll + + false From 1f1e8f0fed66033226c2706321ce06c421264504 Mon Sep 17 00:00:00 2001 From: "Abhijit P. Pai" Date: Fri, 14 Jun 2019 11:05:04 +0530 Subject: [PATCH 02/12] Renames due to merge --- .../src/Batch/BatchExecutor.cs | 4 +- .../src/Batch/CosmosBatch.cs | 10 ++-- .../src/Batch/CosmosBatchResponse.cs | 10 ++-- .../src/Batch/ItemBatchOperation.cs | 4 +- .../src/Batch/ServerBatchRequest.cs | 4 +- .../SinglePartitionKeyServerBatchRequest.cs | 4 +- .../Batch/BatchSinglePartitionKeyTests.cs | 44 +++++++------- .../Batch/CosmosBatchTestBase.cs | 58 +++++++++---------- .../Batch/BatchSchemaTests.cs | 6 +- .../Batch/CosmosBatchUnitTests.cs | 22 +++---- 10 files changed, 83 insertions(+), 83 deletions(-) diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs index b3374930da..654237e89d 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs @@ -16,7 +16,7 @@ namespace Microsoft.Azure.Cosmos internal sealed class BatchExecutor { - private readonly CosmosContainerCore container; + private readonly ContainerCore container; private readonly CosmosClient client; @@ -31,7 +31,7 @@ internal sealed class BatchExecutor private readonly int maxServerRequestOperationCount; public BatchExecutor( - CosmosContainerCore container, + ContainerCore container, PartitionKey partitionKey, IReadOnlyList operations, RequestOptions batchOptions, diff --git a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs b/Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs index a81c436218..f28c3a9d52 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs @@ -17,7 +17,7 @@ public class CosmosBatch { private readonly PartitionKey partitionKey; - private readonly CosmosContainerCore container; + private readonly ContainerCore container; private List operations; @@ -26,7 +26,7 @@ public class CosmosBatch /// /// Container that has items on which batch operations are to be performed. /// The partition key for all items in the batch. . 
- internal CosmosBatch(CosmosContainerCore container, PartitionKey partitionKey) + internal CosmosBatch(ContainerCore container, PartitionKey partitionKey) { this.container = container; this.partitionKey = partitionKey; @@ -36,7 +36,7 @@ internal CosmosBatch(CosmosContainerCore container, PartitionKey partitionKey) /// /// Adds an operation to create an item into the batch. /// - /// A JSON serializable object that must contain an id property. to implement a custom serializer. + /// A JSON serializable object that must contain an id property. to implement a custom serializer. /// (Optional) The options for the item request. . /// The instance with the operation added. /// The type of item to be created. @@ -91,7 +91,7 @@ public virtual CosmosBatch ReadItem(string id, ItemRequestOptions itemRequestOpt /// /// Adds an operation to upsert an item into the batch. /// - /// A JSON serializable object that must contain an id property. to implement a custom serializer. + /// A JSON serializable object that must contain an id property. to implement a custom serializer. /// (Optional) The options for the item request. . /// The instance with the operation added. /// The type of item to be created. @@ -130,7 +130,7 @@ public virtual CosmosBatch UpsertItemStream(Stream resourceStream, ItemRequestOp /// Adds an operation to replace an item into the batch. /// /// The cosmos item id. - /// A JSON serializable object that must contain an id property. to implement a custom serializer. + /// A JSON serializable object that must contain an id property. to implement a custom serializer. /// (Optional) The options for the item request. . /// The instance with the operation added. /// The type of item to be created. diff --git a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs b/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs index e1b228a937..9ee031de13 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs @@ -32,7 +32,7 @@ internal CosmosBatchResponse( TimeSpan? retryAfter, string activityId, ServerBatchRequest serverRequest, - CosmosJsonSerializer serializer) + CosmosSerializer serializer) : this(statusCode, subStatusCode, errorMessage, requestCharge, retryAfter, activityId, serverRequest.Operations, serializer) { } @@ -76,7 +76,7 @@ private CosmosBatchResponse( TimeSpan? 
retryAfter, string activityId, IReadOnlyList operations, - CosmosJsonSerializer serializer) + CosmosSerializer serializer) { this.StatusCode = statusCode; this.SubStatusCode = subStatusCode; @@ -137,7 +137,7 @@ public virtual bool IsSuccessStatusCode internal virtual SubStatusCodes SubStatusCode { get; } - internal virtual CosmosJsonSerializer Serializer { get; } + internal virtual CosmosSerializer Serializer { get; } internal IReadOnlyList Operations { get; set; } @@ -209,7 +209,7 @@ IEnumerator IEnumerable.GetEnumerator() internal static async Task FromResponseMessageAsync( CosmosResponseMessage responseMessage, ServerBatchRequest serverRequest, - CosmosJsonSerializer serializer) + CosmosSerializer serializer) { using (responseMessage) { @@ -293,7 +293,7 @@ internal static async Task FromResponseMessageAsync( internal static async Task PopulateFromContentAsync( CosmosResponseMessage responseMessage, ServerBatchRequest serverRequest, - CosmosJsonSerializer serializer) + CosmosSerializer serializer) { List results = new List(); diff --git a/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs b/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs index 8f7851f049..135eb623f2 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs @@ -283,7 +283,7 @@ internal int GetApproximateSerializedLength() /// /// Serializer to serialize user provided objects to JSON. /// for cancellation. - internal virtual async Task MaterializeResourceAsync(CosmosJsonSerializer serializer, CancellationToken cancellationToken) + internal virtual async Task MaterializeResourceAsync(CosmosSerializer serializer, CancellationToken cancellationToken) { if (this.body.IsEmpty && this.ResourceStream != null) { @@ -343,7 +343,7 @@ public ItemBatchOperation( /// /// Serializer to serialize user provided objects to JSON. /// for cancellation. - internal override Task MaterializeResourceAsync(CosmosJsonSerializer serializer, CancellationToken cancellationToken) + internal override Task MaterializeResourceAsync(CosmosSerializer serializer, CancellationToken cancellationToken) { if (this.body.IsEmpty && this.Resource != null) { diff --git a/Microsoft.Azure.Cosmos/src/Batch/ServerBatchRequest.cs b/Microsoft.Azure.Cosmos/src/Batch/ServerBatchRequest.cs index 974a4c7f73..29ebc57e06 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/ServerBatchRequest.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/ServerBatchRequest.cs @@ -22,7 +22,7 @@ internal abstract class ServerBatchRequest private readonly int maxOperationCount; - private readonly CosmosJsonSerializer serializer; + private readonly CosmosSerializer serializer; private ArraySegment operations; @@ -42,7 +42,7 @@ internal abstract class ServerBatchRequest /// Maximum length allowed for the request body. /// Maximum number of operations allowed in the request. /// Serializer to serialize user provided objects to JSON. 
- protected ServerBatchRequest(int maxBodyLength, int maxOperationCount, CosmosJsonSerializer serializer) + protected ServerBatchRequest(int maxBodyLength, int maxOperationCount, CosmosSerializer serializer) { this.maxBodyLength = maxBodyLength; this.maxOperationCount = maxOperationCount; diff --git a/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs b/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs index 06df013ff3..69e1af237d 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs @@ -22,7 +22,7 @@ private SinglePartitionKeyServerBatchRequest( PartitionKey partitionKey, int maxBodyLength, int maxOperationCount, - CosmosJsonSerializer serializer) + CosmosSerializer serializer) : base(maxBodyLength, maxOperationCount, serializer) { this.PartitionKey = partitionKey; @@ -49,7 +49,7 @@ public static async Task CreateAsync( ArraySegment operations, int maxBodyLength, int maxOperationCount, - CosmosJsonSerializer serializer, + CosmosSerializer serializer, CancellationToken cancellationToken) { SinglePartitionKeyServerBatchRequest request = new SinglePartitionKeyServerBatchRequest(partitionKey, maxBodyLength, maxOperationCount, serializer); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs index 8e457b19dc..567f329e74 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs @@ -87,7 +87,7 @@ public async Task BatchCrudSharedThroughputStreamAsync() [Description("Verify batch with multiple operations on the same entity works")] public async Task BatchOrderedAsync() { - CosmosContainer container = CosmosBatchTestBase.JsonContainer; + Container container = CosmosBatchTestBase.JsonContainer; await this.CreateJsonTestDocsAsync(container); TestDoc firstDoc = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); @@ -114,7 +114,7 @@ public async Task BatchOrderedAsync() [Description("Verify eTags passed to batch operations or returned in batch results flow as expected")] public async Task BatchItemETagAsync() { - CosmosContainer container = CosmosBatchTestBase.JsonContainer; + Container container = CosmosBatchTestBase.JsonContainer; await this.CreateJsonTestDocsAsync(container); { TestDoc testDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); @@ -123,8 +123,8 @@ public async Task BatchItemETagAsync() testDocToReplace.Cost++; ItemResponse readResponse = await CosmosBatchTestBase.JsonContainer.ReadItemAsync( - CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1), - this.TestDocPk1ExistingA.Id); + this.TestDocPk1ExistingA.Id, + CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); ItemRequestOptions firstReplaceOptions = new ItemRequestOptions() { @@ -178,7 +178,7 @@ public async Task BatchItemTimeToLiveAsync() // Verify with schematized containers where we are allowed to send TTL as a header const bool isSchematized = true; const bool isStream = true; - CosmosContainer container = CosmosBatchTestBase.SchematizedContainer; + Container container = CosmosBatchTestBase.SchematizedContainer; await this.CreateSchematizedTestDocsAsync(container); { TestDoc testDocToCreate = 
CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); @@ -230,7 +230,7 @@ public async Task BatchItemTimeToLiveAsync() [Owner("abpai")] public async Task BatchLargerThanServerRequestAsync() { - CosmosContainer container = CosmosBatchTestBase.JsonContainer; + Container container = CosmosBatchTestBase.JsonContainer; const int operationCount = 20; int appxDocSize = Constants.MaxDirectModeBatchRequestBodySizeInBytes / operationCount; @@ -270,7 +270,7 @@ public async Task BatchLargerThanServerRequestAsync() [Owner("abpai")] public async Task BatchWithTooManyOperationsAsync() { - CosmosContainer container = CosmosBatchTestBase.JsonContainer; + Container container = CosmosBatchTestBase.JsonContainer; await this.CreateJsonTestDocsAsync(container); const int operationCount = Constants.MaxOperationsInDirectModeBatchRequest + 1; @@ -308,7 +308,7 @@ public async Task BatchWithTooManyOperationsAsync() [Owner("abpai")] public async Task BatchServerResponseTooLargeAsync() { - CosmosContainer container = CosmosBatchTestBase.JsonContainer; + Container container = CosmosBatchTestBase.JsonContainer; const int operationCount = 10; int appxDocSizeInBytes = 1 * 1024 * 1024; @@ -335,7 +335,7 @@ public async Task BatchServerResponseTooLargeAsync() [Owner("abpai")] public async Task BatchReadsOnlyAsync() { - CosmosContainer container = CosmosBatchTestBase.JsonContainer; + Container container = CosmosBatchTestBase.JsonContainer; await this.CreateJsonTestDocsAsync(container); CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) @@ -355,7 +355,7 @@ public async Task BatchReadsOnlyAsync() Assert.AreEqual(this.TestDocPk1ExistingC, batchResponse.GetOperationResultAtIndex(2).Resource); } - private async Task RunCrudAsync(bool isStream, bool isSchematized, bool useEpk, CosmosContainer container, RequestOptions batchOptions = null) + private async Task RunCrudAsync(bool isStream, bool isSchematized, bool useEpk, Container container, RequestOptions batchOptions = null) { if (isSchematized) { @@ -448,18 +448,18 @@ private async Task RunCrudAsync(bool isStream, bool isSchem [Description("Verify batch with a large set of read operations that is expected to be rate limited.")] public async Task BatchRateLimitingAsync() { - CosmosContainer containerWithDefaultRetryPolicy = CosmosBatchTestBase.LowThroughputJsonContainer; + Container containerWithDefaultRetryPolicy = CosmosBatchTestBase.LowThroughputJsonContainer; await this.CreateJsonTestDocsAsync(containerWithDefaultRetryPolicy); CosmosClient clientWithNoThrottleRetry = new CosmosClientBuilder( - CosmosBatchTestBase.Client.ClientOptions.EndPoint.ToString(), - CosmosBatchTestBase.Client.ClientOptions.AccountKey.Key) + CosmosBatchTestBase.Client.Endpoint.ToString(), + CosmosBatchTestBase.Client.AccountKey) .WithThrottlingRetryOptions( maxRetryWaitTimeOnThrottledRequests: default(TimeSpan), maxRetryAttemptsOnThrottledRequests: 0) .Build(); - CosmosContainer containerWithNoThrottleRetry = + Container containerWithNoThrottleRetry = clientWithNoThrottleRetry.GetContainer(CosmosBatchTestBase.Database.Id, CosmosBatchTestBase.LowThroughputJsonContainer.Id); // The second batch started should be rate limited by the backend in admission control. 
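                // Illustrative sketch, assuming the BatchResponse/BatchOperationResult shape defined in this
                // patch: with throttling retries disabled on the client, a rate limited batch would surface
                // as a 429 on the response rather than being retried internally, and the backend's back-off
                // hint is available on the per-operation results.
                //
                //   BatchResponse throttledResponse = await containerWithNoThrottleRetry
                //       .CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1))
                //       .ReadItem(this.TestDocPk1ExistingA.Id)
                //       .ExecuteAsync();
                //   if (throttledResponse.StatusCode == (HttpStatusCode)429)
                //   {
                //       TimeSpan backOff = throttledResponse[0].RetryAfter; // back-off hint from the backend
                //   }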
@@ -480,7 +480,7 @@ public async Task BatchRateLimitingAsync() } } - private async Task RunTwoLargeBatchesAsync(CosmosContainer container) + private async Task RunTwoLargeBatchesAsync(Container container) { CosmosBatch batch1 = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); CosmosBatch batch2 = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); @@ -523,7 +523,7 @@ public async Task BatchWithCreateConflictSharedThroughputAsync() await this.RunBatchWithCreateConflictAsync(CosmosBatchTestBase.SharedThroughputContainer); } - private async Task RunBatchWithCreateConflictAsync(CosmosContainer container) + private async Task RunBatchWithCreateConflictAsync(Container container) { await this.CreateJsonTestDocsAsync(container); @@ -545,7 +545,7 @@ await this.RunWithErrorAsync( [Description("Verify batch with an invalid create operation rolls back prior operations")] public async Task BatchWithInvalidCreateAsync() { - CosmosContainer container = CosmosBatchTestBase.JsonContainer; + Container container = CosmosBatchTestBase.JsonContainer; // partition key mismatch between doc and and value passed in to the operation await this.RunWithErrorAsync( @@ -559,7 +559,7 @@ await this.RunWithErrorAsync( [Description("Verify batch with a read operation on a non-existent entity rolls back prior operations")] public async Task BatchWithReadOfNonExistentEntityAsync() { - CosmosContainer container = CosmosBatchTestBase.JsonContainer; + Container container = CosmosBatchTestBase.JsonContainer; await this.RunWithErrorAsync( container, batch => batch.ReadItem(Guid.NewGuid().ToString()), @@ -571,7 +571,7 @@ await this.RunWithErrorAsync( [Description("Verify batch with a replace operation on a stale entity rolls back prior operations")] public async Task BatchWithReplaceOfStaleEntityAsync() { - CosmosContainer container = CosmosBatchTestBase.JsonContainer; + Container container = CosmosBatchTestBase.JsonContainer; await this.CreateJsonTestDocsAsync(container); TestDoc staleTestDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingA); @@ -595,15 +595,15 @@ await this.RunWithErrorAsync( [Description("Verify batch with a delete operation on a non-existent entity rolls back prior operations")] public async Task BatchWithDeleteOfNonExistentEntityAsync() { - CosmosContainer container = CosmosBatchTestBase.JsonContainer; + Container container = CosmosBatchTestBase.JsonContainer; await this.RunWithErrorAsync( container, batch => batch.DeleteItem(Guid.NewGuid().ToString()), HttpStatusCode.NotFound); } - private async Task RunWithErrorAsync( - CosmosContainer container, + private async Task RunWithErrorAsync( + Container container, Action appendOperation, HttpStatusCode expectedFailedOperationStatusCode) { diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs index d2e4ec9f9d..0c0c39a0c3 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs @@ -31,19 +31,19 @@ public class CosmosBatchTestBase protected static CosmosDatabase GatewayDatabase { get; set; } - protected static CosmosContainer JsonContainer { get; set; } + protected static Container JsonContainer { get; set; } - protected static CosmosContainer GatewayJsonContainer { get; set; } + protected static 
Container GatewayJsonContainer { get; set; } - protected static CosmosContainer LowThroughputJsonContainer { get; set; } + protected static Container LowThroughputJsonContainer { get; set; } - protected static CosmosContainer GatewayLowThroughputJsonContainer { get; set; } + protected static Container GatewayLowThroughputJsonContainer { get; set; } - protected static CosmosContainer SchematizedContainer { get; set; } + protected static Container SchematizedContainer { get; set; } - protected static CosmosContainer GatewaySchematizedContainer { get; set; } + protected static Container GatewaySchematizedContainer { get; set; } - protected static CosmosContainer SharedThroughputContainer { get; set; } + protected static Container SharedThroughputContainer { get; set; } internal static PartitionKeyDefinition PartitionKeyDefinition { get; set; } @@ -84,23 +84,23 @@ private static void InitializeDirectContainers() partitionKeyDefinition.Paths.Add("/Status"); CosmosBatchTestBase.LowThroughputJsonContainer = CosmosBatchTestBase.Database.CreateContainerAsync( - new CosmosContainerSettings() + new ContainerProperties() { Id = Guid.NewGuid().ToString(), PartitionKey = partitionKeyDefinition }, - requestUnitsPerSecond: 400).GetAwaiter().GetResult().Container; + throughput: 400).GetAwaiter().GetResult().Container; - CosmosBatchTestBase.PartitionKeyDefinition = ((CosmosContainerCore)CosmosBatchTestBase.LowThroughputJsonContainer).GetPartitionKeyDefinitionAsync(CancellationToken.None).GetAwaiter().GetResult(); + CosmosBatchTestBase.PartitionKeyDefinition = ((ContainerCore)CosmosBatchTestBase.LowThroughputJsonContainer).GetPartitionKeyDefinitionAsync(CancellationToken.None).GetAwaiter().GetResult(); // Create a container with at least 2 physical partitions for effective cross-partition testing CosmosBatchTestBase.JsonContainer = CosmosBatchTestBase.Database.CreateContainerAsync( - new CosmosContainerSettings() + new ContainerProperties() { Id = Guid.NewGuid().ToString(), PartitionKey = CosmosBatchTestBase.PartitionKeyDefinition }, - requestUnitsPerSecond: 12000).GetAwaiter().GetResult().Container; + throughput: 12000).GetAwaiter().GetResult().Container; Serialization.HybridRow.Schemas.Schema testSchema = TestDoc.GetSchema(); Namespace testNamespace = new Namespace() @@ -116,7 +116,7 @@ private static void InitializeDirectContainers() CosmosBatchTestBase.LayoutResolver = new LayoutResolverNamespace(testNamespace); CosmosBatchTestBase.TestDocLayout = CosmosBatchTestBase.LayoutResolver.Resolve(testSchema.SchemaId); - BatchContainerSettings schematizedContainerSettings = new BatchContainerSettings() + BatchContainerProperties schematizedContainerProperties = new BatchContainerProperties() { Id = Guid.NewGuid().ToString(), PartitionKey = CosmosBatchTestBase.PartitionKeyDefinition, @@ -128,11 +128,11 @@ private static void InitializeDirectContainers() TableSchema = testNamespace, }; - schematizedContainerSettings.SchemaPolicy = schemaPolicy; + schematizedContainerProperties.SchemaPolicy = schemaPolicy; CosmosBatchTestBase.SchematizedContainer = CosmosBatchTestBase.Database.CreateContainerAsync( - schematizedContainerSettings, - requestUnitsPerSecond: 12000).GetAwaiter().GetResult().Container; + schematizedContainerProperties, + throughput: 12000).GetAwaiter().GetResult().Container; } private static void InitializeGatewayContainers() @@ -148,12 +148,12 @@ private static void InitializeGatewayContainers() private static void InitializeSharedThroughputContainer() { CosmosClient client = 
TestCommon.CreateCosmosClient(); - CosmosDatabase db = client.CreateDatabaseAsync(string.Format("Shared_{0}", Guid.NewGuid().ToString("N")), requestUnitsPerSecond: 20000).GetAwaiter().GetResult().Database; + CosmosDatabase db = client.CreateDatabaseAsync(string.Format("Shared_{0}", Guid.NewGuid().ToString("N")), throughput: 20000).GetAwaiter().GetResult().Database; for (int index = 0; index < 5; index++) { ContainerResponse containerResponse = db.CreateContainerAsync( - new CosmosContainerSettings + new ContainerProperties { Id = Guid.NewGuid().ToString(), PartitionKey = CosmosBatchTestBase.PartitionKeyDefinition @@ -191,7 +191,7 @@ public static void ClassClean() CosmosBatchTestBase.Client.Dispose(); } - protected virtual async Task CreateJsonTestDocsAsync(CosmosContainer container) + protected virtual async Task CreateJsonTestDocsAsync(Container container) { this.TestDocPk1ExistingA = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); this.TestDocPk1ExistingB = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); @@ -199,7 +199,7 @@ protected virtual async Task CreateJsonTestDocsAsync(CosmosContainer container) this.TestDocPk1ExistingD = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); } - protected virtual async Task CreateSchematizedTestDocsAsync(CosmosContainer container) + protected virtual async Task CreateSchematizedTestDocsAsync(Container container) { this.TestDocPk1ExistingA = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); this.TestDocPk1ExistingB = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); @@ -256,7 +256,7 @@ protected static TestDoc StreamToTestDoc(Stream stream, bool isSchematized) } } - protected static async Task VerifyByReadAsync(CosmosContainer container, TestDoc doc, bool isStream = false, bool isSchematized = false, bool useEpk = false, string eTag = null) + protected static async Task VerifyByReadAsync(Container container, TestDoc doc, bool isStream = false, bool isSchematized = false, bool useEpk = false, string eTag = null) { Cosmos.PartitionKey partitionKey = CosmosBatchTestBase.GetPartitionKey(doc.Status, useEpk); @@ -264,7 +264,7 @@ protected static async Task VerifyByReadAsync(CosmosContainer container, TestDoc { string id = CosmosBatchTestBase.GetId(doc, isSchematized); ItemRequestOptions requestOptions = CosmosBatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk, isPartOfBatch: false); - CosmosResponseMessage response = await container.ReadItemStreamAsync(partitionKey, id, requestOptions); + CosmosResponseMessage response = await container.ReadItemStreamAsync(id, partitionKey, requestOptions); Assert.AreEqual(HttpStatusCode.OK, response.StatusCode); Assert.AreEqual(doc, CosmosBatchTestBase.StreamToTestDoc(response.Content, isSchematized)); @@ -276,7 +276,7 @@ protected static async Task VerifyByReadAsync(CosmosContainer container, TestDoc } else { - ItemResponse response = await container.ReadItemAsync(partitionKey, doc.Id); + ItemResponse response = await container.ReadItemAsync(doc.Id, partitionKey); Assert.AreEqual(HttpStatusCode.OK, response.StatusCode); Assert.AreEqual(doc, response.Resource); @@ -288,13 +288,13 @@ protected static async Task VerifyByReadAsync(CosmosContainer container, TestDoc } } - protected static async Task VerifyNotFoundAsync(CosmosContainer container, TestDoc doc, bool isSchematized = false, bool useEpk = false) + protected static async Task 
VerifyNotFoundAsync(Container container, TestDoc doc, bool isSchematized = false, bool useEpk = false) { string id = CosmosBatchTestBase.GetId(doc, isSchematized); Cosmos.PartitionKey partitionKey = CosmosBatchTestBase.GetPartitionKey(doc.Status, useEpk); ItemRequestOptions requestOptions = CosmosBatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk, isPartOfBatch: false); - CosmosResponseMessage response = await container.ReadItemStreamAsync(partitionKey, id, requestOptions); + CosmosResponseMessage response = await container.ReadItemStreamAsync(id, partitionKey, requestOptions); Assert.AreEqual(HttpStatusCode.NotFound, response.StatusCode); } @@ -386,7 +386,7 @@ protected static ItemRequestOptions GetItemRequestOptions(TestDoc doc, bool isSc return requestOptions; } - protected static async Task CreateJsonTestDocAsync(CosmosContainer container, object partitionKey, int minDesiredSize = 20) + protected static async Task CreateJsonTestDocAsync(Container container, object partitionKey, int minDesiredSize = 20) { TestDoc doc = CosmosBatchTestBase.PopulateTestDoc(partitionKey, minDesiredSize); ItemResponse createResponse = await container.CreateItemAsync(doc, CosmosBatchTestBase.GetPartitionKey(partitionKey)); @@ -394,12 +394,12 @@ protected static async Task CreateJsonTestDocAsync(CosmosContainer cont return doc; } - protected static async Task CreateSchematizedTestDocAsync(CosmosContainer container, object partitionKey, int? ttlInSeconds = null) + protected static async Task CreateSchematizedTestDocAsync(Container container, object partitionKey, int? ttlInSeconds = null) { TestDoc doc = CosmosBatchTestBase.PopulateTestDoc(partitionKey); CosmosResponseMessage createResponse = await container.CreateItemStreamAsync( - CosmosBatchTestBase.GetPartitionKey(partitionKey), doc.ToHybridRowStream(), + CosmosBatchTestBase.GetPartitionKey(partitionKey), CosmosBatchTestBase.GetItemRequestOptions(doc, isSchematized: true, isPartOfBatch: false, ttlInSeconds: ttlInSeconds)); Assert.AreEqual( HttpStatusCode.Created, @@ -615,7 +615,7 @@ private static void SkipBinaryField(BinaryReader binaryReader) } } - private class BatchContainerSettings : CosmosContainerSettings + private class BatchContainerProperties : ContainerProperties { [JsonProperty("schemaPolicy")] public SchemaPolicy SchemaPolicy { get; set; } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs index 1148dd54bb..d69444d233 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs @@ -149,7 +149,7 @@ public async Task BatchResponseDeserializationAsync() MemoryStream responseContent = await new BatchResponsePayloadWriter(results).GeneratePayloadAsync(); - CosmosJsonSerializer serializer = new CosmosJsonSerializerCore(); + CosmosSerializer serializer = new CosmosJsonSerializerCore(); SinglePartitionKeyServerBatchRequest batchResponse = await SinglePartitionKeyServerBatchRequest.CreateAsync( partitionKey: null, operations: new ArraySegment( @@ -197,8 +197,8 @@ private bool Equals(ItemRequestOptions x, ItemRequestOptions y) { CosmosRequestMessage xMessage = new CosmosRequestMessage(); CosmosRequestMessage yMessage = new CosmosRequestMessage(); - x.FillRequestOptions(xMessage); - y.FillRequestOptions(yMessage); + x.PopulateRequestOptions(xMessage); + y.PopulateRequestOptions(yMessage); 
foreach (string headerName in xMessage.Headers) { diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs index 587bddab98..bd166784f8 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs @@ -30,7 +30,7 @@ public class CosmosBatchUnitTests [Owner("abpai")] public async Task BatchInvalidOptionsAsync() { - CosmosContainer container = CosmosBatchUnitTests.GetCosmosContainer(); + Container container = CosmosBatchUnitTests.GetCosmosContainer(); List badBatchOptionsList = new List() { @@ -54,7 +54,7 @@ public async Task BatchInvalidOptionsAsync() [Owner("abpai")] public async Task BatchInvalidItemOptionsAsync() { - CosmosContainer container = CosmosBatchUnitTests.GetCosmosContainer(); + Container container = CosmosBatchUnitTests.GetCosmosContainer(); List badItemOptionsList = new List() { @@ -106,7 +106,7 @@ public async Task BatchInvalidItemOptionsAsync() [Owner("abpai")] public async Task BatchNoOperationsAsync() { - CosmosContainer container = CosmosBatchUnitTests.GetCosmosContainer(); + Container container = CosmosBatchUnitTests.GetCosmosContainer(); CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) .ExecuteAsync(); @@ -257,7 +257,7 @@ public async Task BatchCrudRequestAsync() return Task.FromResult(new CosmosResponseMessage(HttpStatusCode.OK)); }); - CosmosContainer container = CosmosBatchUnitTests.GetCosmosContainer(testHandler); + Container container = CosmosBatchUnitTests.GetCosmosContainer(testHandler); CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) .CreateItem(createItem) @@ -323,7 +323,7 @@ public async Task BatchSingleServerResponseAsync() return responseMessage; }); - CosmosContainer container = CosmosBatchUnitTests.GetCosmosContainer(testHandler); + Container container = CosmosBatchUnitTests.GetCosmosContainer(testHandler); CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) .ReadItem("id1") @@ -436,17 +436,17 @@ private static void VerifyItemRequestOptionsAreEqual(ItemRequestOptions expected } } - private static CosmosContainer GetCosmosContainer(TestHandler testHandler = null) + private static Container GetCosmosContainer(TestHandler testHandler = null) { CosmosClient client = MockCosmosUtil.CreateMockCosmosClient((builder) => builder.AddCustomHandlers(testHandler)); CosmosDatabaseCore database = new CosmosDatabaseCore(client.ClientContext, CosmosBatchUnitTests.DatabaseId); - CosmosContainerCore cosmosContainerCore = new CosmosContainerCore(client.ClientContext, database, CosmosBatchUnitTests.ContainerId); - return cosmosContainerCore; + ContainerCore container = new ContainerCore(client.ClientContext, database, CosmosBatchUnitTests.ContainerId); + return container; } - private static CosmosContainerCore GetMockedContainer(string containerName = null) + private static ContainerCore GetMockedContainer(string containerName = null) { - Mock mockedContainer = MockCosmosUtil.CreateMockContainer(containerName: containerName); + Mock mockedContainer = MockCosmosUtil.CreateMockContainer(containerName: containerName); mockedContainer.Setup(c => 
c.ClientContext).Returns(CosmosBatchUnitTests.GetMockedClientContext()); return mockedContainer.Object; } @@ -460,7 +460,7 @@ private static CosmosClientContext GetMockedClientContext() return mockContext.Object; } - private static TestItem Deserialize(Memory body, CosmosJsonSerializer serializer) + private static TestItem Deserialize(Memory body, CosmosSerializer serializer) { return serializer.FromStream(new MemoryStream(body.Span.ToArray())); } From c719782a1fc729dfc9d1d1e12c511c426c8518b2 Mon Sep 17 00:00:00 2001 From: "Abhijit P. Pai" Date: Fri, 14 Jun 2019 11:42:23 +0530 Subject: [PATCH 03/12] Remove Cosmos from Batch/BatchOperationResult/BatchResponse, use clientContext instead of client in BatchExecutor --- .../src/Batch/{CosmosBatch.cs => Batch.cs} | 54 ++-- .../src/Batch/BatchExecUtils.cs | 1 - .../src/Batch/BatchExecutor.cs | 27 +- ...ationResult.cs => BatchOperationResult.cs} | 22 +- ...osmosBatchResponse.cs => BatchResponse.cs} | 52 ++-- .../src/Resource/Container/Container.cs | 6 +- .../Resource/Container/ContainerCore.Items.cs | 4 +- .../Batch/BatchSinglePartitionKeyTests.cs | 234 +++++++++--------- ...osmosBatchTestBase.cs => BatchTestBase.cs} | 104 ++++---- .../Batch/BatchResponsePayloadWriter.cs | 6 +- .../Batch/BatchSchemaTests.cs | 16 +- ...mosBatchUnitTests.cs => BatchUnitTests.cs} | 70 +++--- 12 files changed, 296 insertions(+), 300 deletions(-) rename Microsoft.Azure.Cosmos/src/Batch/{CosmosBatch.cs => Batch.cs} (75%) rename Microsoft.Azure.Cosmos/src/Batch/{CosmosBatchOperationResult.cs => BatchOperationResult.cs} (86%) rename Microsoft.Azure.Cosmos/src/Batch/{CosmosBatchResponse.cs => BatchResponse.cs} (84%) rename Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/{CosmosBatchTestBase.cs => BatchTestBase.cs} (78%) rename Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/{CosmosBatchUnitTests.cs => BatchUnitTests.cs} (84%) diff --git a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs b/Microsoft.Azure.Cosmos/src/Batch/Batch.cs similarity index 75% rename from Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs rename to Microsoft.Azure.Cosmos/src/Batch/Batch.cs index f28c3a9d52..b12f69f695 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatch.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/Batch.cs @@ -11,9 +11,9 @@ namespace Microsoft.Azure.Cosmos using Microsoft.Azure.Documents; /// - /// Represents a batch of requests to Cosmos DB. + /// Represents a batch of requests that will be performed atomically against the Azure Cosmos DB service. /// - public class CosmosBatch + public class Batch { private readonly PartitionKey partitionKey; @@ -22,11 +22,11 @@ public class CosmosBatch private List operations; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// Container that has items on which batch operations are to be performed. /// The partition key for all items in the batch. . - internal CosmosBatch(ContainerCore container, PartitionKey partitionKey) + internal Batch(ContainerCore container, PartitionKey partitionKey) { this.container = container; this.partitionKey = partitionKey; @@ -38,9 +38,9 @@ internal CosmosBatch(ContainerCore container, PartitionKey partitionKey) /// /// A JSON serializable object that must contain an id property. to implement a custom serializer. /// (Optional) The options for the item request. . - /// The instance with the operation added. + /// The instance with the operation added. /// The type of item to be created. 
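        // Illustrative usage sketch, assuming a container partitioned on "/Status" and a JSON-serializable
        // POCO (TodoItem here is a placeholder type with Id and Status properties):
        //
        //   TodoItem item = new TodoItem { Id = "1", Status = "open" };
        //   BatchResponse response = await container.CreateBatch(new PartitionKey("open"))
        //       .CreateItem(item)
        //       .ReadItem("2")
        //       .DeleteItem("3")
        //       .ExecuteAsync();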
- public virtual CosmosBatch CreateItem(T item, ItemRequestOptions itemRequestOptions = null) + public virtual Batch CreateItem(T item, ItemRequestOptions itemRequestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Create, @@ -59,8 +59,8 @@ public virtual CosmosBatch CreateItem(T item, ItemRequestOptions itemRequestO /// The stream must have a UTF-8 encoded JSON object which contains an id property. /// /// (Optional) The options for the item request. . - /// The instance with the operation added. - public virtual CosmosBatch CreateItemStream(Stream resourceStream, ItemRequestOptions itemRequestOptions = null) + /// The instance with the operation added. + public virtual Batch CreateItemStream(Stream resourceStream, ItemRequestOptions itemRequestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Create, @@ -76,8 +76,8 @@ public virtual CosmosBatch CreateItemStream(Stream resourceStream, ItemRequestOp /// /// The cosmos item id. /// (Optional) The options for the item request. . - /// The instance with the operation added. - public virtual CosmosBatch ReadItem(string id, ItemRequestOptions itemRequestOptions = null) + /// The instance with the operation added. + public virtual Batch ReadItem(string id, ItemRequestOptions itemRequestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Read, @@ -93,9 +93,9 @@ public virtual CosmosBatch ReadItem(string id, ItemRequestOptions itemRequestOpt /// /// A JSON serializable object that must contain an id property. to implement a custom serializer. /// (Optional) The options for the item request. . - /// The instance with the operation added. + /// The instance with the operation added. /// The type of item to be created. - public virtual CosmosBatch UpsertItem(T item, ItemRequestOptions itemRequestOptions = null) + public virtual Batch UpsertItem(T item, ItemRequestOptions itemRequestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Upsert, @@ -114,8 +114,8 @@ public virtual CosmosBatch UpsertItem(T item, ItemRequestOptions itemRequestO /// The stream must have a UTF-8 encoded JSON object which contains an id property. /// /// (Optional) The options for the item request. . - /// The instance with the operation added. - public virtual CosmosBatch UpsertItemStream(Stream resourceStream, ItemRequestOptions itemRequestOptions = null) + /// The instance with the operation added. + public virtual Batch UpsertItemStream(Stream resourceStream, ItemRequestOptions itemRequestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Upsert, @@ -132,9 +132,9 @@ public virtual CosmosBatch UpsertItemStream(Stream resourceStream, ItemRequestOp /// The cosmos item id. /// A JSON serializable object that must contain an id property. to implement a custom serializer. /// (Optional) The options for the item request. . - /// The instance with the operation added. + /// The instance with the operation added. /// The type of item to be created. 
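        // Illustrative sketch of optimistic concurrency with ReplaceItem, mirroring BatchItemETagAsync in the
        // emulator tests (previouslyReadETag and updatedItem are placeholders):
        //
        //   ItemRequestOptions replaceOptions = new ItemRequestOptions { IfMatchEtag = previouslyReadETag };
        //   BatchResponse response = await container.CreateBatch(partitionKey)
        //       .ReplaceItem(updatedItem.Id, updatedItem, replaceOptions)
        //       .ExecuteAsync();
        //   // A stale etag is expected to fail the operation with HttpStatusCode.PreconditionFailed.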
- public virtual CosmosBatch ReplaceItem(string id, T item, ItemRequestOptions itemRequestOptions = null) + public virtual Batch ReplaceItem(string id, T item, ItemRequestOptions itemRequestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Replace, @@ -155,8 +155,8 @@ public virtual CosmosBatch ReplaceItem(string id, T item, ItemRequestOptions /// The stream must have a UTF-8 encoded JSON object which contains an id property. /// /// (Optional) The options for the item request. . - /// The instance with the operation added. - public virtual CosmosBatch ReplaceItemStream(string id, Stream resourceStream, ItemRequestOptions itemRequestOptions = null) + /// The instance with the operation added. + public virtual Batch ReplaceItemStream(string id, Stream resourceStream, ItemRequestOptions itemRequestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Replace, @@ -173,8 +173,8 @@ public virtual CosmosBatch ReplaceItemStream(string id, Stream resourceStream, I /// /// The cosmos item id. /// (Optional) The options for the item request. . - /// The instance with the operation added. - public virtual CosmosBatch DeleteItem(string id, ItemRequestOptions itemRequestOptions = null) + /// The instance with the operation added. + public virtual Batch DeleteItem(string id, ItemRequestOptions itemRequestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Delete, @@ -189,8 +189,8 @@ public virtual CosmosBatch DeleteItem(string id, ItemRequestOptions itemRequestO /// Executes the batch at the Azure Cosmos service as an asynchronous operation. /// /// (Optional) representing request cancellation. - /// An awaitable which contains the completion status and results of each operation. - public virtual Task ExecuteAsync(CancellationToken cancellationToken = default(CancellationToken)) + /// An awaitable which contains the completion status and results of each operation. + public virtual Task ExecuteAsync(CancellationToken cancellationToken = default(CancellationToken)) { return this.ExecuteAsync(requestOptions: null, cancellationToken: cancellationToken); } @@ -201,8 +201,8 @@ public virtual CosmosBatch DeleteItem(string id, ItemRequestOptions itemRequestO /// The cosmos item id. /// A containing the patch specification. /// (Optional) The options for the item request. . - /// The instance with the operation added. - internal virtual CosmosBatch PatchItemStream(string id, Stream patchStream, ItemRequestOptions itemRequestOptions = null) + /// The instance with the operation added. + internal virtual Batch PatchItemStream(string id, Stream patchStream, ItemRequestOptions itemRequestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Patch, @@ -219,14 +219,14 @@ internal virtual CosmosBatch PatchItemStream(string id, Stream patchStream, Item /// /// Options that apply to the batch. /// (Optional) representing request cancellation. - /// An awaitable which contains the completion status and results of each operation. - internal virtual Task ExecuteAsync(RequestOptions requestOptions, CancellationToken cancellationToken = default(CancellationToken)) + /// An awaitable which contains the completion status and results of each operation. 
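        // Illustrative sketch of inspecting the result of ExecuteAsync, using only members defined in this
        // patch (the indexer, GetOperationResultAtIndex, IsSuccessStatusCode); TodoItem is a placeholder type:
        //
        //   BatchResponse response = await batch.ExecuteAsync();
        //   if (response.IsSuccessStatusCode)
        //   {
        //       TodoItem created = response.GetOperationResultAtIndex<TodoItem>(0).Resource;
        //   }
        //   else
        //   {
        //       HttpStatusCode firstOperationStatus = response[0].StatusCode;
        //   }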
+ internal virtual Task ExecuteAsync(RequestOptions requestOptions, CancellationToken cancellationToken = default(CancellationToken)) { BatchExecUtils.GetServerRequestLimits(out int maxServerRequestBodyLength, out int maxServerRequestOperationCount); return this.ExecuteAsync(maxServerRequestBodyLength, maxServerRequestOperationCount, requestOptions, cancellationToken); } - internal Task ExecuteAsync( + internal Task ExecuteAsync( int maxServerRequestBodyLength, int maxServerRequestOperationCount, RequestOptions requestOptions = null, diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs index d1f41a14d2..e6f5eb67d4 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs @@ -90,7 +90,6 @@ public static void GetServerRequestLimits(out int maxServerRequestBodyLength, ou public static CosmosResponseMessage Validate( IReadOnlyList operations, RequestOptions batchOptions, - CosmosClient client, int? maxOperationCount = null) { string errorMessage = null; diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs index 654237e89d..afdb14e158 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs @@ -18,7 +18,7 @@ internal sealed class BatchExecutor { private readonly ContainerCore container; - private readonly CosmosClient client; + private readonly CosmosClientContext clientContext; private readonly IReadOnlyList inputOperations; @@ -39,7 +39,7 @@ public BatchExecutor( int maxServerRequestOperationCount) { this.container = container; - this.client = this.container.ClientContext.Client; + this.clientContext = this.container.ClientContext; this.inputOperations = operations; this.partitionKey = partitionKey; this.batchOptions = batchOptions; @@ -47,17 +47,16 @@ public BatchExecutor( this.maxServerRequestOperationCount = maxServerRequestOperationCount; } - public async Task ExecuteAsync(CancellationToken cancellationToken) + public async Task ExecuteAsync(CancellationToken cancellationToken) { CosmosResponseMessage validationResult = BatchExecUtils.Validate( this.inputOperations, this.batchOptions, - this.client, this.maxServerRequestOperationCount); if (!validationResult.IsSuccessStatusCode) { - return new CosmosBatchResponse( + return new BatchResponse( validationResult.StatusCode, validationResult.Headers.SubStatusCode, validationResult.ErrorMessage, @@ -72,18 +71,18 @@ public async Task ExecuteAsync(CancellationToken cancellati new ArraySegment(this.inputOperations.ToArray()), this.maxServerRequestBodyLength, this.maxServerRequestOperationCount, - serializer: this.client.ClientOptions.CosmosSerializerWithWrapperOrDefault, + serializer: this.clientContext.ClientOptions.CosmosSerializerWithWrapperOrDefault, cancellationToken: cancellationToken); } catch (RequestEntityTooLargeException ex) { - return new CosmosBatchResponse(ex.StatusCode ?? HttpStatusCode.RequestEntityTooLarge, ex.GetSubStatus(), ClientResources.BatchOperationTooLarge, this.inputOperations); + return new BatchResponse(ex.StatusCode ?? 
HttpStatusCode.RequestEntityTooLarge, ex.GetSubStatus(), ClientResources.BatchOperationTooLarge, this.inputOperations); } if (serverRequest.Operations.Count != this.inputOperations.Count) { // todo: should this be PayloadTooLarge - return new CosmosBatchResponse(HttpStatusCode.RequestEntityTooLarge, SubStatusCodes.Unknown, ClientResources.BatchTooLarge, this.inputOperations); + return new BatchResponse(HttpStatusCode.RequestEntityTooLarge, SubStatusCodes.Unknown, ClientResources.BatchTooLarge, this.inputOperations); } return await this.ExecuteServerRequestAsync(serverRequest, cancellationToken); @@ -95,15 +94,14 @@ public async Task ExecuteAsync(CancellationToken cancellati /// A server request with a set of operations on items. /// representing request cancellation. /// Response from the server or ServiceUnavailable response in case of exceptions. - private async Task ExecuteServerRequestAsync(SinglePartitionKeyServerBatchRequest serverRequest, CancellationToken cancellationToken) + private async Task ExecuteServerRequestAsync(SinglePartitionKeyServerBatchRequest serverRequest, CancellationToken cancellationToken) { try { using (Stream serverRequestPayload = serverRequest.TransferBodyStream()) { Debug.Assert(serverRequestPayload != null, "Server request payload expected to be non-null"); - CosmosResponseMessage cosmosResponseMessage = await ExecUtils.ProcessResourceOperationAsync( - this.client, + CosmosResponseMessage cosmosResponseMessage = await clientContext.ProcessResourceOperationStreamAsync( this.container.LinkUri, ResourceType.Document, OperationType.Batch, @@ -117,18 +115,17 @@ private async Task ExecuteServerRequestAsync(SinglePartitio requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchAtomic, bool.TrueString); requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchOrdered, bool.TrueString); }, - responseMessage => responseMessage, // response creator cancellationToken); - return await CosmosBatchResponse.FromResponseMessageAsync( + return await BatchResponse.FromResponseMessageAsync( cosmosResponseMessage, serverRequest, - this.client.ClientOptions.CosmosSerializerWithWrapperOrDefault); + this.clientContext.ClientOptions.CosmosSerializerWithWrapperOrDefault); } } catch (CosmosException ex) { - return new CosmosBatchResponse( + return new BatchResponse( HttpStatusCode.ServiceUnavailable, SubStatusCodes.Unknown, ex.Message, diff --git a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchOperationResult.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchOperationResult.cs similarity index 86% rename from Microsoft.Azure.Cosmos/src/Batch/CosmosBatchOperationResult.cs rename to Microsoft.Azure.Cosmos/src/Batch/BatchOperationResult.cs index f137a58813..085060311e 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchOperationResult.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchOperationResult.cs @@ -14,14 +14,14 @@ namespace Microsoft.Azure.Cosmos /// /// Represents a result for a specific operation that was part of a batch request. 
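    // Illustrative sketch of the per-operation surface this type exposes (StatusCode and ETag as used by the
    // emulator tests, ResourceStream as consumed by BatchResponse.GetOperationResultAtIndex):
    //
    //   BatchOperationResult result = response[1];
    //   if (result.IsSuccessStatusCode)
    //   {
    //       string etag = result.ETag;                      // returned for writes such as create/replace
    //       using (Stream body = result.ResourceStream)     // raw resource payload, when present
    //       {
    //           // deserialize with the client's serializer if a typed result is needed
    //       }
    //   }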
/// - public class CosmosBatchOperationResult + public class BatchOperationResult { - internal CosmosBatchOperationResult(HttpStatusCode statusCode) + internal BatchOperationResult(HttpStatusCode statusCode) { this.StatusCode = statusCode; } - internal CosmosBatchOperationResult(CosmosBatchOperationResult other) + internal BatchOperationResult(BatchOperationResult other) { this.StatusCode = other.StatusCode; this.SubStatusCode = other.SubStatusCode; @@ -30,7 +30,7 @@ internal CosmosBatchOperationResult(CosmosBatchOperationResult other) this.RetryAfter = other.RetryAfter; } - private CosmosBatchOperationResult() + private BatchOperationResult() { } @@ -80,7 +80,7 @@ public virtual bool IsSuccessStatusCode /// internal virtual SubStatusCodes SubStatusCode { get; set; } - internal static Result ReadOperationResult(Memory input, out CosmosBatchOperationResult batchOperationResult) + internal static Result ReadOperationResult(Memory input, out BatchOperationResult batchOperationResult) { RowBuffer row = new RowBuffer(input.Length); if (!row.ReadFrom( @@ -93,7 +93,7 @@ internal static Result ReadOperationResult(Memory input, out CosmosBatchOp } RowReader reader = new RowReader(ref row); - Result result = CosmosBatchOperationResult.ReadOperationResult(ref reader, out batchOperationResult); + Result result = BatchOperationResult.ReadOperationResult(ref reader, out batchOperationResult); if (result != Result.Success) { return result; @@ -108,9 +108,9 @@ internal static Result ReadOperationResult(Memory input, out CosmosBatchOp return Result.Success; } - private static Result ReadOperationResult(ref RowReader reader, out CosmosBatchOperationResult batchOperationResult) + private static Result ReadOperationResult(ref RowReader reader, out BatchOperationResult batchOperationResult) { - batchOperationResult = new CosmosBatchOperationResult(); + batchOperationResult = new BatchOperationResult(); while (reader.Read()) { Result r; @@ -178,15 +178,15 @@ private static Result ReadOperationResult(ref RowReader reader, out CosmosBatchO /// /// The type of the Resource which this class wraps. #pragma warning disable SA1402 // File may only contain a single type - public class CosmosBatchOperationResult : CosmosBatchOperationResult + public class BatchOperationResult : BatchOperationResult #pragma warning restore SA1402 // File may only contain a single type { /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// CosmosBatchOperationResult with stream resource. /// Deserialized resource. - internal CosmosBatchOperationResult(CosmosBatchOperationResult result, T resource) + internal BatchOperationResult(BatchOperationResult result, T resource) : base(result) { this.Resource = resource; diff --git a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs similarity index 84% rename from Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs rename to Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs index 9ee031de13..12ed8fb980 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/CosmosBatchResponse.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs @@ -17,14 +17,14 @@ namespace Microsoft.Azure.Cosmos /// Response of a batch request. 
/// #pragma warning disable CA1710 // Identifiers should have correct suffix - public class CosmosBatchResponse : IReadOnlyList, IDisposable + public class BatchResponse : IReadOnlyList, IDisposable #pragma warning restore CA1710 // Identifiers should have correct suffix { private bool isDisposed; - private List results; + private List results; - internal CosmosBatchResponse( + internal BatchResponse( HttpStatusCode statusCode, SubStatusCodes subStatusCode, string errorMessage, @@ -38,14 +38,14 @@ internal CosmosBatchResponse( } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// This method is intended to be used only when a response from the server is not available. /// /// Indicates why the batch was not processed. /// Provides further details about why the batch was not processed. /// The reason for failure. /// Operations that were to be executed. - internal CosmosBatchResponse( + internal BatchResponse( HttpStatusCode statusCode, SubStatusCodes subStatusCode, string errorMessage, @@ -62,13 +62,13 @@ internal CosmosBatchResponse( } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// - protected CosmosBatchResponse() + protected BatchResponse() { } - private CosmosBatchResponse( + private BatchResponse( HttpStatusCode statusCode, SubStatusCodes subStatusCode, string errorMessage, @@ -146,7 +146,7 @@ public virtual bool IsSuccessStatusCode /// /// 0-based index of the operation in the batch whose result needs to be returned. /// Result of operation at the provided index in the batch. - public virtual CosmosBatchOperationResult this[int index] + public virtual BatchOperationResult this[int index] { get { @@ -160,9 +160,9 @@ public virtual CosmosBatchOperationResult this[int index] /// Type to which the Resource in the operation result needs to be deserialized to, when present. /// 0-based index of the operation in the batch whose result needs to be returned. /// Result of batch operation that contains a Resource deserialized to specified type. - public virtual CosmosBatchOperationResult GetOperationResultAtIndex(int index) + public virtual BatchOperationResult GetOperationResultAtIndex(int index) { - CosmosBatchOperationResult result = this.results[index]; + BatchOperationResult result = this.results[index]; T resource = default(T); if (result.ResourceStream != null) @@ -170,14 +170,14 @@ public virtual CosmosBatchOperationResult GetOperationResultAtIndex(int in resource = this.Serializer.FromStream(result.ResourceStream); } - return new CosmosBatchOperationResult(result, resource); + return new BatchOperationResult(result, resource); } /// /// Gets an enumerator over the operation results. /// /// Enumerator over the operation results. - public virtual IEnumerator GetEnumerator() + public virtual IEnumerator GetEnumerator() { return this.results.GetEnumerator(); } @@ -192,7 +192,7 @@ public virtual IEnumerable GetActivityIds() } /// - /// Disposes the current . + /// Disposes the current . 
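        // Illustrative sketch: BatchResponse implements IDisposable, so a typical call site would wrap
        // ExecuteAsync in a using block so that any resources held by the response are released:
        //
        //   using (BatchResponse response = await batch.ExecuteAsync())
        //   {
        //       // inspect results here
        //   }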
/// public void Dispose() { @@ -206,21 +206,21 @@ IEnumerator IEnumerable.GetEnumerator() return this.GetEnumerator(); } - internal static async Task FromResponseMessageAsync( + internal static async Task FromResponseMessageAsync( CosmosResponseMessage responseMessage, ServerBatchRequest serverRequest, CosmosSerializer serializer) { using (responseMessage) { - CosmosBatchResponse response = null; + BatchResponse response = null; if (responseMessage.IsSuccessStatusCode && responseMessage.Content != null) { - response = await CosmosBatchResponse.PopulateFromContentAsync(responseMessage, serverRequest, serializer); + response = await BatchResponse.PopulateFromContentAsync(responseMessage, serverRequest, serializer); if (response == null) { // Convert any payload read failures as InternalServerError - response = new CosmosBatchResponse( + response = new BatchResponse( HttpStatusCode.InternalServerError, SubStatusCodes.Unknown, ClientResources.ServerResponseDeserializationFailure, @@ -233,7 +233,7 @@ internal static async Task FromResponseMessageAsync( } else { - response = new CosmosBatchResponse( + response = new BatchResponse( responseMessage.StatusCode, responseMessage.Headers.SubStatusCode, responseMessage.ErrorMessage, @@ -250,7 +250,7 @@ internal static async Task FromResponseMessageAsync( { // Server should be guaranteeing number of results equal to operations when // batch request is successful - so fail as InternalServerError if this is not the case. - response = new CosmosBatchResponse( + response = new BatchResponse( HttpStatusCode.InternalServerError, SubStatusCodes.Unknown, ClientResources.InvalidServerResponse, @@ -274,11 +274,11 @@ internal static async Task FromResponseMessageAsync( } } - response.results = new List(); + response.results = new List(); for (int i = 0; i < serverRequest.Operations.Count; i++) { response.results.Add( - new CosmosBatchOperationResult(response.StatusCode) + new BatchOperationResult(response.StatusCode) { SubStatusCode = response.SubStatusCode, RetryAfter = TimeSpan.FromMilliseconds(retryAfterMilliseconds), @@ -290,12 +290,12 @@ internal static async Task FromResponseMessageAsync( } } - internal static async Task PopulateFromContentAsync( + internal static async Task PopulateFromContentAsync( CosmosResponseMessage responseMessage, ServerBatchRequest serverRequest, CosmosSerializer serializer) { - List results = new List(); + List results = new List(); int resizerInitialCapacity = 81920; if (responseMessage.Content.CanSeek) @@ -306,7 +306,7 @@ internal static async Task PopulateFromContentAsync( Result res = await responseMessage.Content.ReadRecordIOAsync( record => { - Result r = CosmosBatchOperationResult.ReadOperationResult(record, out CosmosBatchOperationResult operationResult); + Result r = BatchOperationResult.ReadOperationResult(record, out BatchOperationResult operationResult); if (r != Result.Success) { return r; @@ -322,7 +322,7 @@ record => return null; } - CosmosBatchResponse response = new CosmosBatchResponse( + BatchResponse response = new BatchResponse( responseMessage.StatusCode, responseMessage.Headers.SubStatusCode, responseMessage.ErrorMessage, diff --git a/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs b/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs index abcc92dfe8..68dfea2ff6 100644 --- a/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs +++ b/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs @@ -1143,10 +1143,10 @@ public abstract ChangeFeedProcessorBuilder DefineChangeFeedEstimator( 
TimeSpan? estimationPeriod = null); /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The partition key for all items in the batch. . - /// An instance of - public abstract CosmosBatch CreateBatch(PartitionKey partitionKey); + /// An instance of + public abstract Batch CreateBatch(PartitionKey partitionKey); } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs b/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs index 6676a8f39f..35926ca856 100644 --- a/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs +++ b/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs @@ -449,9 +449,9 @@ public override ChangeFeedProcessorBuilder DefineChangeFeedEstimator( applyBuilderConfiguration: changeFeedEstimatorCore.ApplyBuildConfiguration); } - public override CosmosBatch CreateBatch(PartitionKey partitionKey) + public override Batch CreateBatch(PartitionKey partitionKey) { - return new CosmosBatch(this, partitionKey); + return new Batch(this, partitionKey); } internal FeedIterator GetStandByFeedIterator( diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs index 567f329e74..8352827607 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs @@ -12,18 +12,18 @@ namespace Microsoft.Azure.Cosmos.SDK.EmulatorTests using Microsoft.VisualStudio.TestTools.UnitTesting; [TestClass] - public class BatchSinglePartitionKeyTests : CosmosBatchTestBase + public class BatchSinglePartitionKeyTests : BatchTestBase { [ClassInitialize] public static void ClassInitialize(TestContext context) { - CosmosBatchTestBase.ClassInit(context); + BatchTestBase.ClassInit(context); } [ClassCleanup] public static void ClassCleanup() { - CosmosBatchTestBase.ClassClean(); + BatchTestBase.ClassClean(); } [TestMethod] @@ -31,7 +31,7 @@ public static void ClassCleanup() [Description("Verify batch CRUD with default options at all levels (client/batch/operation) and all operations expected to pass works")] public async Task BatchCrudAsync() { - await this.RunCrudAsync(isStream: false, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.JsonContainer); + await this.RunCrudAsync(isStream: false, isSchematized: false, useEpk: false, container: BatchTestBase.JsonContainer); } [TestMethod] @@ -39,7 +39,7 @@ public async Task BatchCrudAsync() [Description("Verify batch CRUD with JSON stream operation resource bodies with default options and all operations expected to pass works")] public async Task BatchCrudStreamAsync() { - await this.RunCrudAsync(isStream: true, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.JsonContainer); + await this.RunCrudAsync(isStream: true, isSchematized: false, useEpk: false, container: BatchTestBase.JsonContainer); } [TestMethod] @@ -47,7 +47,7 @@ public async Task BatchCrudStreamAsync() [Description("Verify batch CRUD with HybridRow stream operation resource bodies and EPK with default options and all operations expected to pass works")] public async Task BatchCrudHybridRowStreamWithEpkAsync() { - await this.RunCrudAsync(isStream: true, isSchematized: true, useEpk: true, container: 
CosmosBatchTestBase.SchematizedContainer); + await this.RunCrudAsync(isStream: true, isSchematized: true, useEpk: true, container: BatchTestBase.SchematizedContainer); } [TestMethod] @@ -55,7 +55,7 @@ public async Task BatchCrudHybridRowStreamWithEpkAsync() [Description("Verify batch CRUD with default options at all levels (client/batch/operation) and all operations expected to pass works in gateway mode")] public async Task BatchCrudGatewayAsync() { - await this.RunCrudAsync(isStream: false, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.GatewayJsonContainer); + await this.RunCrudAsync(isStream: false, isSchematized: false, useEpk: false, container: BatchTestBase.GatewayJsonContainer); } [TestMethod] @@ -63,7 +63,7 @@ public async Task BatchCrudGatewayAsync() [Description("Verify batch CRUD with JSON stream operation resource bodies with default options and all operations expected to pass works in gateway mode")] public async Task BatchCrudStreamGatewayAsync() { - await this.RunCrudAsync(isStream: true, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.GatewayJsonContainer); + await this.RunCrudAsync(isStream: true, isSchematized: false, useEpk: false, container: BatchTestBase.GatewayJsonContainer); } [TestMethod] @@ -71,7 +71,7 @@ public async Task BatchCrudStreamGatewayAsync() [Description("Verify batch CRUD with default options at all levels (client/batch/operation) and all operations expected to pass works in shared throughput ")] public async Task BatchCrudSharedThroughputAsync() { - await this.RunCrudAsync(isStream: false, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.SharedThroughputContainer); + await this.RunCrudAsync(isStream: false, isSchematized: false, useEpk: false, container: BatchTestBase.SharedThroughputContainer); } [TestMethod] @@ -79,7 +79,7 @@ public async Task BatchCrudSharedThroughputAsync() [Description("Verify batch CRUD with JSON stream operation resource bodies with default options and all operations expected to pass works in shared throughput")] public async Task BatchCrudSharedThroughputStreamAsync() { - await this.RunCrudAsync(isStream: true, isSchematized: false, useEpk: false, container: CosmosBatchTestBase.SharedThroughputContainer); + await this.RunCrudAsync(isStream: true, isSchematized: false, useEpk: false, container: BatchTestBase.SharedThroughputContainer); } [TestMethod] @@ -87,15 +87,15 @@ public async Task BatchCrudSharedThroughputStreamAsync() [Description("Verify batch with multiple operations on the same entity works")] public async Task BatchOrderedAsync() { - Container container = CosmosBatchTestBase.JsonContainer; + Container container = BatchTestBase.JsonContainer; await this.CreateJsonTestDocsAsync(container); - TestDoc firstDoc = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + TestDoc firstDoc = BatchTestBase.PopulateTestDoc(this.PartitionKey1); TestDoc replaceDoc = this.GetTestDocCopy(firstDoc); replaceDoc.Cost += 20; - CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .CreateItem(firstDoc) .ReplaceItem(replaceDoc.Id, replaceDoc) .ExecuteAsync(); @@ -106,7 +106,7 @@ public async Task BatchOrderedAsync() Assert.AreEqual(HttpStatusCode.OK, batchResponse[1].StatusCode); // Ensure that the replace overwrote the doc from the first operation - await 
CosmosBatchTestBase.VerifyByReadAsync(container, replaceDoc); + await BatchTestBase.VerifyByReadAsync(container, replaceDoc); } [TestMethod] @@ -114,24 +114,24 @@ public async Task BatchOrderedAsync() [Description("Verify eTags passed to batch operations or returned in batch results flow as expected")] public async Task BatchItemETagAsync() { - Container container = CosmosBatchTestBase.JsonContainer; + Container container = BatchTestBase.JsonContainer; await this.CreateJsonTestDocsAsync(container); { - TestDoc testDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + TestDoc testDocToCreate = BatchTestBase.PopulateTestDoc(this.PartitionKey1); TestDoc testDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingA); testDocToReplace.Cost++; - ItemResponse readResponse = await CosmosBatchTestBase.JsonContainer.ReadItemAsync( + ItemResponse readResponse = await BatchTestBase.JsonContainer.ReadItemAsync( this.TestDocPk1ExistingA.Id, - CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + BatchTestBase.GetPartitionKey(this.PartitionKey1)); ItemRequestOptions firstReplaceOptions = new ItemRequestOptions() { IfMatchEtag = readResponse.ETag }; - CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .CreateItem(testDocToCreate) .ReplaceItem(testDocToReplace.Id, testDocToReplace, itemRequestOptions: firstReplaceOptions) .ExecuteAsync(); @@ -141,8 +141,8 @@ public async Task BatchItemETagAsync() Assert.AreEqual(HttpStatusCode.Created, batchResponse[0].StatusCode); Assert.AreEqual(HttpStatusCode.OK, batchResponse[1].StatusCode); - await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToCreate, eTag: batchResponse[0].ETag); - await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToReplace, eTag: batchResponse[1].ETag); + await BatchTestBase.VerifyByReadAsync(container, testDocToCreate, eTag: batchResponse[0].ETag); + await BatchTestBase.VerifyByReadAsync(container, testDocToReplace, eTag: batchResponse[1].ETag); } { @@ -151,10 +151,10 @@ public async Task BatchItemETagAsync() ItemRequestOptions replaceOptions = new ItemRequestOptions() { - IfMatchEtag = CosmosBatchTestBase.Random.Next().ToString() + IfMatchEtag = BatchTestBase.Random.Next().ToString() }; - CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .ReplaceItem(testDocToReplace.Id, testDocToReplace, itemRequestOptions: replaceOptions) .ExecuteAsync(); @@ -166,7 +166,7 @@ public async Task BatchItemETagAsync() Assert.AreEqual(HttpStatusCode.PreconditionFailed, batchResponse[0].StatusCode); // ensure the document was not updated - await CosmosBatchTestBase.VerifyByReadAsync(container, this.TestDocPk1ExistingB); + await BatchTestBase.VerifyByReadAsync(container, this.TestDocPk1ExistingB); } } @@ -178,11 +178,11 @@ public async Task BatchItemTimeToLiveAsync() // Verify with schematized containers where we are allowed to send TTL as a header const bool isSchematized = true; const bool isStream = true; - Container container = CosmosBatchTestBase.SchematizedContainer; + Container container = BatchTestBase.SchematizedContainer; await this.CreateSchematizedTestDocsAsync(container); { - TestDoc testDocToCreate = 
CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); - TestDoc anotherTestDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + TestDoc testDocToCreate = BatchTestBase.PopulateTestDoc(this.PartitionKey1); + TestDoc anotherTestDocToCreate = BatchTestBase.PopulateTestDoc(this.PartitionKey1); TestDoc testDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingA); testDocToReplace.Cost++; @@ -190,24 +190,24 @@ public async Task BatchItemTimeToLiveAsync() const int ttlInSeconds = 3; const int infiniteTtl = -1; - TestDoc testDocToUpsert = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1, ttlInSeconds: ttlInSeconds); + TestDoc testDocToUpsert = await BatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1, ttlInSeconds: ttlInSeconds); testDocToUpsert.Cost++; - CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .CreateItemStream( - CosmosBatchTestBase.TestDocToStream(testDocToCreate, isSchematized), - CosmosBatchTestBase.GetItemRequestOptions(testDocToCreate, isSchematized, ttlInSeconds: ttlInSeconds)) + BatchTestBase.TestDocToStream(testDocToCreate, isSchematized), + BatchTestBase.GetItemRequestOptions(testDocToCreate, isSchematized, ttlInSeconds: ttlInSeconds)) .CreateItemStream( - CosmosBatchTestBase.TestDocToStream(anotherTestDocToCreate, isSchematized), - CosmosBatchTestBase.GetItemRequestOptions(anotherTestDocToCreate, isSchematized)) + BatchTestBase.TestDocToStream(anotherTestDocToCreate, isSchematized), + BatchTestBase.GetItemRequestOptions(anotherTestDocToCreate, isSchematized)) .ReplaceItemStream( - CosmosBatchTestBase.GetId(testDocToReplace, isSchematized), - CosmosBatchTestBase.TestDocToStream(testDocToReplace, isSchematized), - CosmosBatchTestBase.GetItemRequestOptions(testDocToReplace, isSchematized, ttlInSeconds: ttlInSeconds)) + BatchTestBase.GetId(testDocToReplace, isSchematized), + BatchTestBase.TestDocToStream(testDocToReplace, isSchematized), + BatchTestBase.GetItemRequestOptions(testDocToReplace, isSchematized, ttlInSeconds: ttlInSeconds)) .UpsertItemStream( - CosmosBatchTestBase.TestDocToStream(testDocToUpsert, isSchematized), - CosmosBatchTestBase.GetItemRequestOptions(testDocToUpsert, isSchematized, ttlInSeconds: infiniteTtl)) - .ExecuteAsync(CosmosBatchTestBase.GetUpdatedBatchRequestOptions(isSchematized: true)); + BatchTestBase.TestDocToStream(testDocToUpsert, isSchematized), + BatchTestBase.GetItemRequestOptions(testDocToUpsert, isSchematized, ttlInSeconds: infiniteTtl)) + .ExecuteAsync(BatchTestBase.GetUpdatedBatchRequestOptions(isSchematized: true)); BatchSinglePartitionKeyTests.VerifyBatchProcessed(batchResponse, numberOfOperations: 4); @@ -219,10 +219,10 @@ public async Task BatchItemTimeToLiveAsync() // wait for TTL to expire await Task.Delay(TimeSpan.FromSeconds(ttlInSeconds + 1)); - await CosmosBatchTestBase.VerifyNotFoundAsync(container, testDocToCreate, isSchematized); - await CosmosBatchTestBase.VerifyByReadAsync(container, anotherTestDocToCreate, isStream, isSchematized); - await CosmosBatchTestBase.VerifyNotFoundAsync(container, testDocToReplace, isSchematized); - await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToUpsert, isStream, isSchematized); + await BatchTestBase.VerifyNotFoundAsync(container, testDocToCreate, isSchematized); + await BatchTestBase.VerifyByReadAsync(container, 
anotherTestDocToCreate, isStream, isSchematized); + await BatchTestBase.VerifyNotFoundAsync(container, testDocToReplace, isSchematized); + await BatchTestBase.VerifyByReadAsync(container, testDocToUpsert, isStream, isSchematized); } } @@ -230,35 +230,35 @@ public async Task BatchItemTimeToLiveAsync() [Owner("abpai")] public async Task BatchLargerThanServerRequestAsync() { - Container container = CosmosBatchTestBase.JsonContainer; + Container container = BatchTestBase.JsonContainer; const int operationCount = 20; int appxDocSize = Constants.MaxDirectModeBatchRequestBodySizeInBytes / operationCount; // Increase the doc size by a bit so all docs won't fit in one server request. appxDocSize = (int)(appxDocSize * 1.05); { - CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); for (int i = 0; i < operationCount; i++) { - TestDoc doc = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1, appxDocSize); + TestDoc doc = BatchTestBase.PopulateTestDoc(this.PartitionKey1, appxDocSize); batch.CreateItem(doc); } - CosmosBatchResponse batchResponse = await batch.ExecuteAsync(); + BatchResponse batchResponse = await batch.ExecuteAsync(); Assert.AreEqual(HttpStatusCode.RequestEntityTooLarge, batchResponse.StatusCode); } // Validate the server enforces this as well { - CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); for (int i = 0; i < operationCount; i++) { - TestDoc doc = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1, appxDocSize); + TestDoc doc = BatchTestBase.PopulateTestDoc(this.PartitionKey1, appxDocSize); batch.CreateItem(doc); } - CosmosBatchResponse batchResponse = await batch.ExecuteAsync( + BatchResponse batchResponse = await batch.ExecuteAsync( maxServerRequestBodyLength: int.MaxValue, maxServerRequestOperationCount: int.MaxValue); @@ -270,33 +270,33 @@ public async Task BatchLargerThanServerRequestAsync() [Owner("abpai")] public async Task BatchWithTooManyOperationsAsync() { - Container container = CosmosBatchTestBase.JsonContainer; + Container container = BatchTestBase.JsonContainer; await this.CreateJsonTestDocsAsync(container); const int operationCount = Constants.MaxOperationsInDirectModeBatchRequest + 1; // Validate SDK enforces this { - CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); for (int i = 0; i < operationCount; i++) { batch.ReadItem(this.TestDocPk1ExistingA.Id); } - CosmosBatchResponse batchResponse = await batch.ExecuteAsync(); + BatchResponse batchResponse = await batch.ExecuteAsync(); Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); } // Validate the server enforces this as well { - CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); for (int i = 0; i < operationCount; i++) { batch.ReadItem(this.TestDocPk1ExistingA.Id); } - CosmosBatchResponse batchResponse = await batch.ExecuteAsync( + BatchResponse batchResponse = await batch.ExecuteAsync( maxServerRequestBodyLength: int.MaxValue, maxServerRequestOperationCount: int.MaxValue); @@ -308,19 +308,19 @@ public async Task 
BatchWithTooManyOperationsAsync() [Owner("abpai")] public async Task BatchServerResponseTooLargeAsync() { - Container container = CosmosBatchTestBase.JsonContainer; + Container container = BatchTestBase.JsonContainer; const int operationCount = 10; int appxDocSizeInBytes = 1 * 1024 * 1024; - TestDoc doc = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1, appxDocSizeInBytes); + TestDoc doc = await BatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1, appxDocSizeInBytes); - CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); for (int i = 0; i < operationCount; i++) { batch.ReadItem(doc.Id); } - CosmosBatchResponse batchResponse = await batch.ExecuteAsync(); + BatchResponse batchResponse = await batch.ExecuteAsync(); BatchSinglePartitionKeyTests.VerifyBatchProcessed( batchResponse, @@ -335,10 +335,10 @@ public async Task BatchServerResponseTooLargeAsync() [Owner("abpai")] public async Task BatchReadsOnlyAsync() { - Container container = CosmosBatchTestBase.JsonContainer; + Container container = BatchTestBase.JsonContainer; await this.CreateJsonTestDocsAsync(container); - CosmosBatchResponse batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .ReadItem(this.TestDocPk1ExistingA.Id) .ReadItem(this.TestDocPk1ExistingB.Id) .ReadItem(this.TestDocPk1ExistingC.Id) @@ -355,22 +355,22 @@ public async Task BatchReadsOnlyAsync() Assert.AreEqual(this.TestDocPk1ExistingC, batchResponse.GetOperationResultAtIndex(2).Resource); } - private async Task RunCrudAsync(bool isStream, bool isSchematized, bool useEpk, Container container, RequestOptions batchOptions = null) + private async Task RunCrudAsync(bool isStream, bool isSchematized, bool useEpk, Container container, RequestOptions batchOptions = null) { if (isSchematized) { await this.CreateSchematizedTestDocsAsync(container); - batchOptions = CosmosBatchTestBase.GetUpdatedBatchRequestOptions(batchOptions, isSchematized, useEpk, this.PartitionKey1); + batchOptions = BatchTestBase.GetUpdatedBatchRequestOptions(batchOptions, isSchematized, useEpk, this.PartitionKey1); } else { await this.CreateJsonTestDocsAsync(container); } - TestDoc testDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + TestDoc testDocToCreate = BatchTestBase.PopulateTestDoc(this.PartitionKey1); - TestDoc testDocToUpsert = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + TestDoc testDocToUpsert = BatchTestBase.PopulateTestDoc(this.PartitionKey1); TestDoc anotherTestDocToUpsert = this.GetTestDocCopy(this.TestDocPk1ExistingA); anotherTestDocToUpsert.Cost++; @@ -379,10 +379,10 @@ private async Task RunCrudAsync(bool isStream, bool isSchem testDocToReplace.Cost++; // We run CRUD operations where all are expected to return HTTP 2xx. 
- CosmosBatchResponse batchResponse; + BatchResponse batchResponse; if (!isStream) { - batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .CreateItem(testDocToCreate) .ReadItem(this.TestDocPk1ExistingC.Id) .ReplaceItem(testDocToReplace.Id, testDocToReplace) @@ -393,26 +393,26 @@ private async Task RunCrudAsync(bool isStream, bool isSchem } else { - batchResponse = await container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1, useEpk)) + batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1, useEpk)) .CreateItemStream( - CosmosBatchTestBase.TestDocToStream(testDocToCreate, isSchematized), - CosmosBatchTestBase.GetItemRequestOptions(testDocToCreate, isSchematized)) + BatchTestBase.TestDocToStream(testDocToCreate, isSchematized), + BatchTestBase.GetItemRequestOptions(testDocToCreate, isSchematized)) .ReadItem( - CosmosBatchTestBase.GetId(this.TestDocPk1ExistingC, isSchematized), - CosmosBatchTestBase.GetItemRequestOptions(this.TestDocPk1ExistingC, isSchematized)) + BatchTestBase.GetId(this.TestDocPk1ExistingC, isSchematized), + BatchTestBase.GetItemRequestOptions(this.TestDocPk1ExistingC, isSchematized)) .ReplaceItemStream( - CosmosBatchTestBase.GetId(testDocToReplace, isSchematized), - CosmosBatchTestBase.TestDocToStream(testDocToReplace, isSchematized), - CosmosBatchTestBase.GetItemRequestOptions(testDocToReplace, isSchematized)) + BatchTestBase.GetId(testDocToReplace, isSchematized), + BatchTestBase.TestDocToStream(testDocToReplace, isSchematized), + BatchTestBase.GetItemRequestOptions(testDocToReplace, isSchematized)) .UpsertItemStream( - CosmosBatchTestBase.TestDocToStream(testDocToUpsert, isSchematized), - CosmosBatchTestBase.GetItemRequestOptions(testDocToUpsert, isSchematized)) + BatchTestBase.TestDocToStream(testDocToUpsert, isSchematized), + BatchTestBase.GetItemRequestOptions(testDocToUpsert, isSchematized)) .UpsertItemStream( - CosmosBatchTestBase.TestDocToStream(anotherTestDocToUpsert, isSchematized), - CosmosBatchTestBase.GetItemRequestOptions(anotherTestDocToUpsert, isSchematized)) + BatchTestBase.TestDocToStream(anotherTestDocToUpsert, isSchematized), + BatchTestBase.GetItemRequestOptions(anotherTestDocToUpsert, isSchematized)) .DeleteItem( - CosmosBatchTestBase.GetId(this.TestDocPk1ExistingD, isSchematized), - CosmosBatchTestBase.GetItemRequestOptions(this.TestDocPk1ExistingD, isSchematized)) + BatchTestBase.GetId(this.TestDocPk1ExistingD, isSchematized), + BatchTestBase.GetItemRequestOptions(this.TestDocPk1ExistingD, isSchematized)) .ExecuteAsync(batchOptions); } @@ -431,14 +431,14 @@ private async Task RunCrudAsync(bool isStream, bool isSchem } else { - Assert.AreEqual(this.TestDocPk1ExistingC, CosmosBatchTestBase.StreamToTestDoc(batchResponse[1].ResourceStream, isSchematized)); + Assert.AreEqual(this.TestDocPk1ExistingC, BatchTestBase.StreamToTestDoc(batchResponse[1].ResourceStream, isSchematized)); } - await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToCreate, isStream, isSchematized, useEpk); - await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToReplace, isStream, isSchematized, useEpk); - await CosmosBatchTestBase.VerifyByReadAsync(container, testDocToUpsert, isStream, isSchematized, useEpk); - await CosmosBatchTestBase.VerifyByReadAsync(container, anotherTestDocToUpsert, isStream, isSchematized, useEpk); - await 
CosmosBatchTestBase.VerifyNotFoundAsync(container, this.TestDocPk1ExistingD, isSchematized, useEpk); + await BatchTestBase.VerifyByReadAsync(container, testDocToCreate, isStream, isSchematized, useEpk); + await BatchTestBase.VerifyByReadAsync(container, testDocToReplace, isStream, isSchematized, useEpk); + await BatchTestBase.VerifyByReadAsync(container, testDocToUpsert, isStream, isSchematized, useEpk); + await BatchTestBase.VerifyByReadAsync(container, anotherTestDocToUpsert, isStream, isSchematized, useEpk); + await BatchTestBase.VerifyNotFoundAsync(container, this.TestDocPk1ExistingD, isSchematized, useEpk); return batchResponse; } @@ -448,23 +448,23 @@ private async Task RunCrudAsync(bool isStream, bool isSchem [Description("Verify batch with a large set of read operations that is expected to be rate limited.")] public async Task BatchRateLimitingAsync() { - Container containerWithDefaultRetryPolicy = CosmosBatchTestBase.LowThroughputJsonContainer; + Container containerWithDefaultRetryPolicy = BatchTestBase.LowThroughputJsonContainer; await this.CreateJsonTestDocsAsync(containerWithDefaultRetryPolicy); CosmosClient clientWithNoThrottleRetry = new CosmosClientBuilder( - CosmosBatchTestBase.Client.Endpoint.ToString(), - CosmosBatchTestBase.Client.AccountKey) + BatchTestBase.Client.Endpoint.ToString(), + BatchTestBase.Client.AccountKey) .WithThrottlingRetryOptions( maxRetryWaitTimeOnThrottledRequests: default(TimeSpan), maxRetryAttemptsOnThrottledRequests: 0) .Build(); Container containerWithNoThrottleRetry = - clientWithNoThrottleRetry.GetContainer(CosmosBatchTestBase.Database.Id, CosmosBatchTestBase.LowThroughputJsonContainer.Id); + clientWithNoThrottleRetry.GetContainer(BatchTestBase.Database.Id, BatchTestBase.LowThroughputJsonContainer.Id); // The second batch started should be rate limited by the backend in admission control. { - CosmosBatchResponse[] batchResponses = await this.RunTwoLargeBatchesAsync(containerWithNoThrottleRetry); + BatchResponse[] batchResponses = await this.RunTwoLargeBatchesAsync(containerWithNoThrottleRetry); Assert.AreEqual(HttpStatusCode.OK, batchResponses[0].StatusCode); Assert.AreEqual((int)StatusCodes.TooManyRequests, (int)batchResponses[1].StatusCode); @@ -473,17 +473,17 @@ public async Task BatchRateLimitingAsync() // The default retry policy around throttling should ensure the second batch also succeeds. 
{ - CosmosBatchResponse[] batchResponses = await this.RunTwoLargeBatchesAsync(containerWithDefaultRetryPolicy); + BatchResponse[] batchResponses = await this.RunTwoLargeBatchesAsync(containerWithDefaultRetryPolicy); Assert.AreEqual(HttpStatusCode.OK, batchResponses[0].StatusCode); Assert.AreEqual(HttpStatusCode.OK, batchResponses[1].StatusCode); } } - private async Task RunTwoLargeBatchesAsync(Container container) + private async Task RunTwoLargeBatchesAsync(Container container) { - CosmosBatch batch1 = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); - CosmosBatch batch2 = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)); + Batch batch1 = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); + Batch batch2 = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); for (int i = 0; i < Constants.MaxOperationsInDirectModeBatchRequest; i++) { @@ -491,11 +491,11 @@ private async Task RunTwoLargeBatchesAsync(Container cont batch2.CreateItem(BatchSinglePartitionKeyTests.PopulateTestDoc(this.PartitionKey1)); } - Task batch1Task = batch1.ExecuteAsync(); + Task batch1Task = batch1.ExecuteAsync(); await Task.Delay(50); - Task batch2Task = batch2.ExecuteAsync(); + Task batch2Task = batch2.ExecuteAsync(); - CosmosBatchResponse[] batchResponses = await Task.WhenAll(batch1Task, batch2Task); + BatchResponse[] batchResponses = await Task.WhenAll(batch1Task, batch2Task); return batchResponses; } @@ -504,7 +504,7 @@ private async Task RunTwoLargeBatchesAsync(Container cont [Description("Verify batch with a create operation having a conflict rolls back prior operations")] public async Task BatchWithCreateConflictAsync() { - await this.RunBatchWithCreateConflictAsync(CosmosBatchTestBase.JsonContainer); + await this.RunBatchWithCreateConflictAsync(BatchTestBase.JsonContainer); } [TestMethod] @@ -512,7 +512,7 @@ public async Task BatchWithCreateConflictAsync() [Description("Verify batch with a create operation having a conflict rolls back prior operations in gateway mode")] public async Task BatchWithCreateConflictGatewayAsync() { - await this.RunBatchWithCreateConflictAsync(CosmosBatchTestBase.GatewayJsonContainer); + await this.RunBatchWithCreateConflictAsync(BatchTestBase.GatewayJsonContainer); } [TestMethod] @@ -520,7 +520,7 @@ public async Task BatchWithCreateConflictGatewayAsync() [Description("Verify batch with a create operation having a conflict rolls back prior operations in shared throughput")] public async Task BatchWithCreateConflictSharedThroughputAsync() { - await this.RunBatchWithCreateConflictAsync(CosmosBatchTestBase.SharedThroughputContainer); + await this.RunBatchWithCreateConflictAsync(BatchTestBase.SharedThroughputContainer); } private async Task RunBatchWithCreateConflictAsync(Container container) @@ -537,7 +537,7 @@ await this.RunWithErrorAsync( HttpStatusCode.Conflict); // make sure the conflicted doc hasn't changed - await CosmosBatchTestBase.VerifyByReadAsync(container, this.TestDocPk1ExistingA); + await BatchTestBase.VerifyByReadAsync(container, this.TestDocPk1ExistingA); } [TestMethod] @@ -545,12 +545,12 @@ await this.RunWithErrorAsync( [Description("Verify batch with an invalid create operation rolls back prior operations")] public async Task BatchWithInvalidCreateAsync() { - Container container = CosmosBatchTestBase.JsonContainer; + Container container = BatchTestBase.JsonContainer; // partition key mismatch between doc and and value passed in to the operation await 
this.RunWithErrorAsync( container, - batch => batch.CreateItem(CosmosBatchTestBase.PopulateTestDoc(partitionKey: Guid.NewGuid().ToString())), + batch => batch.CreateItem(BatchTestBase.PopulateTestDoc(partitionKey: Guid.NewGuid().ToString())), HttpStatusCode.BadRequest); } @@ -559,7 +559,7 @@ await this.RunWithErrorAsync( [Description("Verify batch with a read operation on a non-existent entity rolls back prior operations")] public async Task BatchWithReadOfNonExistentEntityAsync() { - Container container = CosmosBatchTestBase.JsonContainer; + Container container = BatchTestBase.JsonContainer; await this.RunWithErrorAsync( container, batch => batch.ReadItem(Guid.NewGuid().ToString()), @@ -571,7 +571,7 @@ await this.RunWithErrorAsync( [Description("Verify batch with a replace operation on a stale entity rolls back prior operations")] public async Task BatchWithReplaceOfStaleEntityAsync() { - Container container = CosmosBatchTestBase.JsonContainer; + Container container = BatchTestBase.JsonContainer; await this.CreateJsonTestDocsAsync(container); TestDoc staleTestDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingA); @@ -587,7 +587,7 @@ await this.RunWithErrorAsync( HttpStatusCode.PreconditionFailed); // make sure the stale doc hasn't changed - await CosmosBatchTestBase.VerifyByReadAsync(container, this.TestDocPk1ExistingA); + await BatchTestBase.VerifyByReadAsync(container, this.TestDocPk1ExistingA); } [TestMethod] @@ -595,7 +595,7 @@ await this.RunWithErrorAsync( [Description("Verify batch with a delete operation on a non-existent entity rolls back prior operations")] public async Task BatchWithDeleteOfNonExistentEntityAsync() { - Container container = CosmosBatchTestBase.JsonContainer; + Container container = BatchTestBase.JsonContainer; await this.RunWithErrorAsync( container, batch => batch.DeleteItem(Guid.NewGuid().ToString()), @@ -604,18 +604,18 @@ await this.RunWithErrorAsync( private async Task RunWithErrorAsync( Container container, - Action appendOperation, + Action appendOperation, HttpStatusCode expectedFailedOperationStatusCode) { - TestDoc testDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); - TestDoc anotherTestDocToCreate = CosmosBatchTestBase.PopulateTestDoc(this.PartitionKey1); + TestDoc testDocToCreate = BatchTestBase.PopulateTestDoc(this.PartitionKey1); + TestDoc anotherTestDocToCreate = BatchTestBase.PopulateTestDoc(this.PartitionKey1); - CosmosBatch batch = container.CreateBatch(CosmosBatchTestBase.GetPartitionKey(this.PartitionKey1)) + Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .CreateItem(testDocToCreate); appendOperation(batch); - CosmosBatchResponse batchResponse = await batch + BatchResponse batchResponse = await batch .CreateItem(anotherTestDocToCreate) .ExecuteAsync(); @@ -628,12 +628,12 @@ private async Task RunWithErrorAsync( Assert.AreEqual(expectedFailedOperationStatusCode, batchResponse[1].StatusCode); Assert.AreEqual((HttpStatusCode)StatusCodes.FailedDependency, batchResponse[2].StatusCode); - await CosmosBatchTestBase.VerifyNotFoundAsync(container, testDocToCreate); - await CosmosBatchTestBase.VerifyNotFoundAsync(container, anotherTestDocToCreate); + await BatchTestBase.VerifyNotFoundAsync(container, testDocToCreate); + await BatchTestBase.VerifyNotFoundAsync(container, anotherTestDocToCreate); return container; } - private static void VerifyBatchProcessed(CosmosBatchResponse batchResponse, int numberOfOperations, HttpStatusCode expectedStatusCode = HttpStatusCode.OK) + private static 
void VerifyBatchProcessed(BatchResponse batchResponse, int numberOfOperations, HttpStatusCode expectedStatusCode = HttpStatusCode.OK) { Assert.IsNotNull(batchResponse); Assert.AreEqual( diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs similarity index 78% rename from Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs rename to Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs index 0c0c39a0c3..2506d41cbd 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/CosmosBatchTestBase.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs @@ -19,7 +19,7 @@ namespace Microsoft.Azure.Cosmos.SDK.EmulatorTests using Microsoft.VisualStudio.TestTools.UnitTesting; using Newtonsoft.Json; - public class CosmosBatchTestBase + public class BatchTestBase { protected static CosmosClient Client { get; set; } @@ -76,14 +76,14 @@ public static void ClassInit(TestContext context) private static void InitializeDirectContainers() { - CosmosBatchTestBase.Client = TestCommon.CreateCosmosClient(); - CosmosBatchTestBase.Database = CosmosBatchTestBase.Client.CreateDatabaseAsync(Guid.NewGuid().ToString()) + BatchTestBase.Client = TestCommon.CreateCosmosClient(); + BatchTestBase.Database = BatchTestBase.Client.CreateDatabaseAsync(Guid.NewGuid().ToString()) .GetAwaiter().GetResult().Database; PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); partitionKeyDefinition.Paths.Add("/Status"); - CosmosBatchTestBase.LowThroughputJsonContainer = CosmosBatchTestBase.Database.CreateContainerAsync( + BatchTestBase.LowThroughputJsonContainer = BatchTestBase.Database.CreateContainerAsync( new ContainerProperties() { Id = Guid.NewGuid().ToString(), @@ -91,14 +91,14 @@ private static void InitializeDirectContainers() }, throughput: 400).GetAwaiter().GetResult().Container; - CosmosBatchTestBase.PartitionKeyDefinition = ((ContainerCore)CosmosBatchTestBase.LowThroughputJsonContainer).GetPartitionKeyDefinitionAsync(CancellationToken.None).GetAwaiter().GetResult(); + BatchTestBase.PartitionKeyDefinition = ((ContainerCore)BatchTestBase.LowThroughputJsonContainer).GetPartitionKeyDefinitionAsync(CancellationToken.None).GetAwaiter().GetResult(); // Create a container with at least 2 physical partitions for effective cross-partition testing - CosmosBatchTestBase.JsonContainer = CosmosBatchTestBase.Database.CreateContainerAsync( + BatchTestBase.JsonContainer = BatchTestBase.Database.CreateContainerAsync( new ContainerProperties() { Id = Guid.NewGuid().ToString(), - PartitionKey = CosmosBatchTestBase.PartitionKeyDefinition + PartitionKey = BatchTestBase.PartitionKeyDefinition }, throughput: 12000).GetAwaiter().GetResult().Container; @@ -113,13 +113,13 @@ private static void InitializeDirectContainers() } }; - CosmosBatchTestBase.LayoutResolver = new LayoutResolverNamespace(testNamespace); - CosmosBatchTestBase.TestDocLayout = CosmosBatchTestBase.LayoutResolver.Resolve(testSchema.SchemaId); + BatchTestBase.LayoutResolver = new LayoutResolverNamespace(testNamespace); + BatchTestBase.TestDocLayout = BatchTestBase.LayoutResolver.Resolve(testSchema.SchemaId); BatchContainerProperties schematizedContainerProperties = new BatchContainerProperties() { Id = Guid.NewGuid().ToString(), - PartitionKey = 
CosmosBatchTestBase.PartitionKeyDefinition, + PartitionKey = BatchTestBase.PartitionKeyDefinition, DefaultTimeToLive = (int)TimeSpan.FromDays(1).TotalSeconds // allow for TTL testing }; @@ -130,19 +130,19 @@ private static void InitializeDirectContainers() schematizedContainerProperties.SchemaPolicy = schemaPolicy; - CosmosBatchTestBase.SchematizedContainer = CosmosBatchTestBase.Database.CreateContainerAsync( + BatchTestBase.SchematizedContainer = BatchTestBase.Database.CreateContainerAsync( schematizedContainerProperties, throughput: 12000).GetAwaiter().GetResult().Container; } private static void InitializeGatewayContainers() { - CosmosBatchTestBase.GatewayClient = TestCommon.CreateCosmosClient(useGateway: true); - CosmosBatchTestBase.GatewayDatabase = GatewayClient.GetDatabase(CosmosBatchTestBase.Database.Id); + BatchTestBase.GatewayClient = TestCommon.CreateCosmosClient(useGateway: true); + BatchTestBase.GatewayDatabase = GatewayClient.GetDatabase(BatchTestBase.Database.Id); - CosmosBatchTestBase.GatewayLowThroughputJsonContainer = CosmosBatchTestBase.GatewayDatabase.GetContainer(CosmosBatchTestBase.LowThroughputJsonContainer.Id); - CosmosBatchTestBase.GatewayJsonContainer = CosmosBatchTestBase.GatewayDatabase.GetContainer(CosmosBatchTestBase.JsonContainer.Id); - CosmosBatchTestBase.GatewaySchematizedContainer = CosmosBatchTestBase.GatewayDatabase.GetContainer(CosmosBatchTestBase.SchematizedContainer.Id); + BatchTestBase.GatewayLowThroughputJsonContainer = BatchTestBase.GatewayDatabase.GetContainer(BatchTestBase.LowThroughputJsonContainer.Id); + BatchTestBase.GatewayJsonContainer = BatchTestBase.GatewayDatabase.GetContainer(BatchTestBase.JsonContainer.Id); + BatchTestBase.GatewaySchematizedContainer = BatchTestBase.GatewayDatabase.GetContainer(BatchTestBase.SchematizedContainer.Id); } private static void InitializeSharedThroughputContainer() @@ -156,7 +156,7 @@ private static void InitializeSharedThroughputContainer() new ContainerProperties { Id = Guid.NewGuid().ToString(), - PartitionKey = CosmosBatchTestBase.PartitionKeyDefinition + PartitionKey = BatchTestBase.PartitionKeyDefinition }) .GetAwaiter().GetResult(); @@ -164,47 +164,47 @@ private static void InitializeSharedThroughputContainer() if (index == 2) { - CosmosBatchTestBase.SharedThroughputContainer = containerResponse.Container; + BatchTestBase.SharedThroughputContainer = containerResponse.Container; } } - CosmosBatchTestBase.SharedThroughputDatabase = db; + BatchTestBase.SharedThroughputDatabase = db; } public static void ClassClean() { - if (CosmosBatchTestBase.Client == null) + if (BatchTestBase.Client == null) { return; } - if (CosmosBatchTestBase.Database != null) + if (BatchTestBase.Database != null) { - CosmosBatchTestBase.Database.DeleteAsync().GetAwaiter().GetResult(); + BatchTestBase.Database.DeleteAsync().GetAwaiter().GetResult(); } - if (CosmosBatchTestBase.SharedThroughputDatabase != null) + if (BatchTestBase.SharedThroughputDatabase != null) { - CosmosBatchTestBase.SharedThroughputDatabase.DeleteAsync().GetAwaiter().GetResult(); + BatchTestBase.SharedThroughputDatabase.DeleteAsync().GetAwaiter().GetResult(); } - CosmosBatchTestBase.Client.Dispose(); + BatchTestBase.Client.Dispose(); } protected virtual async Task CreateJsonTestDocsAsync(Container container) { - this.TestDocPk1ExistingA = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); - this.TestDocPk1ExistingB = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); - this.TestDocPk1ExistingC = await 
CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); - this.TestDocPk1ExistingD = await CosmosBatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingA = await BatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingB = await BatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingC = await BatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingD = await BatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1); } protected virtual async Task CreateSchematizedTestDocsAsync(Container container) { - this.TestDocPk1ExistingA = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); - this.TestDocPk1ExistingB = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); - this.TestDocPk1ExistingC = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); - this.TestDocPk1ExistingD = await CosmosBatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingA = await BatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingB = await BatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingC = await BatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); + this.TestDocPk1ExistingD = await BatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1); } protected static TestDoc PopulateTestDoc(object partitionKey, int minDesiredSize = 20) @@ -213,7 +213,7 @@ protected static TestDoc PopulateTestDoc(object partitionKey, int minDesiredSize return new TestDoc() { Id = Guid.NewGuid().ToString(), - Cost = CosmosBatchTestBase.Random.Next(), + Cost = BatchTestBase.Random.Next(), Description = description, Status = partitionKey.ToString() }; @@ -258,16 +258,16 @@ protected static TestDoc StreamToTestDoc(Stream stream, bool isSchematized) protected static async Task VerifyByReadAsync(Container container, TestDoc doc, bool isStream = false, bool isSchematized = false, bool useEpk = false, string eTag = null) { - Cosmos.PartitionKey partitionKey = CosmosBatchTestBase.GetPartitionKey(doc.Status, useEpk); + Cosmos.PartitionKey partitionKey = BatchTestBase.GetPartitionKey(doc.Status, useEpk); if (isStream) { - string id = CosmosBatchTestBase.GetId(doc, isSchematized); - ItemRequestOptions requestOptions = CosmosBatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk, isPartOfBatch: false); + string id = BatchTestBase.GetId(doc, isSchematized); + ItemRequestOptions requestOptions = BatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk, isPartOfBatch: false); CosmosResponseMessage response = await container.ReadItemStreamAsync(id, partitionKey, requestOptions); Assert.AreEqual(HttpStatusCode.OK, response.StatusCode); - Assert.AreEqual(doc, CosmosBatchTestBase.StreamToTestDoc(response.Content, isSchematized)); + Assert.AreEqual(doc, BatchTestBase.StreamToTestDoc(response.Content, isSchematized)); if (eTag != null) { @@ -290,9 +290,9 @@ protected static async Task VerifyByReadAsync(Container container, TestDoc doc, protected static async Task VerifyNotFoundAsync(Container container, TestDoc doc, bool isSchematized = false, bool useEpk = false) { - string id = CosmosBatchTestBase.GetId(doc, isSchematized); - Cosmos.PartitionKey partitionKey = 
CosmosBatchTestBase.GetPartitionKey(doc.Status, useEpk); - ItemRequestOptions requestOptions = CosmosBatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk, isPartOfBatch: false); + string id = BatchTestBase.GetId(doc, isSchematized); + Cosmos.PartitionKey partitionKey = BatchTestBase.GetPartitionKey(doc.Status, useEpk); + ItemRequestOptions requestOptions = BatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk, isPartOfBatch: false); CosmosResponseMessage response = await container.ReadItemStreamAsync(id, partitionKey, requestOptions); @@ -323,10 +323,10 @@ protected static RequestOptions GetUpdatedBatchRequestOptions( { string epk = new Microsoft.Azure.Documents.PartitionKey(partitionKey) .InternalKey - .GetEffectivePartitionKeyString(CosmosBatchTestBase.PartitionKeyDefinition); + .GetEffectivePartitionKeyString(BatchTestBase.PartitionKeyDefinition); batchOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKeyString, epk); - batchOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, CosmosBatchTestBase.HexStringToBytes(epk)); + batchOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, BatchTestBase.HexStringToBytes(epk)); } } @@ -371,10 +371,10 @@ protected static ItemRequestOptions GetItemRequestOptions(TestDoc doc, bool isSc { string epk = new Microsoft.Azure.Documents.PartitionKey(doc.Status) .InternalKey - .GetEffectivePartitionKeyString(CosmosBatchTestBase.PartitionKeyDefinition); + .GetEffectivePartitionKeyString(BatchTestBase.PartitionKeyDefinition); requestOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKeyString, epk); - requestOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, CosmosBatchTestBase.HexStringToBytes(epk)); + requestOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, BatchTestBase.HexStringToBytes(epk)); } if (!isPartOfBatch) @@ -388,19 +388,19 @@ protected static ItemRequestOptions GetItemRequestOptions(TestDoc doc, bool isSc protected static async Task CreateJsonTestDocAsync(Container container, object partitionKey, int minDesiredSize = 20) { - TestDoc doc = CosmosBatchTestBase.PopulateTestDoc(partitionKey, minDesiredSize); - ItemResponse createResponse = await container.CreateItemAsync(doc, CosmosBatchTestBase.GetPartitionKey(partitionKey)); + TestDoc doc = BatchTestBase.PopulateTestDoc(partitionKey, minDesiredSize); + ItemResponse createResponse = await container.CreateItemAsync(doc, BatchTestBase.GetPartitionKey(partitionKey)); Assert.AreEqual(HttpStatusCode.Created, createResponse.StatusCode); return doc; } protected static async Task CreateSchematizedTestDocAsync(Container container, object partitionKey, int? 
ttlInSeconds = null) { - TestDoc doc = CosmosBatchTestBase.PopulateTestDoc(partitionKey); + TestDoc doc = BatchTestBase.PopulateTestDoc(partitionKey); CosmosResponseMessage createResponse = await container.CreateItemStreamAsync( doc.ToHybridRowStream(), - CosmosBatchTestBase.GetPartitionKey(partitionKey), - CosmosBatchTestBase.GetItemRequestOptions(doc, isSchematized: true, isPartOfBatch: false, ttlInSeconds: ttlInSeconds)); + BatchTestBase.GetPartitionKey(partitionKey), + BatchTestBase.GetItemRequestOptions(doc, isSchematized: true, isPartOfBatch: false, ttlInSeconds: ttlInSeconds)); Assert.AreEqual( HttpStatusCode.Created, createResponse.StatusCode); @@ -468,7 +468,7 @@ public static TestDoc FromHybridRowStream(Stream stream) } RowBuffer row = new RowBuffer((int)length); - Assert.IsTrue(row.ReadFrom(stream, (int)length, HybridRowVersion.V1, CosmosBatchTestBase.LayoutResolver)); + Assert.IsTrue(row.ReadFrom(stream, (int)length, HybridRowVersion.V1, BatchTestBase.LayoutResolver)); RowReader reader = new RowReader(ref row); TestDoc testDoc = new TestDoc(); @@ -509,7 +509,7 @@ public static TestDoc FromHybridRowStream(Stream stream) public MemoryStream ToHybridRowStream() { RowBuffer row = new RowBuffer(80000); - row.InitLayout(HybridRowVersion.V1, CosmosBatchTestBase.TestDocLayout, CosmosBatchTestBase.LayoutResolver); + row.InitLayout(HybridRowVersion.V1, BatchTestBase.TestDocLayout, BatchTestBase.LayoutResolver); Result r = RowWriter.WriteBuffer(ref row, this, TestDoc.WriteDoc); Assert.AreEqual(Result.Success, r); MemoryStream output = new MemoryStream(row.Length); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchResponsePayloadWriter.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchResponsePayloadWriter.cs index 5f18953896..d225f0dbed 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchResponsePayloadWriter.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchResponsePayloadWriter.cs @@ -17,9 +17,9 @@ namespace Microsoft.Azure.Cosmos.Tests internal class BatchResponsePayloadWriter { - private List results; + private List results; - public BatchResponsePayloadWriter(List results) + public BatchResponsePayloadWriter(List results) { this.results = results; } @@ -55,7 +55,7 @@ private Result WriteOperationResult(long index, out ReadOnlyMemory buffer) return r; } - private static Result WriteResult(ref RowWriter writer, TypeArgument typeArg, CosmosBatchOperationResult result) + private static Result WriteResult(ref RowWriter writer, TypeArgument typeArg, BatchOperationResult result) { Result r = writer.WriteInt32("statusCode", (int)result.StatusCode); if (r != Result.Success) diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs index d69444d233..553e670155 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs @@ -130,19 +130,19 @@ public async Task BatchRequestSerializationFillAsync() [Owner("abpai")] public async Task BatchResponseDeserializationAsync() { - List results = new List(); + List results = new List(); - results.Add(new CosmosBatchOperationResult(HttpStatusCode.Conflict)); + results.Add(new BatchOperationResult(HttpStatusCode.Conflict)); results.Add( - new CosmosBatchOperationResult(HttpStatusCode.OK) + new 
BatchOperationResult(HttpStatusCode.OK) { ResourceStream = new MemoryStream(new byte[] { 0x41, 0x42 }, index: 0, count: 2, writable: false, publiclyVisible: true), ETag = "1234" }); results.Add( - new CosmosBatchOperationResult((HttpStatusCode)StatusCodes.TooManyRequests) + new BatchOperationResult((HttpStatusCode)StatusCodes.TooManyRequests) { RetryAfter = TimeSpan.FromMilliseconds(360) }); @@ -161,7 +161,7 @@ public async Task BatchResponseDeserializationAsync() maxOperationCount: 1, serializer: serializer, cancellationToken: CancellationToken.None); - CosmosBatchResponse batchresponse = await CosmosBatchResponse.PopulateFromContentAsync( + BatchResponse batchresponse = await BatchResponse.PopulateFromContentAsync( new CosmosResponseMessage(HttpStatusCode.OK) { Content = responseContent }, batchResponse, serializer); @@ -226,9 +226,9 @@ public int GetHashCode(ItemBatchOperation obj) } } - private class CosmosBatchOperationResultEqualityComparer : IEqualityComparer + private class CosmosBatchOperationResultEqualityComparer : IEqualityComparer { - public bool Equals(CosmosBatchOperationResult x, CosmosBatchOperationResult y) + public bool Equals(BatchOperationResult x, BatchOperationResult y) { return x.StatusCode == y.StatusCode && x.SubStatusCode == y.SubStatusCode @@ -256,7 +256,7 @@ private bool Equals(MemoryStream x, MemoryStream y) return false; } - public int GetHashCode(CosmosBatchOperationResult obj) + public int GetHashCode(BatchOperationResult obj) { int hashCode = 1176625765; hashCode = (hashCode * -1521134295) + obj.StatusCode.GetHashCode(); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs similarity index 84% rename from Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs rename to Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs index bd166784f8..750f30e453 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/CosmosBatchUnitTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs @@ -18,7 +18,7 @@ namespace Microsoft.Azure.Cosmos.Tests using Newtonsoft.Json; [TestClass] - public class CosmosBatchUnitTests + public class BatchUnitTests { private const string DatabaseId = "mockDatabase"; @@ -30,7 +30,7 @@ public class CosmosBatchUnitTests [Owner("abpai")] public async Task BatchInvalidOptionsAsync() { - Container container = CosmosBatchUnitTests.GetCosmosContainer(); + Container container = BatchUnitTests.GetCosmosContainer(); List badBatchOptionsList = new List() { @@ -42,7 +42,7 @@ public async Task BatchInvalidOptionsAsync() foreach (RequestOptions batchOptions in badBatchOptionsList) { - CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) + BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) .ReadItem("someId") .ExecuteAsync(batchOptions); @@ -54,7 +54,7 @@ public async Task BatchInvalidOptionsAsync() [Owner("abpai")] public async Task BatchInvalidItemOptionsAsync() { - Container container = CosmosBatchUnitTests.GetCosmosContainer(); + Container container = BatchUnitTests.GetCosmosContainer(); List badItemOptionsList = new List() { @@ -94,7 +94,7 @@ public async Task BatchInvalidItemOptionsAsync() foreach (ItemRequestOptions itemOptions in badItemOptionsList) { - 
CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) + BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) .ReplaceItem("someId", new TestItem("repl"), itemOptions) .ExecuteAsync(); @@ -106,8 +106,8 @@ public async Task BatchInvalidItemOptionsAsync() [Owner("abpai")] public async Task BatchNoOperationsAsync() { - Container container = CosmosBatchUnitTests.GetCosmosContainer(); - CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) + Container container = BatchUnitTests.GetCosmosContainer(); + BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) .ExecuteAsync(); Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); @@ -193,7 +193,7 @@ public async Task BatchCrudRequestAsync() CosmosJsonSerializerCore jsonSerializer = new CosmosJsonSerializerCore(); BatchTestHandler testHandler = new BatchTestHandler((request, operations) => { - Assert.AreEqual(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1).ToString(), request.Headers.PartitionKey); + Assert.AreEqual(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1).ToString(), request.Headers.PartitionKey); Assert.AreEqual(bool.TrueString, request.Headers[HttpConstants.HttpHeaders.IsBatchAtomic]); Assert.AreEqual(bool.TrueString, request.Headers[HttpConstants.HttpHeaders.IsBatchOrdered]); Assert.IsFalse(request.Headers.TryGetValue(HttpConstants.HttpHeaders.ShouldBatchContinueOnError, out string unused)); @@ -210,56 +210,56 @@ public async Task BatchCrudRequestAsync() ItemBatchOperation operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Create, operation.OperationType); Assert.IsNull(operation.Id); - Assert.AreEqual(createItem, CosmosBatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); - CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? createRequestOptions : null, operation.RequestOptions); + Assert.AreEqual(createItem, BatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); + BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? createRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Read, operation.OperationType); Assert.AreEqual(readId, operation.Id); - CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? readRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? readRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Replace, operation.OperationType); Assert.AreEqual(replaceItem.Id, operation.Id); - Assert.AreEqual(replaceItem, CosmosBatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); - CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? replaceRequestOptions : null, operation.RequestOptions); + Assert.AreEqual(replaceItem, BatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); + BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? 
replaceRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Upsert, operation.OperationType); Assert.IsNull(operation.Id); - Assert.AreEqual(upsertItem, CosmosBatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); - CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? upsertRequestOptions : null, operation.RequestOptions); + Assert.AreEqual(upsertItem, BatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); + BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? upsertRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Delete, operation.OperationType); Assert.AreEqual(deleteId, operation.Id); - CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? deleteRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? deleteRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Create, operation.OperationType); Assert.IsNull(operation.Id); Assert.IsTrue(operation.ResourceBody.Span.SequenceEqual(createStreamContent)); - CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? createRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? createRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Replace, operation.OperationType); Assert.AreEqual(replaceStreamId, operation.Id); Assert.IsTrue(operation.ResourceBody.Span.SequenceEqual(replaceStreamContent)); - CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? replaceRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? replaceRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Upsert, operation.OperationType); Assert.IsNull(operation.Id); Assert.IsTrue(operation.ResourceBody.Span.SequenceEqual(upsertStreamContent)); - CosmosBatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? upsertRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? 
upsertRequestOptions : null, operation.RequestOptions); } return Task.FromResult(new CosmosResponseMessage(HttpStatusCode.OK)); }); - Container container = CosmosBatchUnitTests.GetCosmosContainer(testHandler); + Container container = BatchUnitTests.GetCosmosContainer(testHandler); - CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) + BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) .CreateItem(createItem) .ReadItem(readId) .ReplaceItem(replaceItem.Id, replaceItem) @@ -289,7 +289,7 @@ public async Task BatchCrudRequestAsync() [Owner("abpai")] public async Task BatchSingleServerResponseAsync() { - List expectedResults = new List(); + List expectedResults = new List(); CosmosJsonSerializerCore jsonSerializer = new CosmosJsonSerializerCore(); TestItem testItem = new TestItem("tst"); @@ -302,13 +302,13 @@ public async Task BatchSingleServerResponseAsync() } expectedResults.Add( - new CosmosBatchOperationResult(HttpStatusCode.OK) + new BatchOperationResult(HttpStatusCode.OK) { ETag = "theETag", SubStatusCode = (SubStatusCodes)1100, ResourceStream = resourceStream }); - expectedResults.Add(new CosmosBatchOperationResult(HttpStatusCode.Conflict)); + expectedResults.Add(new BatchOperationResult(HttpStatusCode.Conflict)); double requestCharge = 3.6; @@ -323,9 +323,9 @@ public async Task BatchSingleServerResponseAsync() return responseMessage; }); - Container container = CosmosBatchUnitTests.GetCosmosContainer(testHandler); + Container container = BatchUnitTests.GetCosmosContainer(testHandler); - CosmosBatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(CosmosBatchUnitTests.PartitionKey1)) + BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) .ReadItem("id1") .ReadItem("id2") .ExecuteAsync(); @@ -333,7 +333,7 @@ public async Task BatchSingleServerResponseAsync() Assert.AreEqual(HttpStatusCode.OK, batchResponse.StatusCode); Assert.AreEqual(requestCharge, batchResponse.RequestCharge); - CosmosBatchOperationResult result0 = batchResponse.GetOperationResultAtIndex(0); + BatchOperationResult result0 = batchResponse.GetOperationResultAtIndex(0); Assert.AreEqual(expectedResults[0].StatusCode, result0.StatusCode); Assert.AreEqual(expectedResults[0].SubStatusCode, result0.SubStatusCode); Assert.AreEqual(expectedResults[0].ETag, result0.ETag); @@ -365,10 +365,10 @@ public void BatchIsWriteOperation() private static async Task GetBatchResponseMessageAsync(List operations, int rateLimitedOperationCount = 0) { - CosmosBatchOperationResult okOperationResult = new CosmosBatchOperationResult(HttpStatusCode.OK); - CosmosBatchOperationResult rateLimitedOperationResult = new CosmosBatchOperationResult((HttpStatusCode)StatusCodes.TooManyRequests); + BatchOperationResult okOperationResult = new BatchOperationResult(HttpStatusCode.OK); + BatchOperationResult rateLimitedOperationResult = new BatchOperationResult((HttpStatusCode)StatusCodes.TooManyRequests); - List resultsFromServer = new List(); + List resultsFromServer = new List(); for (int operationIndex = 0; operationIndex < operations.Count - rateLimitedOperationCount; operationIndex++) { resultsFromServer.Add(okOperationResult); @@ -439,15 +439,15 @@ private static void VerifyItemRequestOptionsAreEqual(ItemRequestOptions expected private static Container GetCosmosContainer(TestHandler testHandler = null) { CosmosClient client = 
MockCosmosUtil.CreateMockCosmosClient((builder) => builder.AddCustomHandlers(testHandler)); - CosmosDatabaseCore database = new CosmosDatabaseCore(client.ClientContext, CosmosBatchUnitTests.DatabaseId); - ContainerCore container = new ContainerCore(client.ClientContext, database, CosmosBatchUnitTests.ContainerId); + CosmosDatabaseCore database = new CosmosDatabaseCore(client.ClientContext, BatchUnitTests.DatabaseId); + ContainerCore container = new ContainerCore(client.ClientContext, database, BatchUnitTests.ContainerId); return container; } private static ContainerCore GetMockedContainer(string containerName = null) { Mock mockedContainer = MockCosmosUtil.CreateMockContainer(containerName: containerName); - mockedContainer.Setup(c => c.ClientContext).Returns(CosmosBatchUnitTests.GetMockedClientContext()); + mockedContainer.Setup(c => c.ClientContext).Returns(BatchUnitTests.GetMockedClientContext()); return mockedContainer.Object; } @@ -495,8 +495,8 @@ private static void VerifyServerRequestProperties(CosmosRequestMessage request) Uri expectedRequestUri = new Uri( string.Format( "dbs/{0}/colls/{1}", - CosmosBatchUnitTests.DatabaseId, - CosmosBatchUnitTests.ContainerId), + BatchUnitTests.DatabaseId, + BatchUnitTests.ContainerId), UriKind.Relative); Assert.AreEqual(expectedRequestUri, request.RequestUri); } From ba99a88eaf963531f98db00ae07a3c214c5dfe0d Mon Sep 17 00:00:00 2001 From: "Abhijit P. Pai" Date: Wed, 3 Jul 2019 15:49:24 +0530 Subject: [PATCH 04/12] Add BatchItemRequestOptions so we can avoid having unsupported options; minor: getActivityIds to internal, use serializer from clientContext instead of clientOptions --- Microsoft.Azure.Cosmos/src/Batch/Batch.cs | 54 ++++++------ .../src/Batch/BatchExecUtils.cs | 36 +++----- .../src/Batch/BatchExecutor.cs | 8 +- .../src/Batch/BatchItemRequestOptions.cs | 22 +++++ .../src/Batch/BatchResponse.cs | 2 +- .../src/Batch/ItemBatchOperation.cs | 12 +-- .../Batch/BatchSinglePartitionKeyTests.cs | 26 +++--- .../Batch/BatchTestBase.cs | 88 ++++++++++++------- .../Batch/BatchRequestPayloadReader.cs | 4 +- .../Batch/BatchSchemaTests.cs | 6 +- .../Batch/BatchUnitTests.cs | 52 ++++------- 11 files changed, 166 insertions(+), 144 deletions(-) create mode 100644 Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs diff --git a/Microsoft.Azure.Cosmos/src/Batch/Batch.cs b/Microsoft.Azure.Cosmos/src/Batch/Batch.cs index b12f69f695..f011d8468b 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/Batch.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/Batch.cs @@ -37,16 +37,16 @@ internal Batch(ContainerCore container, PartitionKey partitionKey) /// Adds an operation to create an item into the batch. /// /// A JSON serializable object that must contain an id property. to implement a custom serializer. - /// (Optional) The options for the item request. . + /// (Optional) The options for the item request. . /// The instance with the operation added. /// The type of item to be created. - public virtual Batch CreateItem(T item, ItemRequestOptions itemRequestOptions = null) + public virtual Batch CreateItem(T item, BatchItemRequestOptions requestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Create, operationIndex: this.operations.Count, resource: item, - requestOptions: itemRequestOptions)); + requestOptions: requestOptions)); return this; } @@ -58,15 +58,15 @@ public virtual Batch CreateItem(T item, ItemRequestOptions itemRequestOptions /// A containing the payload of the item. 
/// The stream must have a UTF-8 encoded JSON object which contains an id property. /// - /// (Optional) The options for the item request. . + /// (Optional) The options for the item request. . /// The instance with the operation added. - public virtual Batch CreateItemStream(Stream resourceStream, ItemRequestOptions itemRequestOptions = null) + public virtual Batch CreateItemStream(Stream resourceStream, BatchItemRequestOptions requestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Create, operationIndex: this.operations.Count, resourceStream: resourceStream, - requestOptions: itemRequestOptions)); + requestOptions: requestOptions)); return this; } @@ -75,15 +75,15 @@ public virtual Batch CreateItemStream(Stream resourceStream, ItemRequestOptions /// Adds an operation to read an item into the batch. /// /// The cosmos item id. - /// (Optional) The options for the item request. . + /// (Optional) The options for the item request. . /// The instance with the operation added. - public virtual Batch ReadItem(string id, ItemRequestOptions itemRequestOptions = null) + public virtual Batch ReadItem(string id, BatchItemRequestOptions requestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Read, operationIndex: this.operations.Count, id: id, - requestOptions: itemRequestOptions)); + requestOptions: requestOptions)); return this; } @@ -92,16 +92,16 @@ public virtual Batch ReadItem(string id, ItemRequestOptions itemRequestOptions = /// Adds an operation to upsert an item into the batch. /// /// A JSON serializable object that must contain an id property. to implement a custom serializer. - /// (Optional) The options for the item request. . + /// (Optional) The options for the item request. . /// The instance with the operation added. /// The type of item to be created. - public virtual Batch UpsertItem(T item, ItemRequestOptions itemRequestOptions = null) + public virtual Batch UpsertItem(T item, BatchItemRequestOptions requestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Upsert, operationIndex: this.operations.Count, resource: item, - requestOptions: itemRequestOptions)); + requestOptions: requestOptions)); return this; } @@ -113,15 +113,15 @@ public virtual Batch UpsertItem(T item, ItemRequestOptions itemRequestOptions /// A containing the payload of the item. /// The stream must have a UTF-8 encoded JSON object which contains an id property. /// - /// (Optional) The options for the item request. . + /// (Optional) The options for the item request. . /// The instance with the operation added. - public virtual Batch UpsertItemStream(Stream resourceStream, ItemRequestOptions itemRequestOptions = null) + public virtual Batch UpsertItemStream(Stream resourceStream, BatchItemRequestOptions requestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Upsert, operationIndex: this.operations.Count, resourceStream: resourceStream, - requestOptions: itemRequestOptions)); + requestOptions: requestOptions)); return this; } @@ -131,17 +131,17 @@ public virtual Batch UpsertItemStream(Stream resourceStream, ItemRequestOptions /// /// The cosmos item id. /// A JSON serializable object that must contain an id property. to implement a custom serializer. - /// (Optional) The options for the item request. . + /// (Optional) The options for the item request. . /// The instance with the operation added. /// The type of item to be created. 
- public virtual Batch ReplaceItem(string id, T item, ItemRequestOptions itemRequestOptions = null) + public virtual Batch ReplaceItem(string id, T item, BatchItemRequestOptions requestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Replace, operationIndex: this.operations.Count, id: id, resource: item, - requestOptions: itemRequestOptions)); + requestOptions: requestOptions)); return this; } @@ -154,16 +154,16 @@ public virtual Batch ReplaceItem(string id, T item, ItemRequestOptions itemRe /// A containing the payload of the item. /// The stream must have a UTF-8 encoded JSON object which contains an id property. /// - /// (Optional) The options for the item request. . + /// (Optional) The options for the item request. . /// The instance with the operation added. - public virtual Batch ReplaceItemStream(string id, Stream resourceStream, ItemRequestOptions itemRequestOptions = null) + public virtual Batch ReplaceItemStream(string id, Stream resourceStream, BatchItemRequestOptions requestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Replace, operationIndex: this.operations.Count, id: id, resourceStream: resourceStream, - requestOptions: itemRequestOptions)); + requestOptions: requestOptions)); return this; } @@ -172,15 +172,15 @@ public virtual Batch ReplaceItemStream(string id, Stream resourceStream, ItemReq /// Adds an operation to delete an item into the batch. /// /// The cosmos item id. - /// (Optional) The options for the item request. . + /// (Optional) The options for the item request. . /// The instance with the operation added. - public virtual Batch DeleteItem(string id, ItemRequestOptions itemRequestOptions = null) + public virtual Batch DeleteItem(string id, BatchItemRequestOptions requestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Delete, operationIndex: this.operations.Count, id: id, - requestOptions: itemRequestOptions)); + requestOptions: requestOptions)); return this; } @@ -200,16 +200,16 @@ public virtual Batch DeleteItem(string id, ItemRequestOptions itemRequestOptions /// /// The cosmos item id. /// A containing the patch specification. - /// (Optional) The options for the item request. . + /// (Optional) The options for the item request. . /// The instance with the operation added. 
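Taken together, the signature changes above swap ItemRequestOptions for the narrower BatchItemRequestOptions on every public builder method. A minimal usage sketch of the reworked fluent API, intended to run inside an async method; `container`, `doc`, and `etag` are placeholders (an existing Container, a JSON-serializable item with an Id property, and a previously read ETag) and are not part of this patch:

    BatchItemRequestOptions replaceOptions = new BatchItemRequestOptions()
    {
        IfMatchEtag = etag,                            // optimistic concurrency, as exercised in BatchItemETagAsync below
        IndexingDirective = IndexingDirective.Exclude  // the batch-specific option BatchItemRequestOptions declares
    };

    BatchResponse batchResponse = await container.CreateBatch(new PartitionKey("pk1"))
        .CreateItem(doc)
        .ReplaceItem(doc.Id, doc, requestOptions: replaceOptions)
        .DeleteItem("someOtherId")
        .ExecuteAsync();

    // batchResponse.IsSuccessStatusCode then indicates whether the whole batch was applied atomically.
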
- internal virtual Batch PatchItemStream(string id, Stream patchStream, ItemRequestOptions itemRequestOptions = null) + internal virtual Batch PatchItemStream(string id, Stream patchStream, BatchItemRequestOptions requestOptions = null) { this.operations.Add(new ItemBatchOperation( operationType: OperationType.Patch, operationIndex: this.operations.Count, id: id, resourceStream: patchStream, - requestOptions: itemRequestOptions)); + requestOptions: requestOptions)); return this; } diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs index e6f5eb67d4..9fa0c59cfb 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs @@ -116,34 +116,24 @@ public static CosmosResponseMessage Validate( { foreach (ItemBatchOperation operation in operations) { - if (operation.RequestOptions != null) + if (operation.RequestOptions != null + && operation.RequestOptions.Properties != null + && (operation.RequestOptions.Properties.TryGetValue(WFConstants.BackendHeaders.EffectivePartitionKey, out object epkObj) + | operation.RequestOptions.Properties.TryGetValue(WFConstants.BackendHeaders.EffectivePartitionKeyString, out object epkStrObj))) { - if (operation.RequestOptions.ConsistencyLevel.HasValue - || operation.RequestOptions.PreTriggers != null - || operation.RequestOptions.PostTriggers != null - || operation.RequestOptions.SessionToken != null) + byte[] epk = epkObj as byte[]; + string epkStr = epkStrObj as string; + if (epk == null || epkStr == null) { - errorMessage = ClientResources.BatchItemRequestOptionNotSupported; + errorMessage = string.Format( + ClientResources.EpkPropertiesPairingExpected, + WFConstants.BackendHeaders.EffectivePartitionKey, + WFConstants.BackendHeaders.EffectivePartitionKeyString); } - if (operation.RequestOptions.Properties != null - && (operation.RequestOptions.Properties.TryGetValue(WFConstants.BackendHeaders.EffectivePartitionKey, out object epkObj) - | operation.RequestOptions.Properties.TryGetValue(WFConstants.BackendHeaders.EffectivePartitionKeyString, out object epkStrObj))) + if (operation.PartitionKey != null) { - byte[] epk = epkObj as byte[]; - string epkStr = epkStrObj as string; - if (epk == null || epkStr == null) - { - errorMessage = string.Format( - ClientResources.EpkPropertiesPairingExpected, - WFConstants.BackendHeaders.EffectivePartitionKey, - WFConstants.BackendHeaders.EffectivePartitionKeyString); - } - - if (operation.PartitionKey != null) - { - errorMessage = ClientResources.PKAndEpkSetTogether; - } + errorMessage = ClientResources.PKAndEpkSetTogether; } } } diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs index afdb14e158..a191a32f8e 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs @@ -71,7 +71,7 @@ public async Task ExecuteAsync(CancellationToken cancellationToke new ArraySegment(this.inputOperations.ToArray()), this.maxServerRequestBodyLength, this.maxServerRequestOperationCount, - serializer: this.clientContext.ClientOptions.CosmosSerializerWithWrapperOrDefault, + serializer: this.clientContext.CosmosSerializer, cancellationToken: cancellationToken); } catch (RequestEntityTooLargeException ex) @@ -120,14 +120,14 @@ private async Task ExecuteServerRequestAsync(SinglePartitionKeySe return await BatchResponse.FromResponseMessageAsync( cosmosResponseMessage, serverRequest, - 
this.clientContext.ClientOptions.CosmosSerializerWithWrapperOrDefault); + this.clientContext.CosmosSerializer); } } catch (CosmosException ex) { return new BatchResponse( - HttpStatusCode.ServiceUnavailable, - SubStatusCodes.Unknown, + ex.StatusCode, + (SubStatusCodes)ex.SubStatusCode, ex.Message, serverRequest.Operations); } diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs new file mode 100644 index 0000000000..5d1e29fdbf --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs @@ -0,0 +1,22 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + /// + /// that apply to operations within a . + /// + public class BatchItemRequestOptions : RequestOptions + { + /// + /// Gets or sets the indexing directive (Include or Exclude) for the request in the Azure Cosmos DB service. + /// + /// + /// The indexing directive to use with a request. + /// + /// + /// + public virtual IndexingDirective? IndexingDirective { get; set; } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs index 12ed8fb980..a114c681d8 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs @@ -186,7 +186,7 @@ public virtual IEnumerator GetEnumerator() /// Gets all the Activity IDs associated with the response. /// /// An enumerable that contains the Activity IDs. - public virtual IEnumerable GetActivityIds() + internal virtual IEnumerable GetActivityIds() { yield return this.ActivityId; } diff --git a/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs b/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs index 135eb623f2..4304d9a4cb 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs @@ -31,7 +31,7 @@ public ItemBatchOperation( PartitionKey partitionKey, string id = null, Stream resourceStream = null, - ItemRequestOptions requestOptions = null) + BatchItemRequestOptions requestOptions = null) { this.OperationType = operationType; this.OperationIndex = operationIndex; @@ -48,7 +48,7 @@ public ItemBatchOperation( int operationIndex, string id = null, Stream resourceStream = null, - ItemRequestOptions requestOptions = null) + BatchItemRequestOptions requestOptions = null) { this.OperationType = operationType; this.OperationIndex = operationIndex; @@ -65,7 +65,7 @@ public ItemBatchOperation( public Stream ResourceStream { get; protected set; } - public ItemRequestOptions RequestOptions { get; } + public BatchItemRequestOptions RequestOptions { get; } public int OperationIndex { get; } @@ -141,7 +141,7 @@ internal static Result WriteOperation(ref RowWriter writer, TypeArgument typeArg if (operation.RequestOptions != null) { - ItemRequestOptions options = operation.RequestOptions; + BatchItemRequestOptions options = operation.RequestOptions; if (options.IndexingDirective.HasValue) { string indexingDirectiveString = IndexingDirectiveStrings.FromIndexingDirective(options.IndexingDirective.Value); @@ -319,7 +319,7 @@ public ItemBatchOperation( PartitionKey partitionKey, T resource, string id = null, - ItemRequestOptions requestOptions = null) + BatchItemRequestOptions requestOptions = null) : base(operationType, 
operationIndex, partitionKey: partitionKey, id: id, requestOptions: requestOptions) { this.Resource = resource; @@ -330,7 +330,7 @@ public ItemBatchOperation( int operationIndex, T resource, string id = null, - ItemRequestOptions requestOptions = null) + BatchItemRequestOptions requestOptions = null) : base(operationType, operationIndex, id: id, requestOptions: requestOptions) { this.Resource = resource; diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs index 8352827607..8523102e3f 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs @@ -126,7 +126,7 @@ public async Task BatchItemETagAsync() this.TestDocPk1ExistingA.Id, BatchTestBase.GetPartitionKey(this.PartitionKey1)); - ItemRequestOptions firstReplaceOptions = new ItemRequestOptions() + BatchItemRequestOptions firstReplaceOptions = new BatchItemRequestOptions() { IfMatchEtag = readResponse.ETag }; @@ -149,7 +149,7 @@ public async Task BatchItemETagAsync() TestDoc testDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingB); testDocToReplace.Cost++; - ItemRequestOptions replaceOptions = new ItemRequestOptions() + BatchItemRequestOptions replaceOptions = new BatchItemRequestOptions() { IfMatchEtag = BatchTestBase.Random.Next().ToString() }; @@ -196,17 +196,17 @@ public async Task BatchItemTimeToLiveAsync() BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .CreateItemStream( BatchTestBase.TestDocToStream(testDocToCreate, isSchematized), - BatchTestBase.GetItemRequestOptions(testDocToCreate, isSchematized, ttlInSeconds: ttlInSeconds)) + BatchTestBase.GetBatchItemRequestOptions(testDocToCreate, isSchematized, ttlInSeconds: ttlInSeconds)) .CreateItemStream( BatchTestBase.TestDocToStream(anotherTestDocToCreate, isSchematized), - BatchTestBase.GetItemRequestOptions(anotherTestDocToCreate, isSchematized)) + BatchTestBase.GetBatchItemRequestOptions(anotherTestDocToCreate, isSchematized)) .ReplaceItemStream( BatchTestBase.GetId(testDocToReplace, isSchematized), BatchTestBase.TestDocToStream(testDocToReplace, isSchematized), - BatchTestBase.GetItemRequestOptions(testDocToReplace, isSchematized, ttlInSeconds: ttlInSeconds)) + BatchTestBase.GetBatchItemRequestOptions(testDocToReplace, isSchematized, ttlInSeconds: ttlInSeconds)) .UpsertItemStream( BatchTestBase.TestDocToStream(testDocToUpsert, isSchematized), - BatchTestBase.GetItemRequestOptions(testDocToUpsert, isSchematized, ttlInSeconds: infiniteTtl)) + BatchTestBase.GetBatchItemRequestOptions(testDocToUpsert, isSchematized, ttlInSeconds: infiniteTtl)) .ExecuteAsync(BatchTestBase.GetUpdatedBatchRequestOptions(isSchematized: true)); BatchSinglePartitionKeyTests.VerifyBatchProcessed(batchResponse, numberOfOperations: 4); @@ -396,23 +396,23 @@ private async Task RunCrudAsync(bool isStream, bool isSchematized batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1, useEpk)) .CreateItemStream( BatchTestBase.TestDocToStream(testDocToCreate, isSchematized), - BatchTestBase.GetItemRequestOptions(testDocToCreate, isSchematized)) + BatchTestBase.GetBatchItemRequestOptions(testDocToCreate, isSchematized)) .ReadItem( BatchTestBase.GetId(this.TestDocPk1ExistingC, 
isSchematized), - BatchTestBase.GetItemRequestOptions(this.TestDocPk1ExistingC, isSchematized)) + BatchTestBase.GetBatchItemRequestOptions(this.TestDocPk1ExistingC, isSchematized)) .ReplaceItemStream( BatchTestBase.GetId(testDocToReplace, isSchematized), BatchTestBase.TestDocToStream(testDocToReplace, isSchematized), - BatchTestBase.GetItemRequestOptions(testDocToReplace, isSchematized)) + BatchTestBase.GetBatchItemRequestOptions(testDocToReplace, isSchematized)) .UpsertItemStream( BatchTestBase.TestDocToStream(testDocToUpsert, isSchematized), - BatchTestBase.GetItemRequestOptions(testDocToUpsert, isSchematized)) + BatchTestBase.GetBatchItemRequestOptions(testDocToUpsert, isSchematized)) .UpsertItemStream( BatchTestBase.TestDocToStream(anotherTestDocToUpsert, isSchematized), - BatchTestBase.GetItemRequestOptions(anotherTestDocToUpsert, isSchematized)) + BatchTestBase.GetBatchItemRequestOptions(anotherTestDocToUpsert, isSchematized)) .DeleteItem( BatchTestBase.GetId(this.TestDocPk1ExistingD, isSchematized), - BatchTestBase.GetItemRequestOptions(this.TestDocPk1ExistingD, isSchematized)) + BatchTestBase.GetBatchItemRequestOptions(this.TestDocPk1ExistingD, isSchematized)) .ExecuteAsync(batchOptions); } @@ -576,7 +576,7 @@ public async Task BatchWithReplaceOfStaleEntityAsync() TestDoc staleTestDocToReplace = this.GetTestDocCopy(this.TestDocPk1ExistingA); staleTestDocToReplace.Cost++; - ItemRequestOptions staleReplaceOptions = new ItemRequestOptions() + BatchItemRequestOptions staleReplaceOptions = new BatchItemRequestOptions() { IfMatchEtag = Guid.NewGuid().ToString() }; diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs index 2506d41cbd..928de8feb5 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs @@ -263,7 +263,7 @@ protected static async Task VerifyByReadAsync(Container container, TestDoc doc, if (isStream) { string id = BatchTestBase.GetId(doc, isSchematized); - ItemRequestOptions requestOptions = BatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk, isPartOfBatch: false); + ItemRequestOptions requestOptions = BatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk); CosmosResponseMessage response = await container.ReadItemStreamAsync(id, partitionKey, requestOptions); Assert.AreEqual(HttpStatusCode.OK, response.StatusCode); @@ -292,7 +292,7 @@ protected static async Task VerifyNotFoundAsync(Container container, TestDoc doc { string id = BatchTestBase.GetId(doc, isSchematized); Cosmos.PartitionKey partitionKey = BatchTestBase.GetPartitionKey(doc.Status, useEpk); - ItemRequestOptions requestOptions = BatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk, isPartOfBatch: false); + ItemRequestOptions requestOptions = BatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk); CosmosResponseMessage response = await container.ReadItemStreamAsync(id, partitionKey, requestOptions); @@ -348,42 +348,41 @@ protected static string GetId(TestDoc doc, bool isSchematized) return doc.Id; } - - protected static ItemRequestOptions GetItemRequestOptions(TestDoc doc, bool isSchematized, bool useEpk = false, bool isPartOfBatch = true, int? ttlInSeconds = null) + protected static BatchItemRequestOptions GetBatchItemRequestOptions(TestDoc doc, bool isSchematized, bool useEpk = false, int? 
ttlInSeconds = null) { - ItemRequestOptions requestOptions = null; - if (isSchematized) + BatchItemRequestOptions requestOptions = new BatchItemRequestOptions() { - requestOptions = new ItemRequestOptions() - { - Properties = new Dictionary - { - { WFConstants.BackendHeaders.BinaryId, Encoding.UTF8.GetBytes(doc.Id) } - } - }; + Properties = new Dictionary() + }; - if (ttlInSeconds.HasValue) - { - requestOptions.Properties.Add(WFConstants.BackendHeaders.TimeToLiveInSeconds, ttlInSeconds.Value.ToString()); - } + if (PopulateRequestOptionsProperties(requestOptions.Properties, doc, isSchematized, useEpk, ttlInSeconds)) + { + return requestOptions; + } - if (useEpk) - { - string epk = new Microsoft.Azure.Documents.PartitionKey(doc.Status) - .InternalKey - .GetEffectivePartitionKeyString(BatchTestBase.PartitionKeyDefinition); + return null; + } - requestOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKeyString, epk); - requestOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, BatchTestBase.HexStringToBytes(epk)); - } + protected static ItemRequestOptions GetItemRequestOptions(TestDoc doc, bool isSchematized, bool useEpk = false, int? ttlInSeconds = null) + { + ItemRequestOptions requestOptions = new ItemRequestOptions() + { + Properties = new Dictionary() + }; - if (!isPartOfBatch) - { - requestOptions.Properties.Add(WFConstants.BackendHeaders.BinaryPassthroughRequest, bool.TrueString); - } + bool wasPopulated = BatchTestBase.PopulateRequestOptionsProperties(requestOptions.Properties, doc, isSchematized, useEpk, ttlInSeconds); + if (isSchematized) + { + requestOptions.Properties.Add(WFConstants.BackendHeaders.BinaryPassthroughRequest, bool.TrueString); + wasPopulated = true; + } + + if (wasPopulated) + { + return requestOptions; } - return requestOptions; + return null; } protected static async Task CreateJsonTestDocAsync(Container container, object partitionKey, int minDesiredSize = 20) @@ -400,7 +399,7 @@ protected static async Task CreateSchematizedTestDocAsync(Container con CosmosResponseMessage createResponse = await container.CreateItemStreamAsync( doc.ToHybridRowStream(), BatchTestBase.GetPartitionKey(partitionKey), - BatchTestBase.GetItemRequestOptions(doc, isSchematized: true, isPartOfBatch: false, ttlInSeconds: ttlInSeconds)); + BatchTestBase.GetItemRequestOptions(doc, isSchematized: true, ttlInSeconds: ttlInSeconds)); Assert.AreEqual( HttpStatusCode.Created, createResponse.StatusCode); @@ -418,6 +417,33 @@ protected static byte[] HexStringToBytes(string input) return bytes; } + private static bool PopulateRequestOptionsProperties(IDictionary properties, TestDoc doc, bool isSchematized, bool useEpk, int? 
ttlInSeconds) + { + if (isSchematized) + { + properties.Add(WFConstants.BackendHeaders.BinaryId, Encoding.UTF8.GetBytes(doc.Id)); + + if (ttlInSeconds.HasValue) + { + properties.Add(WFConstants.BackendHeaders.TimeToLiveInSeconds, ttlInSeconds.Value.ToString()); + } + + if (useEpk) + { + string epk = new Microsoft.Azure.Documents.PartitionKey(doc.Status) + .InternalKey + .GetEffectivePartitionKeyString(BatchTestBase.PartitionKeyDefinition); + + properties.Add(WFConstants.BackendHeaders.EffectivePartitionKeyString, epk); + properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, BatchTestBase.HexStringToBytes(epk)); + } + + return true; + } + + return false; + } + #pragma warning disable CA1034 public class TestDoc #pragma warning restore CA1034 diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchRequestPayloadReader.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchRequestPayloadReader.cs index 42604bdf0a..eed2e7a0aa 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchRequestPayloadReader.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchRequestPayloadReader.cs @@ -188,10 +188,10 @@ private static Result ReadOperation(ref RowReader reader, int operationIndex, ou return Result.Failure; } - ItemRequestOptions requestOptions = null; + BatchItemRequestOptions requestOptions = null; if (indexingDirective.HasValue || ifMatch != null || ifNoneMatch != null || binaryId != null || effectivePartitionKey != null || ttlInSeconds.HasValue) { - requestOptions = new ItemRequestOptions(); + requestOptions = new BatchItemRequestOptions(); if (indexingDirective.HasValue) { requestOptions.IndexingDirective = indexingDirective; diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs index 553e670155..6c310436cc 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs @@ -38,7 +38,7 @@ public async Task BatchRequestSerializationAsync() id: "id2", operationType: OperationType.Replace, operationIndex: 1, - requestOptions: new ItemRequestOptions() + requestOptions: new BatchItemRequestOptions() { IfMatchEtag = "theCondition" }) @@ -187,7 +187,7 @@ public bool Equals(ItemBatchOperation x, ItemBatchOperation y) && x.ResourceBody.Span.SequenceEqual(y.ResourceBody.Span); } - private bool Equals(ItemRequestOptions x, ItemRequestOptions y) + private bool Equals(BatchItemRequestOptions x, BatchItemRequestOptions y) { if (x == null && y == null) { @@ -219,7 +219,7 @@ public int GetHashCode(ItemBatchOperation obj) int hashCode = 1660235553; hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(obj.Id); hashCode = (hashCode * -1521134295) + obj.OperationType.GetHashCode(); - hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(obj.RequestOptions); + hashCode = (hashCode * -1521134295) + EqualityComparer.Default.GetHashCode(obj.RequestOptions); hashCode = (hashCode * -1521134295) + obj.OperationIndex.GetHashCode(); hashCode = (hashCode * -1521134295) + EqualityComparer>.Default.GetHashCode(obj.ResourceBody); return hashCode; diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs index 
750f30e453..e438dfe538 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs @@ -56,25 +56,9 @@ public async Task BatchInvalidItemOptionsAsync() { Container container = BatchUnitTests.GetCosmosContainer(); - List badItemOptionsList = new List() + List badItemOptionsList = new List() { - new ItemRequestOptions() - { - ConsistencyLevel = Microsoft.Azure.Cosmos.ConsistencyLevel.Strong - }, - new ItemRequestOptions() - { - PreTriggers = new List() { "pre" } - }, - new ItemRequestOptions() - { - PostTriggers = new List() { "post" } - }, - new ItemRequestOptions() - { - SessionToken = "sess" - }, - new ItemRequestOptions() + new BatchItemRequestOptions() { Properties = new Dictionary { @@ -82,7 +66,7 @@ public async Task BatchInvalidItemOptionsAsync() { WFConstants.BackendHeaders.EffectivePartitionKey, new byte[1] { 0x41 } } } }, - new ItemRequestOptions() + new BatchItemRequestOptions() { Properties = new Dictionary { @@ -92,7 +76,7 @@ public async Task BatchInvalidItemOptionsAsync() } }; - foreach (ItemRequestOptions itemOptions in badItemOptionsList) + foreach (BatchItemRequestOptions itemOptions in badItemOptionsList) { BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) .ReplaceItem("someId", new TestItem("repl"), itemOptions) @@ -125,7 +109,7 @@ public async Task BatchCrudRequestAsync() byte[] createStreamBinaryId = new byte[20]; random.NextBytes(createStreamBinaryId); int createTtl = 45; - ItemRequestOptions createRequestOptions = new ItemRequestOptions() + BatchItemRequestOptions createRequestOptions = new BatchItemRequestOptions() { Properties = new Dictionary() { @@ -138,7 +122,7 @@ public async Task BatchCrudRequestAsync() string readId = Guid.NewGuid().ToString(); byte[] readStreamBinaryId = new byte[20]; random.NextBytes(readStreamBinaryId); - ItemRequestOptions readRequestOptions = new ItemRequestOptions() + BatchItemRequestOptions readRequestOptions = new BatchItemRequestOptions() { Properties = new Dictionary() { @@ -153,7 +137,7 @@ public async Task BatchCrudRequestAsync() const string replaceStreamId = "replStream"; byte[] replaceStreamBinaryId = new byte[20]; random.NextBytes(replaceStreamBinaryId); - ItemRequestOptions replaceRequestOptions = new ItemRequestOptions() + BatchItemRequestOptions replaceRequestOptions = new BatchItemRequestOptions() { Properties = new Dictionary() { @@ -168,7 +152,7 @@ public async Task BatchCrudRequestAsync() random.NextBytes(upsertStreamContent); byte[] upsertStreamBinaryId = new byte[20]; random.NextBytes(upsertStreamBinaryId); - ItemRequestOptions upsertRequestOptions = new ItemRequestOptions() + BatchItemRequestOptions upsertRequestOptions = new BatchItemRequestOptions() { Properties = new Dictionary() { @@ -181,7 +165,7 @@ public async Task BatchCrudRequestAsync() string deleteId = Guid.NewGuid().ToString(); byte[] deleteStreamBinaryId = new byte[20]; random.NextBytes(deleteStreamBinaryId); - ItemRequestOptions deleteRequestOptions = new ItemRequestOptions() + BatchItemRequestOptions deleteRequestOptions = new BatchItemRequestOptions() { Properties = new Dictionary() { @@ -211,47 +195,47 @@ public async Task BatchCrudRequestAsync() Assert.AreEqual(OperationType.Create, operation.OperationType); Assert.IsNull(operation.Id); Assert.AreEqual(createItem, BatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); - 
BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? createRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyBatchItemRequestOptionsAreEqual(hasItemRequestOptions ? createRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Read, operation.OperationType); Assert.AreEqual(readId, operation.Id); - BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? readRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyBatchItemRequestOptionsAreEqual(hasItemRequestOptions ? readRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Replace, operation.OperationType); Assert.AreEqual(replaceItem.Id, operation.Id); Assert.AreEqual(replaceItem, BatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); - BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? replaceRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyBatchItemRequestOptionsAreEqual(hasItemRequestOptions ? replaceRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Upsert, operation.OperationType); Assert.IsNull(operation.Id); Assert.AreEqual(upsertItem, BatchUnitTests.Deserialize(operation.ResourceBody, jsonSerializer)); - BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? upsertRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyBatchItemRequestOptionsAreEqual(hasItemRequestOptions ? upsertRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Delete, operation.OperationType); Assert.AreEqual(deleteId, operation.Id); - BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? deleteRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyBatchItemRequestOptionsAreEqual(hasItemRequestOptions ? deleteRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Create, operation.OperationType); Assert.IsNull(operation.Id); Assert.IsTrue(operation.ResourceBody.Span.SequenceEqual(createStreamContent)); - BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? createRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyBatchItemRequestOptionsAreEqual(hasItemRequestOptions ? createRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Replace, operation.OperationType); Assert.AreEqual(replaceStreamId, operation.Id); Assert.IsTrue(operation.ResourceBody.Span.SequenceEqual(replaceStreamContent)); - BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? replaceRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyBatchItemRequestOptionsAreEqual(hasItemRequestOptions ? replaceRequestOptions : null, operation.RequestOptions); operation = operations[operationIndex++]; Assert.AreEqual(OperationType.Upsert, operation.OperationType); Assert.IsNull(operation.Id); Assert.IsTrue(operation.ResourceBody.Span.SequenceEqual(upsertStreamContent)); - BatchUnitTests.VerifyItemRequestOptionsAreEqual(hasItemRequestOptions ? upsertRequestOptions : null, operation.RequestOptions); + BatchUnitTests.VerifyBatchItemRequestOptionsAreEqual(hasItemRequestOptions ? 
upsertRequestOptions : null, operation.RequestOptions); } return Task.FromResult(new CosmosResponseMessage(HttpStatusCode.OK)); @@ -387,7 +371,7 @@ private static async Task GetBatchResponseMessageAsync(Li }; } - private static void VerifyItemRequestOptionsAreEqual(ItemRequestOptions expected, ItemRequestOptions actual) + private static void VerifyBatchItemRequestOptionsAreEqual(BatchItemRequestOptions expected, BatchItemRequestOptions actual) { if (expected != null) { From 982e605b2f6b165570b3994f427bf37e5719236c Mon Sep 17 00:00:00 2001 From: "Abhijit P. Pai" Date: Wed, 3 Jul 2019 17:26:48 +0530 Subject: [PATCH 05/12] Changes for merge: move Cosmos removals; PartitionKey is struct and use of IsEffectivePartitionKeyRouting --- .../src/Batch/BatchExecUtils.cs | 6 +-- .../src/Batch/BatchExecutor.cs | 14 +++++-- .../src/Batch/BatchResponse.cs | 4 +- .../src/Batch/ItemBatchOperation.cs | 8 ++-- .../SinglePartitionKeyServerBatchRequest.cs | 6 +-- .../Batch/BatchSinglePartitionKeyTests.cs | 6 +-- .../Batch/BatchTestBase.cs | 38 ++++++++++--------- .../Batch/BatchSchemaTests.cs | 10 ++--- .../Batch/BatchUnitTests.cs | 35 ++++++++++------- 9 files changed, 71 insertions(+), 56 deletions(-) diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs index 9fa0c59cfb..c81b52cfaf 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs @@ -87,7 +87,7 @@ public static void GetServerRequestLimits(out int maxServerRequestBodyLength, ou maxServerRequestOperationCount = Constants.MaxOperationsInDirectModeBatchRequest; } - public static CosmosResponseMessage Validate( + public static ResponseMessage Validate( IReadOnlyList operations, RequestOptions batchOptions, int? maxOperationCount = null) @@ -141,10 +141,10 @@ public static CosmosResponseMessage Validate( if (errorMessage != null) { - return new CosmosResponseMessage(HttpStatusCode.BadRequest, errorMessage: errorMessage); + return new ResponseMessage(HttpStatusCode.BadRequest, errorMessage: errorMessage); } - return new CosmosResponseMessage(HttpStatusCode.OK); + return new ResponseMessage(HttpStatusCode.OK); } } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs index a191a32f8e..ffdbacf404 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs @@ -49,7 +49,7 @@ public BatchExecutor( public async Task ExecuteAsync(CancellationToken cancellationToken) { - CosmosResponseMessage validationResult = BatchExecUtils.Validate( + ResponseMessage validationResult = BatchExecUtils.Validate( this.inputOperations, this.batchOptions, this.maxServerRequestOperationCount); @@ -66,8 +66,14 @@ public async Task ExecuteAsync(CancellationToken cancellationToke SinglePartitionKeyServerBatchRequest serverRequest; try { + PartitionKey? 
serverRequestPartitionKey = this.partitionKey; + if (this.batchOptions != null && this.batchOptions.IsEffectivePartitionKeyRouting) + { + serverRequestPartitionKey = null; + } + serverRequest = await SinglePartitionKeyServerBatchRequest.CreateAsync( - this.partitionKey, + serverRequestPartitionKey, new ArraySegment(this.inputOperations.ToArray()), this.maxServerRequestBodyLength, this.maxServerRequestOperationCount, @@ -101,7 +107,7 @@ private async Task ExecuteServerRequestAsync(SinglePartitionKeySe using (Stream serverRequestPayload = serverRequest.TransferBodyStream()) { Debug.Assert(serverRequestPayload != null, "Server request payload expected to be non-null"); - CosmosResponseMessage cosmosResponseMessage = await clientContext.ProcessResourceOperationStreamAsync( + ResponseMessage responseMessage = await clientContext.ProcessResourceOperationStreamAsync( this.container.LinkUri, ResourceType.Document, OperationType.Batch, @@ -118,7 +124,7 @@ private async Task ExecuteServerRequestAsync(SinglePartitionKeySe cancellationToken); return await BatchResponse.FromResponseMessageAsync( - cosmosResponseMessage, + responseMessage, serverRequest, this.clientContext.CosmosSerializer); } diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs index a114c681d8..b4d28e9650 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs @@ -207,7 +207,7 @@ IEnumerator IEnumerable.GetEnumerator() } internal static async Task FromResponseMessageAsync( - CosmosResponseMessage responseMessage, + ResponseMessage responseMessage, ServerBatchRequest serverRequest, CosmosSerializer serializer) { @@ -291,7 +291,7 @@ internal static async Task FromResponseMessageAsync( } internal static async Task PopulateFromContentAsync( - CosmosResponseMessage responseMessage, + ResponseMessage responseMessage, ServerBatchRequest serverRequest, CosmosSerializer serializer) { diff --git a/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs b/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs index 4304d9a4cb..8bfbd35d93 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/ItemBatchOperation.cs @@ -28,7 +28,7 @@ internal class ItemBatchOperation : IDisposable public ItemBatchOperation( OperationType operationType, int operationIndex, - PartitionKey partitionKey, + PartitionKey? partitionKey, string id = null, Stream resourceStream = null, BatchItemRequestOptions requestOptions = null) @@ -36,8 +36,6 @@ public ItemBatchOperation( this.OperationType = operationType; this.OperationIndex = operationIndex; this.PartitionKey = partitionKey; - this.ParsedPartitionKey = new Documents.PartitionKey(partitionKey.Value); - this.PartitionKeyJson = this.ParsedPartitionKey.ToString(); this.Id = id; this.ResourceStream = resourceStream; this.RequestOptions = requestOptions; @@ -57,7 +55,7 @@ public ItemBatchOperation( this.RequestOptions = requestOptions; } - public PartitionKey PartitionKey { get; } + public PartitionKey? 
PartitionKey { get; } public string Id { get; } @@ -69,7 +67,7 @@ public ItemBatchOperation( public int OperationIndex { get; } - internal string PartitionKeyJson { get; } + internal string PartitionKeyJson { get; set; } internal Documents.PartitionKey ParsedPartitionKey { get; set; } diff --git a/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs b/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs index 69e1af237d..4686d7e1f4 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/SinglePartitionKeyServerBatchRequest.cs @@ -19,7 +19,7 @@ internal sealed class SinglePartitionKeyServerBatchRequest : ServerBatchRequest /// Maximum number of operations allowed in the request. /// Serializer to serialize user provided objects to JSON. private SinglePartitionKeyServerBatchRequest( - PartitionKey partitionKey, + PartitionKey? partitionKey, int maxBodyLength, int maxOperationCount, CosmosSerializer serializer) @@ -31,7 +31,7 @@ private SinglePartitionKeyServerBatchRequest( /// /// PartitionKey that applies to all operations in this request. /// - public PartitionKey PartitionKey { get; } + public PartitionKey? PartitionKey { get; } /// /// Creates an instance of . @@ -45,7 +45,7 @@ private SinglePartitionKeyServerBatchRequest( /// representing request cancellation. /// A newly created instance of . public static async Task CreateAsync( - PartitionKey partitionKey, + PartitionKey? partitionKey, ArraySegment operations, int maxBodyLength, int maxOperationCount, diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs index 8523102e3f..971e0a4427 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs @@ -133,7 +133,7 @@ public async Task BatchItemETagAsync() BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .CreateItem(testDocToCreate) - .ReplaceItem(testDocToReplace.Id, testDocToReplace, itemRequestOptions: firstReplaceOptions) + .ReplaceItem(testDocToReplace.Id, testDocToReplace, requestOptions: firstReplaceOptions) .ExecuteAsync(); BatchSinglePartitionKeyTests.VerifyBatchProcessed(batchResponse, numberOfOperations: 2); @@ -155,7 +155,7 @@ public async Task BatchItemETagAsync() }; BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) - .ReplaceItem(testDocToReplace.Id, testDocToReplace, itemRequestOptions: replaceOptions) + .ReplaceItem(testDocToReplace.Id, testDocToReplace, requestOptions: replaceOptions) .ExecuteAsync(); BatchSinglePartitionKeyTests.VerifyBatchProcessed( @@ -393,7 +393,7 @@ private async Task RunCrudAsync(bool isStream, bool isSchematized } else { - batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1, useEpk)) + batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .CreateItemStream( BatchTestBase.TestDocToStream(testDocToCreate, isSchematized), BatchTestBase.GetBatchItemRequestOptions(testDocToCreate, isSchematized)) diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs 
b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs index 928de8feb5..8c7840becc 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs @@ -25,11 +25,11 @@ public class BatchTestBase protected static CosmosClient GatewayClient { get; set; } - protected static CosmosDatabase Database { get; set; } + protected static Cosmos.Database Database { get; set; } - protected static CosmosDatabase SharedThroughputDatabase { get; set; } + protected static Cosmos.Database SharedThroughputDatabase { get; set; } - protected static CosmosDatabase GatewayDatabase { get; set; } + protected static Cosmos.Database GatewayDatabase { get; set; } protected static Container JsonContainer { get; set; } @@ -148,7 +148,7 @@ private static void InitializeGatewayContainers() private static void InitializeSharedThroughputContainer() { CosmosClient client = TestCommon.CreateCosmosClient(); - CosmosDatabase db = client.CreateDatabaseAsync(string.Format("Shared_{0}", Guid.NewGuid().ToString("N")), throughput: 20000).GetAwaiter().GetResult().Database; + Cosmos.Database db = client.CreateDatabaseAsync(string.Format("Shared_{0}", Guid.NewGuid().ToString("N")), throughput: 20000).GetAwaiter().GetResult().Database; for (int index = 0; index < 5; index++) { @@ -258,13 +258,13 @@ protected static TestDoc StreamToTestDoc(Stream stream, bool isSchematized) protected static async Task VerifyByReadAsync(Container container, TestDoc doc, bool isStream = false, bool isSchematized = false, bool useEpk = false, string eTag = null) { - Cosmos.PartitionKey partitionKey = BatchTestBase.GetPartitionKey(doc.Status, useEpk); + Cosmos.PartitionKey partitionKey = BatchTestBase.GetPartitionKey(doc.Status); if (isStream) { string id = BatchTestBase.GetId(doc, isSchematized); ItemRequestOptions requestOptions = BatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk); - CosmosResponseMessage response = await container.ReadItemStreamAsync(id, partitionKey, requestOptions); + ResponseMessage response = await container.ReadItemStreamAsync(id, partitionKey, requestOptions); Assert.AreEqual(HttpStatusCode.OK, response.StatusCode); Assert.AreEqual(doc, BatchTestBase.StreamToTestDoc(response.Content, isSchematized)); @@ -291,10 +291,10 @@ protected static async Task VerifyByReadAsync(Container container, TestDoc doc, protected static async Task VerifyNotFoundAsync(Container container, TestDoc doc, bool isSchematized = false, bool useEpk = false) { string id = BatchTestBase.GetId(doc, isSchematized); - Cosmos.PartitionKey partitionKey = BatchTestBase.GetPartitionKey(doc.Status, useEpk); + Cosmos.PartitionKey partitionKey = BatchTestBase.GetPartitionKey(doc.Status); ItemRequestOptions requestOptions = BatchTestBase.GetItemRequestOptions(doc, isSchematized, useEpk); - CosmosResponseMessage response = await container.ReadItemStreamAsync(id, partitionKey, requestOptions); + ResponseMessage response = await container.ReadItemStreamAsync(id, partitionKey, requestOptions); Assert.AreEqual(HttpStatusCode.NotFound, response.StatusCode); } @@ -327,15 +327,16 @@ protected static RequestOptions GetUpdatedBatchRequestOptions( batchOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKeyString, epk); batchOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, BatchTestBase.HexStringToBytes(epk)); + batchOptions.IsEffectivePartitionKeyRouting = true; } 
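For reference, the effective-partition-key path being wired up above boils down to a RequestOptions shaped like the sketch below: the string and binary EPK values must be supplied together (BatchExecUtils.Validate rejects one without the other), and IsEffectivePartitionKeyRouting (an existing RequestOptions member picked up from the merge, per the commit message) makes the executor drop the PartitionKey from the server request. `epk` is assumed to be the hex string returned by GetEffectivePartitionKeyString for the document's partition key:

    RequestOptions batchOptions = new RequestOptions()
    {
        Properties = new Dictionary<string, object>()
        {
            { WFConstants.BackendHeaders.EffectivePartitionKeyString, epk },
            { WFConstants.BackendHeaders.EffectivePartitionKey, BatchTestBase.HexStringToBytes(epk) }
        },
        IsEffectivePartitionKeyRouting = true
    };

    // handed to the batch as container.CreateBatch(partitionKey). ... .ExecuteAsync(batchOptions)
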
} return batchOptions; } - protected static Cosmos.PartitionKey GetPartitionKey(object partitionKey, bool useEpk = false) + protected static Cosmos.PartitionKey GetPartitionKey(object partitionKey) { - return useEpk ? null : new Cosmos.PartitionKey(partitionKey); + return new Cosmos.PartitionKey(partitionKey); } protected static string GetId(TestDoc doc, bool isSchematized) @@ -355,7 +356,7 @@ protected static BatchItemRequestOptions GetBatchItemRequestOptions(TestDoc doc, Properties = new Dictionary() }; - if (PopulateRequestOptionsProperties(requestOptions.Properties, doc, isSchematized, useEpk, ttlInSeconds)) + if (PopulateRequestOptions(requestOptions, doc, isSchematized, useEpk, ttlInSeconds)) { return requestOptions; } @@ -370,7 +371,7 @@ protected static ItemRequestOptions GetItemRequestOptions(TestDoc doc, bool isSc Properties = new Dictionary() }; - bool wasPopulated = BatchTestBase.PopulateRequestOptionsProperties(requestOptions.Properties, doc, isSchematized, useEpk, ttlInSeconds); + bool wasPopulated = BatchTestBase.PopulateRequestOptions(requestOptions, doc, isSchematized, useEpk, ttlInSeconds); if (isSchematized) { requestOptions.Properties.Add(WFConstants.BackendHeaders.BinaryPassthroughRequest, bool.TrueString); @@ -396,7 +397,7 @@ protected static async Task CreateJsonTestDocAsync(Container container, protected static async Task CreateSchematizedTestDocAsync(Container container, object partitionKey, int? ttlInSeconds = null) { TestDoc doc = BatchTestBase.PopulateTestDoc(partitionKey); - CosmosResponseMessage createResponse = await container.CreateItemStreamAsync( + ResponseMessage createResponse = await container.CreateItemStreamAsync( doc.ToHybridRowStream(), BatchTestBase.GetPartitionKey(partitionKey), BatchTestBase.GetItemRequestOptions(doc, isSchematized: true, ttlInSeconds: ttlInSeconds)); @@ -417,15 +418,15 @@ protected static byte[] HexStringToBytes(string input) return bytes; } - private static bool PopulateRequestOptionsProperties(IDictionary properties, TestDoc doc, bool isSchematized, bool useEpk, int? ttlInSeconds) + private static bool PopulateRequestOptions(RequestOptions requestOptions, TestDoc doc, bool isSchematized, bool useEpk, int? 
ttlInSeconds) { if (isSchematized) { - properties.Add(WFConstants.BackendHeaders.BinaryId, Encoding.UTF8.GetBytes(doc.Id)); + requestOptions.Properties.Add(WFConstants.BackendHeaders.BinaryId, Encoding.UTF8.GetBytes(doc.Id)); if (ttlInSeconds.HasValue) { - properties.Add(WFConstants.BackendHeaders.TimeToLiveInSeconds, ttlInSeconds.Value.ToString()); + requestOptions.Properties.Add(WFConstants.BackendHeaders.TimeToLiveInSeconds, ttlInSeconds.Value.ToString()); } if (useEpk) @@ -434,8 +435,9 @@ private static bool PopulateRequestOptionsProperties(IDictionary .InternalKey .GetEffectivePartitionKeyString(BatchTestBase.PartitionKeyDefinition); - properties.Add(WFConstants.BackendHeaders.EffectivePartitionKeyString, epk); - properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, BatchTestBase.HexStringToBytes(epk)); + requestOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKeyString, epk); + requestOptions.Properties.Add(WFConstants.BackendHeaders.EffectivePartitionKey, BatchTestBase.HexStringToBytes(epk)); + requestOptions.IsEffectivePartitionKeyRouting = true; } return true; diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs index 6c310436cc..ac67d1be49 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchSchemaTests.cs @@ -151,7 +151,7 @@ public async Task BatchResponseDeserializationAsync() CosmosSerializer serializer = new CosmosJsonSerializerCore(); SinglePartitionKeyServerBatchRequest batchResponse = await SinglePartitionKeyServerBatchRequest.CreateAsync( - partitionKey: null, + partitionKey: Cosmos.PartitionKey.None, operations: new ArraySegment( new ItemBatchOperation[] { @@ -162,12 +162,12 @@ public async Task BatchResponseDeserializationAsync() serializer: serializer, cancellationToken: CancellationToken.None); BatchResponse batchresponse = await BatchResponse.PopulateFromContentAsync( - new CosmosResponseMessage(HttpStatusCode.OK) { Content = responseContent }, + new ResponseMessage(HttpStatusCode.OK) { Content = responseContent }, batchResponse, serializer); Assert.IsNotNull(batchresponse); - Assert.IsTrue(batchresponse.IsSuccessStatusCode, "batchresponse.IsSuccessStatusCode" + batchresponse.StatusCode); + Assert.IsTrue(batchresponse.IsSuccessStatusCode); Assert.AreEqual(3, batchresponse.Count); CosmosBatchOperationResultEqualityComparer comparer = new CosmosBatchOperationResultEqualityComparer(); @@ -195,8 +195,8 @@ private bool Equals(BatchItemRequestOptions x, BatchItemRequestOptions y) } else if (x != null && y != null) { - CosmosRequestMessage xMessage = new CosmosRequestMessage(); - CosmosRequestMessage yMessage = new CosmosRequestMessage(); + RequestMessage xMessage = new RequestMessage(); + RequestMessage yMessage = new RequestMessage(); x.PopulateRequestOptions(xMessage); y.PopulateRequestOptions(yMessage); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs index e438dfe538..13ecd5e744 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs @@ -238,7 +238,7 @@ public async Task BatchCrudRequestAsync() 
BatchUnitTests.VerifyBatchItemRequestOptionsAreEqual(hasItemRequestOptions ? upsertRequestOptions : null, operation.RequestOptions); } - return Task.FromResult(new CosmosResponseMessage(HttpStatusCode.OK)); + return Task.FromResult(new ResponseMessage(HttpStatusCode.OK)); }); Container container = BatchUnitTests.GetCosmosContainer(testHandler); @@ -298,7 +298,7 @@ public async Task BatchSingleServerResponseAsync() TestHandler testHandler = new TestHandler(async (request, cancellationToken) => { - CosmosResponseMessage responseMessage = new CosmosResponseMessage(HttpStatusCode.OK, requestMessage: null, errorMessage: null) + ResponseMessage responseMessage = new ResponseMessage(HttpStatusCode.OK, requestMessage: null, errorMessage: null) { Content = await new BatchResponsePayloadWriter(expectedResults).GeneratePayloadAsync() }; @@ -347,7 +347,7 @@ public void BatchIsWriteOperation() Assert.IsTrue(OperationType.Batch.IsWriteOperation()); } - private static async Task GetBatchResponseMessageAsync(List operations, int rateLimitedOperationCount = 0) + private static async Task GetBatchResponseMessageAsync(List operations, int rateLimitedOperationCount = 0) { BatchOperationResult okOperationResult = new BatchOperationResult(HttpStatusCode.OK); BatchOperationResult rateLimitedOperationResult = new BatchOperationResult((HttpStatusCode)StatusCodes.TooManyRequests); @@ -365,7 +365,7 @@ private static async Task GetBatchResponseMessageAsync(Li HttpStatusCode batchStatus = rateLimitedOperationCount > 0 ? (HttpStatusCode)StatusCodes.MultiStatus : HttpStatusCode.OK; - return new CosmosResponseMessage(batchStatus, requestMessage: null, errorMessage: null) + return new ResponseMessage(batchStatus, requestMessage: null, errorMessage: null) { Content = await new BatchResponsePayloadWriter(resultsFromServer).GeneratePayloadAsync() }; @@ -422,8 +422,17 @@ private static void VerifyBatchItemRequestOptionsAreEqual(BatchItemRequestOption private static Container GetCosmosContainer(TestHandler testHandler = null) { - CosmosClient client = MockCosmosUtil.CreateMockCosmosClient((builder) => builder.AddCustomHandlers(testHandler)); - CosmosDatabaseCore database = new CosmosDatabaseCore(client.ClientContext, BatchUnitTests.DatabaseId); + CosmosClient client; + if (testHandler != null) + { + client = MockCosmosUtil.CreateMockCosmosClient((builder) => builder.AddCustomHandlers(testHandler)); + } + else + { + client = MockCosmosUtil.CreateMockCosmosClient(); + } + + DatabaseCore database = new DatabaseCore(client.ClientContext, BatchUnitTests.DatabaseId); ContainerCore container = new ContainerCore(client.ClientContext, database, BatchUnitTests.ContainerId); return container; } @@ -451,26 +460,26 @@ private static TestItem Deserialize(Memory body, CosmosSerializer serializ private class BatchTestHandler : TestHandler { - private readonly Func, Task> func; + private readonly Func, Task> func; - public BatchTestHandler(Func, Task> func) + public BatchTestHandler(Func, Task> func) { this.func = func; } - public List>> Received { get; } = new List>>(); + public List>> Received { get; } = new List>>(); - public override async Task SendAsync( - CosmosRequestMessage request, CancellationToken cancellationToken) + public override async Task SendAsync( + RequestMessage request, CancellationToken cancellationToken) { BatchTestHandler.VerifyServerRequestProperties(request); List operations = await new BatchRequestPayloadReader().ReadPayloadAsync(request.Content); - this.Received.Add(new Tuple>(request, operations)); + 
this.Received.Add(new Tuple>(request, operations)); return await this.func(request, operations); } - private static void VerifyServerRequestProperties(CosmosRequestMessage request) + private static void VerifyServerRequestProperties(RequestMessage request) { Assert.AreEqual(OperationType.Batch, request.OperationType); Assert.AreEqual(ResourceType.Document, request.ResourceType); From cf0e1b1be17cd5f08b2b02d85e076fc49c96719a Mon Sep 17 00:00:00 2001 From: "Abhijit P. Pai" Date: Wed, 3 Jul 2019 17:28:23 +0530 Subject: [PATCH 06/12] Remove no longer used resx string --- Microsoft.Azure.Cosmos/src/ClientResources.Designer.cs | 9 --------- Microsoft.Azure.Cosmos/src/ClientResources.resx | 3 --- 2 files changed, 12 deletions(-) diff --git a/Microsoft.Azure.Cosmos/src/ClientResources.Designer.cs b/Microsoft.Azure.Cosmos/src/ClientResources.Designer.cs index 2c6810f738..5ad6b5a498 100644 --- a/Microsoft.Azure.Cosmos/src/ClientResources.Designer.cs +++ b/Microsoft.Azure.Cosmos/src/ClientResources.Designer.cs @@ -204,15 +204,6 @@ internal static string BadSession { } } - /// - /// Looks up a localized string similar to One or more provided request options are not supported on items that are part of a batch request.. - /// - internal static string BatchItemRequestOptionNotSupported { - get { - return ResourceManager.GetString("BatchItemRequestOptionNotSupported", resourceCulture); - } - } - /// /// Looks up a localized string similar to The batch request did not have any operations to be executed.. /// diff --git a/Microsoft.Azure.Cosmos/src/ClientResources.resx b/Microsoft.Azure.Cosmos/src/ClientResources.resx index 146cefa808..d4d316593a 100644 --- a/Microsoft.Azure.Cosmos/src/ClientResources.resx +++ b/Microsoft.Azure.Cosmos/src/ClientResources.resx @@ -165,9 +165,6 @@ Session object retrieved from client with endpoint {0} cannot be used on a client initialized to endpoint {1}. - - One or more provided request options are not supported on items that are part of a batch request. - The batch request did not have any operations to be executed. From aa4e14f38d9e4aeb60d7f9ee2744a85a26c44f61 Mon Sep 17 00:00:00 2001 From: "Abhijit P. Pai" Date: Wed, 3 Jul 2019 22:25:26 +0530 Subject: [PATCH 07/12] Ignore rate limiting test as emulator likely runs without rate limit in SDK test run --- .../Batch/BatchSinglePartitionKeyTests.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs index 971e0a4427..4c27e9a5b8 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs @@ -444,6 +444,7 @@ private async Task RunCrudAsync(bool isStream, bool isSchematized } [TestMethod] + [Ignore] [Owner("abpai")] [Description("Verify batch with a large set of read operations that is expected to be rate limited.")] public async Task BatchRateLimitingAsync() From 14f2aa5363331e01bc61686b3eb2ff07457aa63c Mon Sep 17 00:00:00 2001 From: "Abhijit P. 
Pai" Date: Fri, 5 Jul 2019 00:50:35 +0530 Subject: [PATCH 08/12] Dummy --- .../Batch/BatchTestBase.cs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs index 8c7840becc..e759721870 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs @@ -57,14 +57,8 @@ public class BatchTestBase // Documents in PartitionKey1 protected TestDoc TestDocPk1ExistingA { get; set; } - - // Documents in PartitionKey1 protected TestDoc TestDocPk1ExistingB { get; set; } - - // Documents in PartitionKey1 protected TestDoc TestDocPk1ExistingC { get; set; } - - // Documents in PartitionKey1 protected TestDoc TestDocPk1ExistingD { get; set; } public static void ClassInit(TestContext context) From e9641135af7ff8735df274b155a32b422e6f1b2f Mon Sep 17 00:00:00 2001 From: "Abhijit P. Pai" Date: Sat, 6 Jul 2019 00:30:44 +0530 Subject: [PATCH 09/12] Batch as abstract with BatchCore impl, arg checks for public methods, exceptions for input errors --- Microsoft.Azure.Cosmos/src/Batch/Batch.cs | 240 ----------------- Microsoft.Azure.Cosmos/src/Batch/BatchCore.cs | 254 ++++++++++++++++++ .../src/Batch/BatchExecUtils.cs | 22 +- .../src/Batch/BatchExecutor.cs | 95 +++---- .../src/Batch/BatchItemRequestOptions.cs | 2 +- .../src/RequestOptions/ItemRequestOptions.cs | 4 +- .../src/Resource/Container/Container.cs | 84 ++++++ .../Resource/Container/ContainerCore.Items.cs | 2 +- .../Batch/BatchSinglePartitionKeyTests.cs | 121 ++------- .../Batch/BatchUnitTests.cs | 140 +++++++--- 10 files changed, 512 insertions(+), 452 deletions(-) delete mode 100644 Microsoft.Azure.Cosmos/src/Batch/Batch.cs create mode 100644 Microsoft.Azure.Cosmos/src/Batch/BatchCore.cs diff --git a/Microsoft.Azure.Cosmos/src/Batch/Batch.cs b/Microsoft.Azure.Cosmos/src/Batch/Batch.cs deleted file mode 100644 index f011d8468b..0000000000 --- a/Microsoft.Azure.Cosmos/src/Batch/Batch.cs +++ /dev/null @@ -1,240 +0,0 @@ -//------------------------------------------------------------ -// Copyright (c) Microsoft Corporation. All rights reserved. -//------------------------------------------------------------ - -namespace Microsoft.Azure.Cosmos -{ - using System.Collections.Generic; - using System.IO; - using System.Threading; - using System.Threading.Tasks; - using Microsoft.Azure.Documents; - - /// - /// Represents a batch of requests that will be performed atomically against the Azure Cosmos DB service. - /// - public class Batch - { - private readonly PartitionKey partitionKey; - - private readonly ContainerCore container; - - private List operations; - - /// - /// Initializes a new instance of the class. - /// - /// Container that has items on which batch operations are to be performed. - /// The partition key for all items in the batch. . - internal Batch(ContainerCore container, PartitionKey partitionKey) - { - this.container = container; - this.partitionKey = partitionKey; - this.operations = new List(); - } - - /// - /// Adds an operation to create an item into the batch. - /// - /// A JSON serializable object that must contain an id property. to implement a custom serializer. - /// (Optional) The options for the item request. . - /// The instance with the operation added. - /// The type of item to be created. 
- public virtual Batch CreateItem(T item, BatchItemRequestOptions requestOptions = null) - { - this.operations.Add(new ItemBatchOperation( - operationType: OperationType.Create, - operationIndex: this.operations.Count, - resource: item, - requestOptions: requestOptions)); - - return this; - } - - /// - /// Adds an operation to create an item into the batch. - /// - /// - /// A containing the payload of the item. - /// The stream must have a UTF-8 encoded JSON object which contains an id property. - /// - /// (Optional) The options for the item request. . - /// The instance with the operation added. - public virtual Batch CreateItemStream(Stream resourceStream, BatchItemRequestOptions requestOptions = null) - { - this.operations.Add(new ItemBatchOperation( - operationType: OperationType.Create, - operationIndex: this.operations.Count, - resourceStream: resourceStream, - requestOptions: requestOptions)); - - return this; - } - - /// - /// Adds an operation to read an item into the batch. - /// - /// The cosmos item id. - /// (Optional) The options for the item request. . - /// The instance with the operation added. - public virtual Batch ReadItem(string id, BatchItemRequestOptions requestOptions = null) - { - this.operations.Add(new ItemBatchOperation( - operationType: OperationType.Read, - operationIndex: this.operations.Count, - id: id, - requestOptions: requestOptions)); - - return this; - } - - /// - /// Adds an operation to upsert an item into the batch. - /// - /// A JSON serializable object that must contain an id property. to implement a custom serializer. - /// (Optional) The options for the item request. . - /// The instance with the operation added. - /// The type of item to be created. - public virtual Batch UpsertItem(T item, BatchItemRequestOptions requestOptions = null) - { - this.operations.Add(new ItemBatchOperation( - operationType: OperationType.Upsert, - operationIndex: this.operations.Count, - resource: item, - requestOptions: requestOptions)); - - return this; - } - - /// - /// Adds an operation to upsert an item into the batch. - /// - /// - /// A containing the payload of the item. - /// The stream must have a UTF-8 encoded JSON object which contains an id property. - /// - /// (Optional) The options for the item request. . - /// The instance with the operation added. - public virtual Batch UpsertItemStream(Stream resourceStream, BatchItemRequestOptions requestOptions = null) - { - this.operations.Add(new ItemBatchOperation( - operationType: OperationType.Upsert, - operationIndex: this.operations.Count, - resourceStream: resourceStream, - requestOptions: requestOptions)); - - return this; - } - - /// - /// Adds an operation to replace an item into the batch. - /// - /// The cosmos item id. - /// A JSON serializable object that must contain an id property. to implement a custom serializer. - /// (Optional) The options for the item request. . - /// The instance with the operation added. - /// The type of item to be created. - public virtual Batch ReplaceItem(string id, T item, BatchItemRequestOptions requestOptions = null) - { - this.operations.Add(new ItemBatchOperation( - operationType: OperationType.Replace, - operationIndex: this.operations.Count, - id: id, - resource: item, - requestOptions: requestOptions)); - - return this; - } - - /// - /// Adds an operation to replace an item into the batch. - /// - /// The cosmos item id. - /// - /// A containing the payload of the item. - /// The stream must have a UTF-8 encoded JSON object which contains an id property. 
- /// - /// (Optional) The options for the item request. . - /// The instance with the operation added. - public virtual Batch ReplaceItemStream(string id, Stream resourceStream, BatchItemRequestOptions requestOptions = null) - { - this.operations.Add(new ItemBatchOperation( - operationType: OperationType.Replace, - operationIndex: this.operations.Count, - id: id, - resourceStream: resourceStream, - requestOptions: requestOptions)); - - return this; - } - - /// - /// Adds an operation to delete an item into the batch. - /// - /// The cosmos item id. - /// (Optional) The options for the item request. . - /// The instance with the operation added. - public virtual Batch DeleteItem(string id, BatchItemRequestOptions requestOptions = null) - { - this.operations.Add(new ItemBatchOperation( - operationType: OperationType.Delete, - operationIndex: this.operations.Count, - id: id, - requestOptions: requestOptions)); - - return this; - } - - /// - /// Executes the batch at the Azure Cosmos service as an asynchronous operation. - /// - /// (Optional) representing request cancellation. - /// An awaitable which contains the completion status and results of each operation. - public virtual Task ExecuteAsync(CancellationToken cancellationToken = default(CancellationToken)) - { - return this.ExecuteAsync(requestOptions: null, cancellationToken: cancellationToken); - } - - /// - /// Adds an operation to patch an item into the batch. - /// - /// The cosmos item id. - /// A containing the patch specification. - /// (Optional) The options for the item request. . - /// The instance with the operation added. - internal virtual Batch PatchItemStream(string id, Stream patchStream, BatchItemRequestOptions requestOptions = null) - { - this.operations.Add(new ItemBatchOperation( - operationType: OperationType.Patch, - operationIndex: this.operations.Count, - id: id, - resourceStream: patchStream, - requestOptions: requestOptions)); - - return this; - } - - /// - /// Executes the batch at the Azure Cosmos service as an asynchronous operation. - /// - /// Options that apply to the batch. - /// (Optional) representing request cancellation. - /// An awaitable which contains the completion status and results of each operation. - internal virtual Task ExecuteAsync(RequestOptions requestOptions, CancellationToken cancellationToken = default(CancellationToken)) - { - BatchExecUtils.GetServerRequestLimits(out int maxServerRequestBodyLength, out int maxServerRequestOperationCount); - return this.ExecuteAsync(maxServerRequestBodyLength, maxServerRequestOperationCount, requestOptions, cancellationToken); - } - - internal Task ExecuteAsync( - int maxServerRequestBodyLength, - int maxServerRequestOperationCount, - RequestOptions requestOptions = null, - CancellationToken cancellationToken = default(CancellationToken)) - { - BatchExecutor executor = new BatchExecutor(this.container, this.partitionKey, this.operations, requestOptions, maxServerRequestBodyLength, maxServerRequestOperationCount); - this.operations = new List(); - return executor.ExecuteAsync(cancellationToken); - } - } -} diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchCore.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchCore.cs new file mode 100644 index 0000000000..36d6494cff --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchCore.cs @@ -0,0 +1,254 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + using System; + using System.Collections.Generic; + using System.IO; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Documents; + + internal class BatchCore : Batch + { + private readonly PartitionKey partitionKey; + + private readonly ContainerCore container; + + private List operations; + + /// + /// Initializes a new instance of the class. + /// + /// Container that has items on which batch operations are to be performed. + /// The partition key for all items in the batch. . + internal BatchCore( + ContainerCore container, + PartitionKey partitionKey) + { + this.container = container; + this.partitionKey = partitionKey; + this.operations = new List(); + } + + public override Batch CreateItem( + T item, + BatchItemRequestOptions requestOptions = null) + { + if (item == null) + { + throw new ArgumentNullException(nameof(item)); + } + + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Create, + operationIndex: this.operations.Count, + resource: item, + requestOptions: requestOptions)); + + return this; + } + + public override Batch CreateItemStream( + Stream streamPayload, + BatchItemRequestOptions requestOptions = null) + { + if (streamPayload == null) + { + throw new ArgumentNullException(nameof(streamPayload)); + } + + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Create, + operationIndex: this.operations.Count, + resourceStream: streamPayload, + requestOptions: requestOptions)); + + return this; + } + + public override Batch ReadItem( + string id, + BatchItemRequestOptions requestOptions = null) + { + if (id == null) + { + throw new ArgumentNullException(nameof(id)); + } + + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Read, + operationIndex: this.operations.Count, + id: id, + requestOptions: requestOptions)); + + return this; + } + + public override Batch UpsertItem( + T item, + BatchItemRequestOptions requestOptions = null) + { + if (item == null) + { + throw new ArgumentNullException(nameof(item)); + } + + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Upsert, + operationIndex: this.operations.Count, + resource: item, + requestOptions: requestOptions)); + + return this; + } + + public override Batch UpsertItemStream( + Stream streamPayload, + BatchItemRequestOptions requestOptions = null) + { + if (streamPayload == null) + { + throw new ArgumentNullException(nameof(streamPayload)); + } + + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Upsert, + operationIndex: this.operations.Count, + resourceStream: streamPayload, + requestOptions: requestOptions)); + + return this; + } + + public override Batch ReplaceItem( + string id, + T item, + BatchItemRequestOptions requestOptions = null) + { + if (id == null) + { + throw new ArgumentNullException(nameof(id)); + } + + if (item == null) + { + throw new ArgumentNullException(nameof(item)); + } + + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Replace, + operationIndex: this.operations.Count, + id: id, + resource: item, + requestOptions: requestOptions)); + + return this; + } + + public override Batch ReplaceItemStream( + string id, + Stream streamPayload, + BatchItemRequestOptions requestOptions = null) + { + if (id == null) + { + throw new ArgumentNullException(nameof(id)); + } + + if (streamPayload == null) + { + throw new 
ArgumentNullException(nameof(streamPayload)); + } + + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Replace, + operationIndex: this.operations.Count, + id: id, + resourceStream: streamPayload, + requestOptions: requestOptions)); + + return this; + } + + public override Batch DeleteItem( + string id, + BatchItemRequestOptions requestOptions = null) + { + if (id == null) + { + throw new ArgumentNullException(nameof(id)); + } + + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Delete, + operationIndex: this.operations.Count, + id: id, + requestOptions: requestOptions)); + + return this; + } + + public override Task ExecuteAsync( + CancellationToken cancellationToken = default(CancellationToken)) + { + return this.ExecuteAsync( + Constants.MaxDirectModeBatchRequestBodySizeInBytes, + Constants.MaxOperationsInDirectModeBatchRequest, + requestOptions: null, + cancellationToken: cancellationToken); + } + + /// + /// Executes the batch at the Azure Cosmos service as an asynchronous operation. + /// + /// Options that apply to the batch. Used only for EPK routing. + /// (Optional) representing request cancellation. + /// An awaitable which contains the completion status and results of each operation. + public virtual Task ExecuteAsync( + RequestOptions requestOptions, + CancellationToken cancellationToken = default(CancellationToken)) + { + return this.ExecuteAsync( + Constants.MaxDirectModeBatchRequestBodySizeInBytes, + Constants.MaxOperationsInDirectModeBatchRequest, + requestOptions, + cancellationToken); + } + + /// + /// Adds an operation to patch an item into the batch. + /// + /// The cosmos item id. + /// A containing the patch specification. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + public virtual Batch PatchItemStream( + string id, + Stream patchStream, + BatchItemRequestOptions requestOptions = null) + { + this.operations.Add(new ItemBatchOperation( + operationType: OperationType.Patch, + operationIndex: this.operations.Count, + id: id, + resourceStream: patchStream, + requestOptions: requestOptions)); + + return this; + } + + internal Task ExecuteAsync( + int maxServerRequestBodyLength, + int maxServerRequestOperationCount, + RequestOptions requestOptions = null, + CancellationToken cancellationToken = default(CancellationToken)) + { + BatchExecutor executor = new BatchExecutor(this.container, this.partitionKey, this.operations, requestOptions, maxServerRequestBodyLength, maxServerRequestOperationCount); + this.operations = new List(); + return executor.ExecuteAsync(cancellationToken); + } + } +} diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs index c81b52cfaf..530439992e 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecUtils.cs @@ -17,6 +17,9 @@ namespace Microsoft.Azure.Cosmos /// internal static class BatchExecUtils { + // Using the same buffer size as the Stream.DefaultCopyBufferSize + private const int BufferSize = 81920; + /// /// Converts a Stream to a Memory{byte} wrapping a byte array honoring a provided maximum length for the returned Memory. /// @@ -25,7 +28,10 @@ internal static class BatchExecUtils /// to cancel the operation. /// A Memory{byte} with length at most maximumLength. /// Throws RequestEntityTooLargeException if the input stream has more bytes than maximumLength. 
- public static async Task> StreamToMemoryAsync(Stream stream, int maximumLength, CancellationToken cancellationToken) + public static async Task> StreamToMemoryAsync( + Stream stream, + int maximumLength, + CancellationToken cancellationToken) { if (stream.CanSeek) { @@ -56,7 +62,7 @@ public static async Task> StreamToMemoryAsync(Stream stream, int ma } else { - int bufferSize = 81920; // Using the same buffer size as the Stream.DefaultCopyBufferSize + int bufferSize = BatchExecUtils.BufferSize; byte[] buffer = new byte[bufferSize]; using (MemoryStream memoryStream = new MemoryStream(bufferSize)) // using bufferSize as initial capacity as well @@ -81,13 +87,7 @@ public static async Task> StreamToMemoryAsync(Stream stream, int ma } } - public static void GetServerRequestLimits(out int maxServerRequestBodyLength, out int maxServerRequestOperationCount) - { - maxServerRequestBodyLength = Constants.MaxDirectModeBatchRequestBodySizeInBytes; - maxServerRequestOperationCount = Constants.MaxOperationsInDirectModeBatchRequest; - } - - public static ResponseMessage Validate( + public static void EnsureValid( IReadOnlyList operations, RequestOptions batchOptions, int? maxOperationCount = null) @@ -141,10 +141,8 @@ public static ResponseMessage Validate( if (errorMessage != null) { - return new ResponseMessage(HttpStatusCode.BadRequest, errorMessage: errorMessage); + throw new ArgumentException(errorMessage); } - - return new ResponseMessage(HttpStatusCode.OK); } } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs index ffdbacf404..7e9926ed7a 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchExecutor.cs @@ -9,7 +9,6 @@ namespace Microsoft.Azure.Cosmos using System.Diagnostics; using System.IO; using System.Linq; - using System.Net; using System.Threading; using System.Threading.Tasks; using Microsoft.Azure.Documents; @@ -49,46 +48,25 @@ public BatchExecutor( public async Task ExecuteAsync(CancellationToken cancellationToken) { - ResponseMessage validationResult = BatchExecUtils.Validate( - this.inputOperations, - this.batchOptions, - this.maxServerRequestOperationCount); + BatchExecUtils.EnsureValid(this.inputOperations, this.batchOptions, this.maxServerRequestOperationCount); - if (!validationResult.IsSuccessStatusCode) + PartitionKey? serverRequestPartitionKey = this.partitionKey; + if (this.batchOptions != null && this.batchOptions.IsEffectivePartitionKeyRouting) { - return new BatchResponse( - validationResult.StatusCode, - validationResult.Headers.SubStatusCode, - validationResult.ErrorMessage, - this.inputOperations); + serverRequestPartitionKey = null; } - SinglePartitionKeyServerBatchRequest serverRequest; - try - { - PartitionKey? serverRequestPartitionKey = this.partitionKey; - if (this.batchOptions != null && this.batchOptions.IsEffectivePartitionKeyRouting) - { - serverRequestPartitionKey = null; - } - - serverRequest = await SinglePartitionKeyServerBatchRequest.CreateAsync( + SinglePartitionKeyServerBatchRequest serverRequest = await SinglePartitionKeyServerBatchRequest.CreateAsync( serverRequestPartitionKey, new ArraySegment(this.inputOperations.ToArray()), this.maxServerRequestBodyLength, this.maxServerRequestOperationCount, serializer: this.clientContext.CosmosSerializer, cancellationToken: cancellationToken); - } - catch (RequestEntityTooLargeException ex) - { - return new BatchResponse(ex.StatusCode ?? 
HttpStatusCode.RequestEntityTooLarge, ex.GetSubStatus(), ClientResources.BatchOperationTooLarge, this.inputOperations); - } if (serverRequest.Operations.Count != this.inputOperations.Count) { - // todo: should this be PayloadTooLarge - return new BatchResponse(HttpStatusCode.RequestEntityTooLarge, SubStatusCodes.Unknown, ClientResources.BatchTooLarge, this.inputOperations); + throw new RequestEntityTooLargeException(ClientResources.BatchTooLarge); } return await this.ExecuteServerRequestAsync(serverRequest, cancellationToken); @@ -99,43 +77,34 @@ public async Task ExecuteAsync(CancellationToken cancellationToke /// /// A server request with a set of operations on items. /// representing request cancellation. - /// Response from the server or ServiceUnavailable response in case of exceptions. - private async Task ExecuteServerRequestAsync(SinglePartitionKeyServerBatchRequest serverRequest, CancellationToken cancellationToken) + /// Response from the server. + private async Task ExecuteServerRequestAsync( + SinglePartitionKeyServerBatchRequest serverRequest, + CancellationToken cancellationToken) { - try - { - using (Stream serverRequestPayload = serverRequest.TransferBodyStream()) - { - Debug.Assert(serverRequestPayload != null, "Server request payload expected to be non-null"); - ResponseMessage responseMessage = await clientContext.ProcessResourceOperationStreamAsync( - this.container.LinkUri, - ResourceType.Document, - OperationType.Batch, - this.batchOptions, - this.container, - serverRequest.PartitionKey, - serverRequestPayload, - requestMessage => - { - requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchRequest, bool.TrueString); - requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchAtomic, bool.TrueString); - requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchOrdered, bool.TrueString); - }, - cancellationToken); - - return await BatchResponse.FromResponseMessageAsync( - responseMessage, - serverRequest, - this.clientContext.CosmosSerializer); - } - } - catch (CosmosException ex) + using (Stream serverRequestPayload = serverRequest.TransferBodyStream()) { - return new BatchResponse( - ex.StatusCode, - (SubStatusCodes)ex.SubStatusCode, - ex.Message, - serverRequest.Operations); + Debug.Assert(serverRequestPayload != null, "Server request payload expected to be non-null"); + ResponseMessage responseMessage = await clientContext.ProcessResourceOperationStreamAsync( + this.container.LinkUri, + ResourceType.Document, + OperationType.Batch, + this.batchOptions, + this.container, + serverRequest.PartitionKey, + serverRequestPayload, + requestMessage => + { + requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchRequest, bool.TrueString); + requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchAtomic, bool.TrueString); + requestMessage.Headers.Add(HttpConstants.HttpHeaders.IsBatchOrdered, bool.TrueString); + }, + cancellationToken); + + return await BatchResponse.FromResponseMessageAsync( + responseMessage, + serverRequest, + this.clientContext.CosmosSerializer); } } } diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs index 5d1e29fdbf..194071d89f 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs @@ -17,6 +17,6 @@ public class BatchItemRequestOptions : RequestOptions /// /// /// - public virtual IndexingDirective? IndexingDirective { get; set; } + public IndexingDirective? 
IndexingDirective { get; set; } } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/RequestOptions/ItemRequestOptions.cs b/Microsoft.Azure.Cosmos/src/RequestOptions/ItemRequestOptions.cs index a891cc80ad..7f0fd69553 100644 --- a/Microsoft.Azure.Cosmos/src/RequestOptions/ItemRequestOptions.cs +++ b/Microsoft.Azure.Cosmos/src/RequestOptions/ItemRequestOptions.cs @@ -111,7 +111,9 @@ internal override void PopulateRequestOptions(RequestMessage request) if (this.IndexingDirective != null && this.IndexingDirective.HasValue) { - request.Headers.Add(HttpConstants.HttpHeaders.IndexingDirective, this.IndexingDirective.Value.ToString()); + request.Headers.Add( + HttpConstants.HttpHeaders.IndexingDirective, + IndexingDirectiveStrings.FromIndexingDirective(this.IndexingDirective.Value)); } RequestOptions.SetSessionToken(request, this.SessionToken); diff --git a/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs b/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs index 6c44a2e728..1e81e20fe1 100644 --- a/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs +++ b/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs @@ -1019,6 +1019,90 @@ public abstract ChangeFeedProcessorBuilder GetChangeFeedEstimatorBuilder( /// /// The partition key for all items in the batch. . /// An instance of + /// + /// This API only throws on client side exceptions. This is to increase performance and prevent the overhead of throwing exceptions. Check the HTTP status code on the response to check if the operation failed. + /// + /// + /// This example atomically modifies a set of documents as a batch. + /// + /// (test1) + /// .ReplaceItem(test2.id, test2) + /// .UpsertItem(test3) + /// .DeleteItem("reading") + /// .CreateItemStream(streamPayload1) + /// .ReplaceItemStream("eating", streamPayload2) + /// .UpsertItemStream(streamPayload3) + /// .ExecuteAsync()) + /// { + /// if (!batchResponse.IsSuccessStatusCode) + /// { + /// // Handle and log exception + /// return; + /// } + /// + /// // Look up interested results - eg. via typed access on operation results + /// BatchOperationResult replaceResult = batchResponse.GetOperationResultAtIndex(0); + /// ToDoActivity readActivity = replaceResult.Resource; + /// } + /// ]]> + /// + /// + /// + /// This example atomically reads a set of documents as a batch. 
+ /// + /// resultItems = new List(); + /// foreach (BatchOperationResult operationResult in batchResponse) + /// { + /// using (StreamReader streamReader = new StreamReader(operationResult.ResourceStream)) + /// { + /// resultItems.Add(await streamReader.ReadToEndAsync()); + /// } + /// } + /// } + /// ]]> + /// + /// public abstract Batch CreateBatch(PartitionKey partitionKey); } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs b/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs index 65e432e693..d6ca98e819 100644 --- a/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs +++ b/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs @@ -406,7 +406,7 @@ public override ChangeFeedProcessorBuilder GetChangeFeedEstimatorBuilder( public override Batch CreateBatch(PartitionKey partitionKey) { - return new Batch(this, partitionKey); + return new BatchCore(this, partitionKey); } internal FeedIterator GetStandByFeedIterator( diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs index 4c27e9a5b8..b3e6688e84 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs @@ -193,21 +193,22 @@ public async Task BatchItemTimeToLiveAsync() TestDoc testDocToUpsert = await BatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1, ttlInSeconds: ttlInSeconds); testDocToUpsert.Cost++; - BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) - .CreateItemStream( - BatchTestBase.TestDocToStream(testDocToCreate, isSchematized), - BatchTestBase.GetBatchItemRequestOptions(testDocToCreate, isSchematized, ttlInSeconds: ttlInSeconds)) - .CreateItemStream( - BatchTestBase.TestDocToStream(anotherTestDocToCreate, isSchematized), - BatchTestBase.GetBatchItemRequestOptions(anotherTestDocToCreate, isSchematized)) - .ReplaceItemStream( - BatchTestBase.GetId(testDocToReplace, isSchematized), - BatchTestBase.TestDocToStream(testDocToReplace, isSchematized), - BatchTestBase.GetBatchItemRequestOptions(testDocToReplace, isSchematized, ttlInSeconds: ttlInSeconds)) - .UpsertItemStream( - BatchTestBase.TestDocToStream(testDocToUpsert, isSchematized), - BatchTestBase.GetBatchItemRequestOptions(testDocToUpsert, isSchematized, ttlInSeconds: infiniteTtl)) - .ExecuteAsync(BatchTestBase.GetUpdatedBatchRequestOptions(isSchematized: true)); + BatchCore batch = (BatchCore)(container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) + .CreateItemStream( + BatchTestBase.TestDocToStream(testDocToCreate, isSchematized), + BatchTestBase.GetBatchItemRequestOptions(testDocToCreate, isSchematized, ttlInSeconds: ttlInSeconds)) + .CreateItemStream( + BatchTestBase.TestDocToStream(anotherTestDocToCreate, isSchematized), + BatchTestBase.GetBatchItemRequestOptions(anotherTestDocToCreate, isSchematized)) + .ReplaceItemStream( + BatchTestBase.GetId(testDocToReplace, isSchematized), + BatchTestBase.TestDocToStream(testDocToReplace, isSchematized), + BatchTestBase.GetBatchItemRequestOptions(testDocToReplace, isSchematized, ttlInSeconds: ttlInSeconds)) + .UpsertItemStream( + BatchTestBase.TestDocToStream(testDocToUpsert, isSchematized), + 
BatchTestBase.GetBatchItemRequestOptions(testDocToUpsert, isSchematized, ttlInSeconds: infiniteTtl))); + + BatchResponse batchResponse = await batch.ExecuteAsync(BatchTestBase.GetUpdatedBatchRequestOptions(isSchematized: true)); BatchSinglePartitionKeyTests.VerifyBatchProcessed(batchResponse, numberOfOperations: 4); @@ -226,84 +227,6 @@ public async Task BatchItemTimeToLiveAsync() } } - [TestMethod] - [Owner("abpai")] - public async Task BatchLargerThanServerRequestAsync() - { - Container container = BatchTestBase.JsonContainer; - const int operationCount = 20; - int appxDocSize = Constants.MaxDirectModeBatchRequestBodySizeInBytes / operationCount; - - // Increase the doc size by a bit so all docs won't fit in one server request. - appxDocSize = (int)(appxDocSize * 1.05); - { - Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); - for (int i = 0; i < operationCount; i++) - { - TestDoc doc = BatchTestBase.PopulateTestDoc(this.PartitionKey1, appxDocSize); - batch.CreateItem(doc); - } - - BatchResponse batchResponse = await batch.ExecuteAsync(); - - Assert.AreEqual(HttpStatusCode.RequestEntityTooLarge, batchResponse.StatusCode); - } - - // Validate the server enforces this as well - { - Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); - for (int i = 0; i < operationCount; i++) - { - TestDoc doc = BatchTestBase.PopulateTestDoc(this.PartitionKey1, appxDocSize); - batch.CreateItem(doc); - } - - BatchResponse batchResponse = await batch.ExecuteAsync( - maxServerRequestBodyLength: int.MaxValue, - maxServerRequestOperationCount: int.MaxValue); - - Assert.AreEqual(HttpStatusCode.RequestEntityTooLarge, batchResponse.StatusCode); - } - } - - [TestMethod] - [Owner("abpai")] - public async Task BatchWithTooManyOperationsAsync() - { - Container container = BatchTestBase.JsonContainer; - await this.CreateJsonTestDocsAsync(container); - - const int operationCount = Constants.MaxOperationsInDirectModeBatchRequest + 1; - - // Validate SDK enforces this - { - Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); - for (int i = 0; i < operationCount; i++) - { - batch.ReadItem(this.TestDocPk1ExistingA.Id); - } - - BatchResponse batchResponse = await batch.ExecuteAsync(); - - Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); - } - - // Validate the server enforces this as well - { - Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)); - for (int i = 0; i < operationCount; i++) - { - batch.ReadItem(this.TestDocPk1ExistingA.Id); - } - - BatchResponse batchResponse = await batch.ExecuteAsync( - maxServerRequestBodyLength: int.MaxValue, - maxServerRequestOperationCount: int.MaxValue); - - Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); - } - } - [TestMethod] [Owner("abpai")] public async Task BatchServerResponseTooLargeAsync() @@ -355,8 +278,9 @@ public async Task BatchReadsOnlyAsync() Assert.AreEqual(this.TestDocPk1ExistingC, batchResponse.GetOperationResultAtIndex(2).Resource); } - private async Task RunCrudAsync(bool isStream, bool isSchematized, bool useEpk, Container container, RequestOptions batchOptions = null) + private async Task RunCrudAsync(bool isStream, bool isSchematized, bool useEpk, Container container) { + RequestOptions batchOptions = null; if (isSchematized) { await this.CreateSchematizedTestDocsAsync(container); @@ -389,11 +313,11 @@ private async Task RunCrudAsync(bool isStream, bool isSchematized 
.UpsertItem(testDocToUpsert) .UpsertItem(anotherTestDocToUpsert) .DeleteItem(this.TestDocPk1ExistingD.Id) - .ExecuteAsync(batchOptions); + .ExecuteAsync(); } else { - batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) + BatchCore batch = (BatchCore)(container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1)) .CreateItemStream( BatchTestBase.TestDocToStream(testDocToCreate, isSchematized), BatchTestBase.GetBatchItemRequestOptions(testDocToCreate, isSchematized)) @@ -412,8 +336,9 @@ private async Task RunCrudAsync(bool isStream, bool isSchematized BatchTestBase.GetBatchItemRequestOptions(anotherTestDocToUpsert, isSchematized)) .DeleteItem( BatchTestBase.GetId(this.TestDocPk1ExistingD, isSchematized), - BatchTestBase.GetBatchItemRequestOptions(this.TestDocPk1ExistingD, isSchematized)) - .ExecuteAsync(batchOptions); + BatchTestBase.GetBatchItemRequestOptions(this.TestDocPk1ExistingD, isSchematized))); + + batchResponse = await batch.ExecuteAsync(batchOptions); } BatchSinglePartitionKeyTests.VerifyBatchProcessed(batchResponse, numberOfOperations: 6); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs index 13ecd5e744..bf5a1f5ed0 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs @@ -30,23 +30,30 @@ public class BatchUnitTests [Owner("abpai")] public async Task BatchInvalidOptionsAsync() { - Container container = BatchUnitTests.GetCosmosContainer(); - + Container container = BatchUnitTests.GetContainer(); List badBatchOptionsList = new List() { new RequestOptions() { IfMatchEtag = "cond", + }, + new RequestOptions() + { + IfNoneMatchEtag = "cond2", } }; foreach (RequestOptions batchOptions in badBatchOptionsList) { - BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) - .ReadItem("someId") - .ExecuteAsync(batchOptions); - - Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); + BatchCore batch = (BatchCore)( + container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) + .ReadItem("someId")); + + await BatchUnitTests.VerifyExceptionThrownOnExecuteAsync( + batch, + typeof(ArgumentException), + ClientResources.BatchRequestOptionNotSupported, + batchOptions); } } @@ -54,7 +61,7 @@ public async Task BatchInvalidOptionsAsync() [Owner("abpai")] public async Task BatchInvalidItemOptionsAsync() { - Container container = BatchUnitTests.GetCosmosContainer(); + Container container = BatchUnitTests.GetContainer(); List badItemOptionsList = new List() { @@ -78,11 +85,12 @@ public async Task BatchInvalidItemOptionsAsync() foreach (BatchItemRequestOptions itemOptions in badItemOptionsList) { - BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) - .ReplaceItem("someId", new TestItem("repl"), itemOptions) - .ExecuteAsync(); + Batch batch = container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) + .ReplaceItem("someId", new TestItem("repl"), itemOptions); - Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); + await BatchUnitTests.VerifyExceptionThrownOnExecuteAsync( + batch, + typeof(ArgumentException)); } } @@ -90,11 +98,53 @@ public async Task BatchInvalidItemOptionsAsync() [Owner("abpai")] public async Task 
BatchNoOperationsAsync() { - Container container = BatchUnitTests.GetCosmosContainer(); - BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) - .ExecuteAsync(); + Container container = BatchUnitTests.GetContainer(); + Batch batch = container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)); + await BatchUnitTests.VerifyExceptionThrownOnExecuteAsync( + batch, + typeof(ArgumentException), + ClientResources.BatchNoOperations); + } - Assert.AreEqual(HttpStatusCode.BadRequest, batchResponse.StatusCode); + [TestMethod] + [Owner("abpai")] + public async Task BatchLargerThanServerRequestAsync() + { + Container container = BatchUnitTests.GetContainer(); + const int operationCount = 20; + int appxDocSize = Constants.MaxDirectModeBatchRequestBodySizeInBytes / operationCount; + + // Increase the doc size by a bit so all docs won't fit in one server request. + appxDocSize = (int)(appxDocSize * 1.05); + Batch batch = container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)); + for (int i = 0; i < operationCount; i++) + { + TestItem testItem = new TestItem(new string('x', appxDocSize)); + batch.CreateItem(testItem); + } + + await BatchUnitTests.VerifyExceptionThrownOnExecuteAsync( + batch, + typeof(RequestEntityTooLargeException)); + } + + [TestMethod] + [Owner("abpai")] + public async Task BatchWithTooManyOperationsAsync() + { + Container container = BatchUnitTests.GetContainer(); + const int operationCount = Constants.MaxOperationsInDirectModeBatchRequest + 1; + + Batch batch = container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)); + for (int i = 0; i < operationCount; i++) + { + batch.ReadItem("someId"); + } + + await BatchUnitTests.VerifyExceptionThrownOnExecuteAsync( + batch, + typeof(ArgumentException), + ClientResources.BatchTooLarge); } [TestMethod] @@ -241,7 +291,7 @@ public async Task BatchCrudRequestAsync() return Task.FromResult(new ResponseMessage(HttpStatusCode.OK)); }); - Container container = BatchUnitTests.GetCosmosContainer(testHandler); + Container container = BatchUnitTests.GetContainer(testHandler); BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) .CreateItem(createItem) @@ -307,7 +357,7 @@ public async Task BatchSingleServerResponseAsync() return responseMessage; }); - Container container = BatchUnitTests.GetCosmosContainer(testHandler); + Container container = BatchUnitTests.GetContainer(testHandler); BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1)) .ReadItem("id1") @@ -420,7 +470,41 @@ private static void VerifyBatchItemRequestOptionsAreEqual(BatchItemRequestOption } } - private static Container GetCosmosContainer(TestHandler testHandler = null) + private static async Task VerifyExceptionThrownOnExecuteAsync( + Batch batch, + Type expectedTypeOfException, + string expectedExceptionMessage = null, + RequestOptions requestOptions = null) + { + bool wasExceptionThrown = false; + try + { + if (requestOptions != null) + { + await ((BatchCore)batch).ExecuteAsync(requestOptions); + } + else + { + await batch.ExecuteAsync(); + } + } + catch (Exception ex) + { + Assert.AreEqual(expectedTypeOfException, ex.GetType()); + if (expectedExceptionMessage != null) + { + Assert.IsTrue(ex.Message.Contains(expectedExceptionMessage)); + } + wasExceptionThrown = true; + } + + if (!wasExceptionThrown) + { + Assert.Fail("Exception was expected to be thrown but 
was not."); + } + } + + private static Container GetContainer(TestHandler testHandler = null) { CosmosClient client; if (testHandler != null) @@ -437,22 +521,6 @@ private static Container GetCosmosContainer(TestHandler testHandler = null) return container; } - private static ContainerCore GetMockedContainer(string containerName = null) - { - Mock mockedContainer = MockCosmosUtil.CreateMockContainer(containerName: containerName); - mockedContainer.Setup(c => c.ClientContext).Returns(BatchUnitTests.GetMockedClientContext()); - return mockedContainer.Object; - } - - private static CosmosClientContext GetMockedClientContext() - { - Mock mockContext = new Mock(); - mockContext.Setup(x => x.ClientOptions).Returns(MockCosmosUtil.GetDefaultConfiguration()); - mockContext.Setup(x => x.DocumentClient).Returns(new MockDocumentClient()); - mockContext.Setup(x => x.CosmosSerializer).Returns(new CosmosJsonSerializerCore()); - return mockContext.Object; - } - private static TestItem Deserialize(Memory body, CosmosSerializer serializer) { return serializer.FromStream(new MemoryStream(body.Span.ToArray())); @@ -493,7 +561,7 @@ private static void VerifyServerRequestProperties(RequestMessage request) UriKind.Relative); Assert.AreEqual(expectedRequestUri, request.RequestUri); } - } + } private class TestItem { From 2232c97df306cd25ee4feaa4f8bcf5a786e6d759 Mon Sep 17 00:00:00 2001 From: "Abhijit P. Pai" Date: Sat, 6 Jul 2019 11:39:02 +0530 Subject: [PATCH 10/12] Missed file --- Microsoft.Azure.Cosmos/src/Batch/Batch.cs | 120 ++++++++++++++++++++++ 1 file changed, 120 insertions(+) create mode 100644 Microsoft.Azure.Cosmos/src/Batch/Batch.cs diff --git a/Microsoft.Azure.Cosmos/src/Batch/Batch.cs b/Microsoft.Azure.Cosmos/src/Batch/Batch.cs new file mode 100644 index 0000000000..202c2bf8d3 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Batch/Batch.cs @@ -0,0 +1,120 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos +{ + using System.IO; + using System.Threading; + using System.Threading.Tasks; + + /// + /// Represents a batch of requests that will be performed atomically against the Azure Cosmos DB service. + /// + public abstract class Batch + { + /// + /// Adds an operation to create an item into the batch. + /// + /// A JSON serializable object that must contain an id property. to implement a custom serializer. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + /// The type of item to be created. + public abstract Batch CreateItem( + T item, + BatchItemRequestOptions requestOptions = null); + + /// + /// Adds an operation to create an item into the batch. + /// + /// + /// A containing the payload of the item. + /// The stream must have a UTF-8 encoded JSON object which contains an id property. + /// + /// (Optional) The options for the item request. . + /// The instance with the operation added. + public abstract Batch CreateItemStream( + Stream streamPayload, + BatchItemRequestOptions requestOptions = null); + + /// + /// Adds an operation to read an item into the batch. + /// + /// The cosmos item id. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + public abstract Batch ReadItem( + string id, + BatchItemRequestOptions requestOptions = null); + + /// + /// Adds an operation to upsert an item into the batch. 
+ /// + /// A JSON serializable object that must contain an id property. to implement a custom serializer. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + /// The type of item to be created. + public abstract Batch UpsertItem( + T item, + BatchItemRequestOptions requestOptions = null); + + /// + /// Adds an operation to upsert an item into the batch. + /// + /// + /// A containing the payload of the item. + /// The stream must have a UTF-8 encoded JSON object which contains an id property. + /// + /// (Optional) The options for the item request. . + /// The instance with the operation added. + public abstract Batch UpsertItemStream( + Stream streamPayload, + BatchItemRequestOptions requestOptions = null); + + /// + /// Adds an operation to replace an item into the batch. + /// + /// The cosmos item id. + /// A JSON serializable object that must contain an id property. to implement a custom serializer. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + /// The type of item to be created. + public abstract Batch ReplaceItem( + string id, + T item, + BatchItemRequestOptions requestOptions = null); + + /// + /// Adds an operation to replace an item into the batch. + /// + /// The cosmos item id. + /// + /// A containing the payload of the item. + /// The stream must have a UTF-8 encoded JSON object which contains an id property. + /// + /// (Optional) The options for the item request. . + /// The instance with the operation added. + public abstract Batch ReplaceItemStream( + string id, + Stream streamPayload, + BatchItemRequestOptions requestOptions = null); + + /// + /// Adds an operation to delete an item into the batch. + /// + /// The cosmos item id. + /// (Optional) The options for the item request. . + /// The instance with the operation added. + public abstract Batch DeleteItem( + string id, + BatchItemRequestOptions requestOptions = null); + + /// + /// Executes the batch at the Azure Cosmos service as an asynchronous operation. + /// + /// (Optional) representing request cancellation. + /// An awaitable which contains the completion status and results of each operation. + public abstract Task ExecuteAsync( + CancellationToken cancellationToken = default(CancellationToken)); + } +} From 91ab2f768cabdc5612d5afc928b46995f31afcf7 Mon Sep 17 00:00:00 2001 From: "Abhijit P. Pai" Date: Tue, 9 Jul 2019 14:24:58 +0530 Subject: [PATCH 11/12] Move batch public contract under PREVIEW pre-processor directive --- Microsoft.Azure.Cosmos/src/Batch/Batch.cs | 7 +++++- .../src/Batch/BatchItemRequestOptions.cs | 7 +++++- .../src/Batch/BatchOperationResult.cs | 14 ++++++++++-- .../src/Batch/BatchResponse.cs | 7 +++++- .../src/Resource/Container/Container.cs | 2 ++ .../Resource/Container/ContainerCore.Items.cs | 2 ++ .../Batch/BatchSinglePartitionKeyTests.cs | 22 +++++++++---------- .../Batch/BatchTestBase.cs | 2 +- .../Batch/BatchUnitTests.cs | 14 ++++++------ 9 files changed, 53 insertions(+), 24 deletions(-) diff --git a/Microsoft.Azure.Cosmos/src/Batch/Batch.cs b/Microsoft.Azure.Cosmos/src/Batch/Batch.cs index 202c2bf8d3..c731e5c551 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/Batch.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/Batch.cs @@ -11,7 +11,12 @@ namespace Microsoft.Azure.Cosmos /// /// Represents a batch of requests that will be performed atomically against the Azure Cosmos DB service. 
/// - public abstract class Batch +#if PREVIEW + public +#else + internal +#endif + abstract class Batch { /// /// Adds an operation to create an item into the batch. diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs index 194071d89f..3611886715 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchItemRequestOptions.cs @@ -7,7 +7,12 @@ namespace Microsoft.Azure.Cosmos /// /// that apply to operations within a . /// - public class BatchItemRequestOptions : RequestOptions +#if PREVIEW + public +#else + internal +#endif + class BatchItemRequestOptions : RequestOptions { /// /// Gets or sets the indexing directive (Include or Exclude) for the request in the Azure Cosmos DB service. diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchOperationResult.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchOperationResult.cs index 085060311e..26a1719881 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchOperationResult.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchOperationResult.cs @@ -14,7 +14,12 @@ namespace Microsoft.Azure.Cosmos /// /// Represents a result for a specific operation that was part of a batch request. /// - public class BatchOperationResult +#if PREVIEW + public +#else + internal +#endif + class BatchOperationResult { internal BatchOperationResult(HttpStatusCode statusCode) { @@ -178,7 +183,12 @@ private static Result ReadOperationResult(ref RowReader reader, out BatchOperati /// /// The type of the Resource which this class wraps. #pragma warning disable SA1402 // File may only contain a single type - public class BatchOperationResult : BatchOperationResult +#if PREVIEW + public +#else + internal +#endif + class BatchOperationResult : BatchOperationResult #pragma warning restore SA1402 // File may only contain a single type { /// diff --git a/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs b/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs index b4d28e9650..8647df8226 100644 --- a/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs +++ b/Microsoft.Azure.Cosmos/src/Batch/BatchResponse.cs @@ -17,7 +17,12 @@ namespace Microsoft.Azure.Cosmos /// Response of a batch request. /// #pragma warning disable CA1710 // Identifiers should have correct suffix - public class BatchResponse : IReadOnlyList, IDisposable +#if PREVIEW + public +#else + internal +#endif + class BatchResponse : IReadOnlyList, IDisposable #pragma warning restore CA1710 // Identifiers should have correct suffix { private bool isDisposed; diff --git a/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs b/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs index 1e81e20fe1..44ec527e35 100644 --- a/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs +++ b/Microsoft.Azure.Cosmos/src/Resource/Container/Container.cs @@ -1014,6 +1014,7 @@ public abstract ChangeFeedProcessorBuilder GetChangeFeedEstimatorBuilder( ChangesEstimationHandler estimationDelegate, TimeSpan? estimationPeriod = null); +#if PREVIEW /// /// Initializes a new instance of the class. 
         /// 
@@ -1104,5 +1105,6 @@ public abstract ChangeFeedProcessorBuilder GetChangeFeedEstimatorBuilder(
         /// 
         /// 
         public abstract Batch CreateBatch(PartitionKey partitionKey);
+#endif
     }
 }
\ No newline at end of file
diff --git a/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs b/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs
index d6ca98e819..0b3fad1ee0 100644
--- a/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs
+++ b/Microsoft.Azure.Cosmos/src/Resource/Container/ContainerCore.Items.cs
@@ -404,10 +404,12 @@ public override ChangeFeedProcessorBuilder GetChangeFeedEstimatorBuilder(
                 applyBuilderConfiguration: changeFeedEstimatorCore.ApplyBuildConfiguration);
         }
 
+#if PREVIEW
         public override Batch CreateBatch(PartitionKey partitionKey)
        {
             return new BatchCore(this, partitionKey);
         }
+#endif
 
         internal FeedIterator GetStandByFeedIterator(
             string continuationToken = null,
diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs
index b3e6688e84..4fd5e48066 100644
--- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs
+++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchSinglePartitionKeyTests.cs
@@ -95,7 +95,7 @@ public async Task BatchOrderedAsync()
             TestDoc replaceDoc = this.GetTestDocCopy(firstDoc);
             replaceDoc.Cost += 20;
 
-            BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1))
+            BatchResponse batchResponse = await new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1))
                 .CreateItem(firstDoc)
                 .ReplaceItem(replaceDoc.Id, replaceDoc)
                 .ExecuteAsync();
@@ -131,7 +131,7 @@ public async Task BatchItemETagAsync()
                 IfMatchEtag = readResponse.ETag
             };
 
-            BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1))
+            BatchResponse batchResponse = await new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1))
                 .CreateItem(testDocToCreate)
                 .ReplaceItem(testDocToReplace.Id, testDocToReplace, requestOptions: firstReplaceOptions)
                 .ExecuteAsync();
@@ -154,7 +154,7 @@ public async Task BatchItemETagAsync()
                 IfMatchEtag = BatchTestBase.Random.Next().ToString()
             };
 
-            BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1))
+            BatchResponse batchResponse = await new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1))
                 .ReplaceItem(testDocToReplace.Id, testDocToReplace, requestOptions: replaceOptions)
                 .ExecuteAsync();
 
@@ -193,7 +193,7 @@ public async Task BatchItemTimeToLiveAsync()
             TestDoc testDocToUpsert = await BatchTestBase.CreateSchematizedTestDocAsync(container, this.PartitionKey1, ttlInSeconds: ttlInSeconds);
             testDocToUpsert.Cost++;
 
-            BatchCore batch = (BatchCore)(container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1))
+            BatchCore batch = (BatchCore)(new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1))
                 .CreateItemStream(
                     BatchTestBase.TestDocToStream(testDocToCreate, isSchematized),
                     BatchTestBase.GetBatchItemRequestOptions(testDocToCreate, isSchematized, ttlInSeconds: ttlInSeconds))
@@ -237,7 +237,7 @@ public async Task BatchServerResponseTooLargeAsync()
 
             TestDoc doc = await BatchTestBase.CreateJsonTestDocAsync(container, this.PartitionKey1, appxDocSizeInBytes);
 
-            Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1));
+            Batch batch = new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1));
             for (int i = 0; i < operationCount; i++)
             {
                 batch.ReadItem(doc.Id);
@@ -261,7 +261,7 @@ public async Task BatchReadsOnlyAsync()
             Container container = BatchTestBase.JsonContainer;
             await this.CreateJsonTestDocsAsync(container);
 
-            BatchResponse batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1))
+            BatchResponse batchResponse = await new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1))
                 .ReadItem(this.TestDocPk1ExistingA.Id)
                 .ReadItem(this.TestDocPk1ExistingB.Id)
                 .ReadItem(this.TestDocPk1ExistingC.Id)
@@ -306,7 +306,7 @@ private async Task RunCrudAsync(bool isStream, bool isSchematized
             BatchResponse batchResponse;
             if (!isStream)
             {
-                batchResponse = await container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1))
+                batchResponse = await new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1))
                     .CreateItem(testDocToCreate)
                     .ReadItem(this.TestDocPk1ExistingC.Id)
                     .ReplaceItem(testDocToReplace.Id, testDocToReplace)
@@ -317,7 +317,7 @@ private async Task RunCrudAsync(bool isStream, bool isSchematized
             }
             else
             {
-                BatchCore batch = (BatchCore)(container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1))
+                BatchCore batch = (BatchCore)(new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1))
                     .CreateItemStream(
                         BatchTestBase.TestDocToStream(testDocToCreate, isSchematized),
                         BatchTestBase.GetBatchItemRequestOptions(testDocToCreate, isSchematized))
@@ -408,8 +408,8 @@ public async Task BatchRateLimitingAsync()
 
         private async Task RunTwoLargeBatchesAsync(Container container)
         {
-            Batch batch1 = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1));
-            Batch batch2 = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1));
+            Batch batch1 = new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1));
+            Batch batch2 = new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1));
 
             for (int i = 0; i < Constants.MaxOperationsInDirectModeBatchRequest; i++)
             {
@@ -536,7 +536,7 @@ private async Task RunWithErrorAsync(
             TestDoc testDocToCreate = BatchTestBase.PopulateTestDoc(this.PartitionKey1);
             TestDoc anotherTestDocToCreate = BatchTestBase.PopulateTestDoc(this.PartitionKey1);
 
-            Batch batch = container.CreateBatch(BatchTestBase.GetPartitionKey(this.PartitionKey1))
+            Batch batch = new BatchCore((ContainerCore)container, BatchTestBase.GetPartitionKey(this.PartitionKey1))
                 .CreateItem(testDocToCreate);
 
             appendOperation(batch);
diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs
index e759721870..9871ec8b4d 100644
--- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs
+++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs
@@ -343,7 +343,7 @@ protected static string GetId(TestDoc doc, bool isSchematized)
             return doc.Id;
         }
 
-        protected static BatchItemRequestOptions GetBatchItemRequestOptions(TestDoc doc, bool isSchematized, bool useEpk = false, int? ttlInSeconds = null)
+        internal static BatchItemRequestOptions GetBatchItemRequestOptions(TestDoc doc, bool isSchematized, bool useEpk = false, int? ttlInSeconds = null)
         {
             BatchItemRequestOptions requestOptions = new BatchItemRequestOptions()
             {
diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs
index bf5a1f5ed0..9c55b65ba5 100644
--- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs
+++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Batch/BatchUnitTests.cs
@@ -46,7 +46,7 @@ public async Task BatchInvalidOptionsAsync()
             foreach (RequestOptions batchOptions in badBatchOptionsList)
             {
                 BatchCore batch = (BatchCore)(
-                    container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1))
+                    new BatchCore((ContainerCore)container, new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1))
                     .ReadItem("someId"));
 
                 await BatchUnitTests.VerifyExceptionThrownOnExecuteAsync(
@@ -85,7 +85,7 @@ public async Task BatchInvalidItemOptionsAsync()
 
             foreach (BatchItemRequestOptions itemOptions in badItemOptionsList)
             {
-                Batch batch = container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1))
+                Batch batch = new BatchCore((ContainerCore)container, new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1))
                     .ReplaceItem("someId", new TestItem("repl"), itemOptions);
 
                 await BatchUnitTests.VerifyExceptionThrownOnExecuteAsync(
@@ -99,7 +99,7 @@ await BatchUnitTests.VerifyExceptionThrownOnExecuteAsync(
         public async Task BatchNoOperationsAsync()
         {
             Container container = BatchUnitTests.GetContainer();
-            Batch batch = container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1));
+            Batch batch = new BatchCore((ContainerCore)container, new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1));
             await BatchUnitTests.VerifyExceptionThrownOnExecuteAsync(
                 batch,
                 typeof(ArgumentException),
@@ -116,7 +116,7 @@ public async Task BatchLargerThanServerRequestAsync()
             // Increase the doc size by a bit so all docs won't fit in one server request.
             appxDocSize = (int)(appxDocSize * 1.05);
 
-            Batch batch = container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1));
+            Batch batch = new BatchCore((ContainerCore)container, new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1));
             for (int i = 0; i < operationCount; i++)
             {
                 TestItem testItem = new TestItem(new string('x', appxDocSize));
@@ -135,7 +135,7 @@ public async Task BatchWithTooManyOperationsAsync()
             Container container = BatchUnitTests.GetContainer();
             const int operationCount = Constants.MaxOperationsInDirectModeBatchRequest + 1;
 
-            Batch batch = container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1));
+            Batch batch = new BatchCore((ContainerCore)container, new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1));
             for (int i = 0; i < operationCount; i++)
             {
                 batch.ReadItem("someId");
@@ -293,7 +293,7 @@ public async Task BatchCrudRequestAsync()
 
             Container container = BatchUnitTests.GetContainer(testHandler);
 
-            BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1))
+            BatchResponse batchResponse = await new BatchCore((ContainerCore)container, new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1))
                 .CreateItem(createItem)
                 .ReadItem(readId)
                 .ReplaceItem(replaceItem.Id, replaceItem)
@@ -359,7 +359,7 @@ public async Task BatchSingleServerResponseAsync()
 
             Container container = BatchUnitTests.GetContainer(testHandler);
 
-            BatchResponse batchResponse = await container.CreateBatch(new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1))
+            BatchResponse batchResponse = await new BatchCore((ContainerCore)container, new Cosmos.PartitionKey(BatchUnitTests.PartitionKey1))
                 .ReadItem("id1")
                 .ReadItem("id2")
                 .ExecuteAsync();

From bdd440fb0e8d63e2fa60055dc856e51b45067fbc Mon Sep 17 00:00:00 2001
From: kirankumarkolli
Date: Tue, 16 Jul 2019 23:57:44 +0530
Subject: [PATCH 12/12] Fixing clean-up

---
 .../Batch/BatchTestBase.cs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs
index 9871ec8b4d..98047b1c1e 100644
--- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs
+++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Batch/BatchTestBase.cs
@@ -174,12 +174,12 @@ public static void ClassClean()
 
             if (BatchTestBase.Database != null)
             {
-                BatchTestBase.Database.DeleteAsync().GetAwaiter().GetResult();
+                BatchTestBase.Database.DeleteStreamAsync().GetAwaiter().GetResult();
             }
 
             if (BatchTestBase.SharedThroughputDatabase != null)
             {
-                BatchTestBase.SharedThroughputDatabase.DeleteAsync().GetAwaiter().GetResult();
+                BatchTestBase.SharedThroughputDatabase.DeleteStreamAsync().GetAwaiter().GetResult();
             }
 
             BatchTestBase.Client.Dispose();