Skip to content

Commit

Permalink
Use ESBlobStoreRepositoryIntegTestCase to test the repository-s3 plugin (#29315)
Browse files Browse the repository at this point in the history

This commit adds the S3BlobStoreRepositoryTests class that extends the
base testing class for S3. It also removes some usage of socket servers
that emulate socket connections in unit tests. These servers were added to
trigger security exceptions, but they are no longer needed now that #29296
is merged.
  • Loading branch information
tlrx committed Jun 7, 2018
1 parent b7c2cd1 commit 5c3a40b
Show file tree
Hide file tree
Showing 5 changed files with 223 additions and 245 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -151,8 +151,7 @@ class S3Repository extends BlobStoreRepository {
/**
* Constructs an s3 backed repository
*/
S3Repository(RepositoryMetaData metadata, Settings settings,
NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) throws IOException {
S3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) {
super(metadata, settings, namedXContentRegistry);

String bucket = BUCKET_SETTING.get(metadata.settings());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,14 @@
package org.elasticsearch.repositories.s3;

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.s3.AbstractAmazonS3;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.CopyObjectResult;
import com.amazonaws.services.s3.model.DeleteObjectRequest;
import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.DeleteObjectsResult;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
Expand All @@ -37,203 +37,163 @@
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.Streams;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.Objects;
import java.util.concurrent.ConcurrentMap;

import static org.junit.Assert.assertTrue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

class MockAmazonS3 extends AbstractAmazonS3 {

private final int mockSocketPort;

private Map<String, InputStream> blobs = new ConcurrentHashMap<>();

// in ESBlobStoreContainerTestCase.java, the maximum
// length of the input data is 100 bytes
private byte[] byteCounter = new byte[100];


MockAmazonS3(int mockSocketPort) {
this.mockSocketPort = mockSocketPort;
private final ConcurrentMap<String, byte[]> blobs;
private final String bucket;
private final boolean serverSideEncryption;
private final String cannedACL;
private final String storageClass;

MockAmazonS3(final ConcurrentMap<String, byte[]> blobs,
final String bucket,
final boolean serverSideEncryption,
final String cannedACL,
final String storageClass) {
this.blobs = Objects.requireNonNull(blobs);
this.bucket = Objects.requireNonNull(bucket);
this.serverSideEncryption = serverSideEncryption;
this.cannedACL = cannedACL;
this.storageClass = storageClass;
}

// Simulates a socket connection to check that SocketAccess.doPrivileged() is used correctly.
// Any method of AmazonS3 might potentially open a socket to the S3 service. Firstly, a call
// to any method of AmazonS3 has to be wrapped by SocketAccess.doPrivileged().
// Secondly, each method on the stack from doPrivileged to opening the socket has to be
// located in a jar that is provided by the plugin.
// Thirdly, a SocketPermission has to be configured in plugin-security.policy.
// By opening a socket in each method of MockAmazonS3 it is ensured that in production AmazonS3
// is able to open a socket to the S3 service without causing a SecurityException.
// Throws UncheckedIOException if the connection to the local mock server fails.
private void simulateS3SocketConnection() {
try (Socket socket = new Socket(InetAddress.getByName("127.0.0.1"), mockSocketPort)) {
assertTrue(socket.isConnected()); // NOOP to keep static analysis happy
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}


@Override
public boolean doesBucketExist(String bucket) {
return true;
public boolean doesBucketExist(final String bucket) {
return this.bucket.equalsIgnoreCase(bucket);
}

@Override
public boolean doesObjectExist(String bucketName, String objectName) throws AmazonServiceException, SdkClientException {
simulateS3SocketConnection();
public boolean doesObjectExist(final String bucketName, final String objectName) throws SdkClientException {
assertThat(bucketName, equalTo(bucket));
return blobs.containsKey(objectName);
}

// Returns empty metadata for an existing blob; throws AmazonS3Exception if the blob is absent.
// NOTE(review): unlike getObject/deleteObject, no 404 status code is set on the exception here.
@Override
public ObjectMetadata getObjectMetadata(
GetObjectMetadataRequest getObjectMetadataRequest)
throws AmazonClientException, AmazonServiceException {
simulateS3SocketConnection();
String blobName = getObjectMetadataRequest.getKey();

if (!blobs.containsKey(blobName)) {
throw new AmazonS3Exception("[" + blobName + "] does not exist.");
}

return new ObjectMetadata(); // nothing is done with it
}

@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
throws AmazonClientException, AmazonServiceException {
simulateS3SocketConnection();
String blobName = putObjectRequest.getKey();

if (blobs.containsKey(blobName)) {
throw new AmazonS3Exception("[" + blobName + "] already exists.");
public PutObjectResult putObject(final PutObjectRequest request) throws AmazonClientException {
assertThat(request.getBucketName(), equalTo(bucket));
assertThat(request.getMetadata().getSSEAlgorithm(), serverSideEncryption ? equalTo("AES256") : nullValue());
assertThat(request.getCannedAcl(), notNullValue());
assertThat(request.getCannedAcl().toString(), cannedACL != null ? equalTo(cannedACL) : equalTo("private"));
assertThat(request.getStorageClass(), storageClass != null ? equalTo(storageClass) : equalTo("STANDARD"));


final String blobName = request.getKey();
final ByteArrayOutputStream out = new ByteArrayOutputStream();
try {
Streams.copy(request.getInputStream(), out);
blobs.put(blobName, out.toByteArray());
} catch (IOException e) {
throw new AmazonClientException(e);
}

blobs.put(blobName, putObjectRequest.getInputStream());
return new PutObjectResult();
}

@Override
public S3Object getObject(GetObjectRequest getObjectRequest)
throws AmazonClientException, AmazonServiceException {
simulateS3SocketConnection();
// in ESBlobStoreContainerTestCase.java, the prefix is empty,
// so the key and blobName are equivalent to each other
String blobName = getObjectRequest.getKey();

if (!blobs.containsKey(blobName)) {
final AmazonS3Exception e = new AmazonS3Exception("[" + blobName + "] does not exist.");
e.setStatusCode(404);
throw e;
public S3Object getObject(final GetObjectRequest request) throws AmazonClientException {
assertThat(request.getBucketName(), equalTo(bucket));

final String blobName = request.getKey();
final byte[] content = blobs.get(blobName);
if (content == null) {
AmazonS3Exception exception = new AmazonS3Exception("[" + blobName + "] does not exist.");
exception.setStatusCode(404);
throw exception;
}

// the HTTP request attribute is irrelevant for reading
S3ObjectInputStream stream = new S3ObjectInputStream(
blobs.get(blobName), null, false);
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(content.length);

S3Object s3Object = new S3Object();
s3Object.setObjectContent(stream);
s3Object.setObjectContent(new S3ObjectInputStream(new ByteArrayInputStream(content), null, false));
s3Object.setKey(blobName);
s3Object.setObjectMetadata(metadata);

return s3Object;
}

@Override
public ObjectListing listObjects(ListObjectsRequest listObjectsRequest)
throws AmazonClientException, AmazonServiceException {
simulateS3SocketConnection();
MockObjectListing list = new MockObjectListing();
list.setTruncated(false);

String blobName;
String prefix = listObjectsRequest.getPrefix();

ArrayList<S3ObjectSummary> mockObjectSummaries = new ArrayList<>();

for (Map.Entry<String, InputStream> blob : blobs.entrySet()) {
blobName = blob.getKey();
S3ObjectSummary objectSummary = new S3ObjectSummary();

if (prefix.isEmpty() || blobName.startsWith(prefix)) {
objectSummary.setKey(blobName);

try {
objectSummary.setSize(getSize(blob.getValue()));
} catch (IOException e) {
throw new AmazonS3Exception("Object listing " +
"failed for blob [" + blob.getKey() + "]");
}

mockObjectSummaries.add(objectSummary);
public ObjectListing listObjects(final ListObjectsRequest request) throws AmazonClientException {
assertThat(request.getBucketName(), equalTo(bucket));

final ObjectListing listing = new ObjectListing();
listing.setBucketName(request.getBucketName());
listing.setPrefix(request.getPrefix());

for (Map.Entry<String, byte[]> blob : blobs.entrySet()) {
if (Strings.isEmpty(request.getPrefix()) || blob.getKey().startsWith(request.getPrefix())) {
S3ObjectSummary summary = new S3ObjectSummary();
summary.setBucketName(request.getBucketName());
summary.setKey(blob.getKey());
summary.setSize(blob.getValue().length);
listing.getObjectSummaries().add(summary);
}
}

list.setObjectSummaries(mockObjectSummaries);
return list;
return listing;
}

@Override
public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest)
throws AmazonClientException, AmazonServiceException {
simulateS3SocketConnection();
String sourceBlobName = copyObjectRequest.getSourceKey();
String targetBlobName = copyObjectRequest.getDestinationKey();

if (!blobs.containsKey(sourceBlobName)) {
final AmazonS3Exception e = new AmazonS3Exception("Source blob [" +
sourceBlobName + "] does not exist.");
e.setStatusCode(404);
throw e;
}
public CopyObjectResult copyObject(final CopyObjectRequest request) throws AmazonClientException {
assertThat(request.getSourceBucketName(), equalTo(bucket));
assertThat(request.getDestinationBucketName(), equalTo(bucket));

if (blobs.containsKey(targetBlobName)) {
throw new AmazonS3Exception("Target blob [" +
targetBlobName + "] already exists.");
final String sourceBlobName = request.getSourceKey();

final byte[] content = blobs.get(sourceBlobName);
if (content == null) {
AmazonS3Exception exception = new AmazonS3Exception("[" + sourceBlobName + "] does not exist.");
exception.setStatusCode(404);
throw exception;
}

blobs.put(targetBlobName, blobs.get(sourceBlobName));
return new CopyObjectResult(); // nothing is done with it
blobs.put(request.getDestinationKey(), content);
return new CopyObjectResult();
}

@Override
public void deleteObject(DeleteObjectRequest deleteObjectRequest)
throws AmazonClientException, AmazonServiceException {
simulateS3SocketConnection();
String blobName = deleteObjectRequest.getKey();

if (!blobs.containsKey(blobName)) {
final AmazonS3Exception e = new AmazonS3Exception("[" + blobName + "] does not exist.");
e.setStatusCode(404);
throw e;
public void deleteObject(final DeleteObjectRequest request) throws AmazonClientException {
assertThat(request.getBucketName(), equalTo(bucket));

final String blobName = request.getKey();
if (blobs.remove(blobName) == null) {
AmazonS3Exception exception = new AmazonS3Exception("[" + blobName + "] does not exist.");
exception.setStatusCode(404);
throw exception;
}

blobs.remove(blobName);
}

// Returns the number of bytes read into byteCounter, i.e. the blob size for inputs
// of at most 100 bytes (see the byteCounter field's comment).
// NOTE(review): assumes the stream supports reset() back to its start without a prior
// mark(), e.g. a ByteArrayInputStream — confirm against callers.
private int getSize(InputStream stream) throws IOException {
int size = stream.read(byteCounter);
stream.reset(); // in case we ever need the size again
return size;
}

private class MockObjectListing extends ObjectListing {
// the objectSummaries attribute in ObjectListing.java
// is read-only, but we need to be able to write to it,
// so we create a mock of it to work around this
private List<S3ObjectSummary> mockObjectSummaries;

@Override
public List<S3ObjectSummary> getObjectSummaries() {
return mockObjectSummaries;
}

private void setObjectSummaries(List<S3ObjectSummary> objectSummaries) {
mockObjectSummaries = objectSummaries;
@Override
public DeleteObjectsResult deleteObjects(DeleteObjectsRequest request) throws SdkClientException {
    assertThat(request.getBucketName(), equalTo(bucket));

    final List<DeleteObjectsResult.DeletedObject> deletions = new ArrayList<>();
    for (DeleteObjectsRequest.KeyVersion key : request.getKeys()) {
        if (blobs.remove(key.getKey()) == null) {
            // Fix: report the blob name (key.getKey()), not KeyVersion's default toString(),
            // so the message matches the other "[name] does not exist." errors in this class.
            final AmazonS3Exception exception = new AmazonS3Exception("[" + key.getKey() + "] does not exist.");
            exception.setStatusCode(404);
            throw exception;
        } else {
            DeleteObjectsResult.DeletedObject deletion = new DeleteObjectsResult.DeletedObject();
            deletion.setKey(key.getKey());
            deletions.add(deletion);
        }
    }
    return new DeleteObjectsResult(deletions);
}
}
Loading

0 comments on commit 5c3a40b

Please sign in to comment.