Skip to content

Commit

Permalink
CR: comments
Browse files Browse the repository at this point in the history
  • Loading branch information
original-brownbear committed Jun 5, 2020
1 parent bf095d9 commit c9ab2fc
Show file tree
Hide file tree
Showing 3 changed files with 22 additions and 79 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -1178,6 +1178,7 @@ public void testDeduplicateIndexMetadata() throws Exception {
final String repositoryName = "repo-" + indexName;
final String snapshot0 = "snapshot-0";
final String snapshot1 = "snapshot-1";
final String snapshot2 = "snapshot-2";

createIndex(indexName);

Expand All @@ -1186,12 +1187,8 @@ public void testDeduplicateIndexMetadata() throws Exception {
client().prepareIndex(indexName).setSource("test", "init").execute().actionGet();
}

logger.info("--> register a repository");

final Path repoPath = randomRepoPath();
assertAcked(client().admin().cluster().preparePutRepository(repositoryName)
.setType("fs")
.setSettings(Settings.builder().put("location", repoPath)));
createRepository(repositoryName, "fs", repoPath);

logger.info("--> create a snapshot");
client().admin().cluster().prepareCreateSnapshot(repositoryName, snapshot0)
Expand All @@ -1202,7 +1199,6 @@ public void testDeduplicateIndexMetadata() throws Exception {
final List<Path> snapshot0IndexMetaFiles = findRepoMetaBlobs(repoPath);
assertThat(snapshot0IndexMetaFiles, hasSize(1)); // snapshotting a single index

// add few docs - less than initially
docs = between(1, 5);
for (int i = 0; i < docs; i++) {
client().prepareIndex(indexName).setSource("test", "test" + i).execute().actionGet();
Expand All @@ -1213,17 +1209,27 @@ public void testDeduplicateIndexMetadata() throws Exception {
internalCluster().startDataOnlyNode();
ensureGreen(indexName);

// create another snapshot
// total size has to grow and has to be equal to files on fs
assertThat(client().admin().cluster()
.prepareCreateSnapshot(repositoryName, snapshot1)
.setWaitForCompletion(true).get().status(),
equalTo(RestStatus.OK));
assertThat(client().admin().cluster().prepareCreateSnapshot(repositoryName, snapshot1).setWaitForCompletion(true).get().status(),
equalTo(RestStatus.OK));

final List<Path> snapshot1IndexMetaFiles = findRepoMetaBlobs(repoPath);

// The IndexMetadata did not change between snapshots, verify that no new redundant IndexMetaData was written to the repository
assertThat(snapshot1IndexMetaFiles, is(snapshot0IndexMetaFiles));

// index to some other field to trigger a change in index metadata
for (int i = 0; i < docs; i++) {
client().prepareIndex(indexName).setSource("new_field", "test" + i).execute().actionGet();
}
assertThat(client().admin().cluster().prepareCreateSnapshot(repositoryName, snapshot2).setWaitForCompletion(true).get().status(),
equalTo(RestStatus.OK));

final List<Path> snapshot2IndexMetaFiles = findRepoMetaBlobs(repoPath);
assertThat(snapshot2IndexMetaFiles, hasSize(2)); // should have created one new metadata blob

assertAcked(client().admin().cluster().prepareDeleteSnapshot(repositoryName, snapshot0, snapshot1).get());
final List<Path> snapshot3IndexMetaFiles = findRepoMetaBlobs(repoPath);
assertThat(snapshot3IndexMetaFiles, hasSize(1)); // should have deleted the metadata blob referenced by the first two snapshots
}

public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -169,9 +169,9 @@ public String toString() {
* @return identifier string
*/
public static String buildUniqueIdentifier(IndexMetadata indexMetaData) {
    // Every component is joined with an explicit "-" separator. An earlier revision
    // omitted the "-" between the history UUID and the settings version, which could
    // make distinct metadata states concatenate to the same identifier string
    // (e.g. "uuid" + "12" vs "uuid1" + "2"). Keep all separators in place.
    return indexMetaData.getIndexUUID() +
        "-" + indexMetaData.getSettings().get(IndexMetadata.SETTING_HISTORY_UUID, IndexMetadata.INDEX_UUID_NA_VALUE) +
        "-" + indexMetaData.getSettingsVersion() + "-" + indexMetaData.getMappingVersion() +
        "-" + indexMetaData.getAliasesVersion();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,6 @@
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
Expand Down Expand Up @@ -105,7 +104,6 @@

import static java.util.Collections.emptySet;
import static java.util.Collections.unmodifiableList;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.cluster.SnapshotsInProgress.completed;

/**
Expand Down Expand Up @@ -418,67 +416,6 @@ public static List<SnapshotsInProgress.Entry> currentSnapshots(@Nullable Snapsho
return unmodifiableList(builder);
}

/**
 * Returns the status of shards of already finished snapshots.
 * <p>
 * This method is executed on the master node and is complementary to the
 * {@link SnapshotShardsService#currentSnapshotShards(Snapshot)} because it
 * returns similar information but for already finished snapshots.
 * </p>
 *
 * @param repositoryName repository name
 * @param repositoryData repository data describing the repository contents
 * @param snapshotInfo snapshot info
 * @return map of shard id to snapshot status
 * @throws IOException if reading metadata from the repository fails
 */
public Map<ShardId, IndexShardSnapshotStatus> snapshotShards(final String repositoryName,
final RepositoryData repositoryData,
final SnapshotInfo snapshotInfo) throws IOException {
final Repository repository = repositoriesService.repository(repositoryName);
final Map<ShardId, IndexShardSnapshotStatus> shardStatus = new HashMap<>();
for (String index : snapshotInfo.indices()) {
IndexId indexId = repositoryData.resolveIndexId(index);
IndexMetadata indexMetaData = repository.getSnapshotIndexMetaData(repositoryData, snapshotInfo.snapshotId(), indexId);
// If the index metadata could not be resolved the index is silently skipped;
// only indices with readable metadata contribute shard entries to the result.
if (indexMetaData != null) {
int numberOfShards = indexMetaData.getNumberOfShards();
for (int i = 0; i < numberOfShards; i++) {
ShardId shardId = new ShardId(indexMetaData.getIndex(), i);
SnapshotShardFailure shardFailure = findShardFailure(snapshotInfo.shardFailures(), shardId);
if (shardFailure != null) {
// The snapshot recorded an explicit failure for this shard.
shardStatus.put(shardId, IndexShardSnapshotStatus.newFailed(shardFailure.reason()));
} else {
final IndexShardSnapshotStatus shardSnapshotStatus;
if (snapshotInfo.state() == SnapshotState.FAILED) {
// If the snapshot failed, but the shard's snapshot does
// not have an exception, it means that partial snapshots
// were disabled and in this case, the shard snapshot will
// *not* have any metadata, so attempting to read the shard
// snapshot status will throw an exception. Instead, we create
// a status for the shard to indicate that the shard snapshot
// could not be taken due to partial being set to false.
shardSnapshotStatus = IndexShardSnapshotStatus.newFailed("skipped");
} else {
// Successful (or partially successful) snapshot: read the
// actual per-shard status from the repository.
shardSnapshotStatus = repository.getShardSnapshotStatus(
snapshotInfo.snapshotId(),
indexId,
shardId);
}
shardStatus.put(shardId, shardSnapshotStatus);
}
}
}
}
return unmodifiableMap(shardStatus);
}

/**
 * Looks up the recorded failure for the given shard, if any.
 *
 * @param shardFailures all shard failures recorded for the snapshot
 * @param shardId shard to look up
 * @return the matching failure, or {@code null} if the shard did not fail
 */
private static SnapshotShardFailure findShardFailure(List<SnapshotShardFailure> shardFailures, ShardId shardId) {
    return shardFailures.stream()
        .filter(failure -> shardId.getIndexName().equals(failure.index()) && shardId.getId() == failure.shardId())
        .findFirst()
        .orElse(null);
}

@Override
public void applyClusterState(ClusterChangedEvent event) {
try {
Expand Down

0 comments on commit c9ab2fc

Please sign in to comment.