
Commit ea1ab7c
Correction of a typo (#15220)
Signed-off-by: 10000-ki <[email protected]>
10000-ki authored Aug 13, 2024
1 parent 2defb76 commit ea1ab7c
Showing 12 changed files with 14 additions and 14 deletions.
@@ -375,7 +375,7 @@ private static void skipToListStart(XContentParser parser) throws IOException {
         }
     }
 
-    // read a list without bounds checks, assuming the the current parser is always on an array start
+    // read a list without bounds checks, assuming the current parser is always on an array start
     private static List<Object> readListUnsafe(XContentParser parser, Supplier<Map<String, Object>> mapFactory) throws IOException {
         assert parser.currentToken() == Token.START_ARRAY;
         ArrayList<Object> list = new ArrayList<>();

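The comment being fixed describes an intentionally unchecked read: the caller has already positioned the parser on an array start, so the loop can run to the closing token without re-validating. A minimal sketch of that pattern, using a hypothetical TokenStream stand-in rather than the real XContentParser:

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical stand-in for the parser interface; only what the sketch needs.
    interface TokenStream {
        enum Token { START_ARRAY, END_ARRAY, VALUE }
        Token currentToken();
        Token nextToken();       // advances the stream and returns the new current token
        Object currentValue();   // the scalar value at the current position
    }

    final class ListReader {
        // "Unsafe" in the same sense as the hunk above: the caller guarantees the
        // stream is on START_ARRAY, so there is no defensive bounds checking here.
        static List<Object> readListUnsafe(TokenStream parser) {
            assert parser.currentToken() == TokenStream.Token.START_ARRAY;
            List<Object> list = new ArrayList<>();
            for (TokenStream.Token t = parser.nextToken(); t != TokenStream.Token.END_ARRAY; t = parser.nextToken()) {
                list.add(parser.currentValue()); // nested arrays/objects are omitted in this sketch
            }
            return list;
        }
    }
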
@@ -380,7 +380,7 @@ public static String escapePath(Path path) {
     }
 
     /**
-     * Recursively copy the the source directory to the target directory, preserving permissions.
+     * Recursively copy the source directory to the target directory, preserving permissions.
      */
     public static void copyDirectory(Path source, Path target) throws IOException {
         Files.walkFileTree(source, new SimpleFileVisitor<Path>() {

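The hunk shows only the first line of the visitor; a self-contained sketch of the Files.walkFileTree pattern the Javadoc names, with the permission copy guarded for non-POSIX filesystems (an illustration under assumptions, not the project's actual implementation):

    import java.io.IOException;
    import java.nio.file.*;
    import java.nio.file.attribute.BasicFileAttributes;
    import java.nio.file.attribute.PosixFileAttributeView;
    import java.nio.file.attribute.PosixFileAttributes;

    public final class CopyDir {
        // Walks the source tree; for each directory and file, creates the
        // counterpart under target and copies the POSIX permission bits.
        public static void copyDirectory(Path source, Path target) throws IOException {
            Files.walkFileTree(source, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
                    Path dest = target.resolve(source.relativize(dir));
                    Files.createDirectories(dest);
                    copyPermissions(dir, dest);
                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    Path dest = target.resolve(source.relativize(file));
                    Files.copy(file, dest, StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING);
                    copyPermissions(file, dest);
                    return FileVisitResult.CONTINUE;
                }
            });
        }

        private static void copyPermissions(Path from, Path to) throws IOException {
            PosixFileAttributeView view = Files.getFileAttributeView(to, PosixFileAttributeView.class);
            if (view != null) { // non-POSIX filesystems (e.g. Windows) skip this step
                PosixFileAttributes attrs = Files.readAttributes(from, PosixFileAttributes.class);
                view.setPermissions(attrs.permissions());
            }
        }
    }
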
@@ -152,8 +152,8 @@ public void testDanglingIndicesCanBeImported() throws Exception {
      * 1, then create two indices and delete them both while one node in
      * the cluster is stopped. The deletion of the second pushes the deletion
      * of the first out of the graveyard. When the stopped node is resumed,
-     * only the second index will be found into the graveyard and the the
-     * other will be considered dangling, and can therefore be listed and
+     * only the second index will be found into the graveyard and the other
+     * will be considered dangling, and can therefore be listed and
      * deleted through the API
      */
     public void testDanglingIndicesCanBeDeleted() throws Exception {

@@ -298,8 +298,8 @@ public void testMustAcceptDataLossToImportDanglingIndex() throws Exception {
      * 1, then create two indices and delete them both while one node in
      * the cluster is stopped. The deletion of the second pushes the deletion
      * of the first out of the graveyard. When the stopped node is resumed,
-     * only the second index will be found into the graveyard and the the
-     * other will be considered dangling, and can therefore be listed and
+     * only the second index will be found into the graveyard and the other
+     * will be considered dangling, and can therefore be listed and
      * deleted through the API
      */
     public void testDanglingIndexCanBeDeleted() throws Exception {

2 changes: 1 addition & 1 deletion server/src/main/java/org/opensearch/http/HttpChannel.java
@@ -77,7 +77,7 @@ default void handleException(Exception ex) {}
 
     /**
      * Returns the contextual property associated with this specific HTTP channel (the
-     * implementation of how such properties are managed depends on the the particular
+     * implementation of how such properties are managed depends on the particular
      * transport engine).
      *
      * @param name the name of the property

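This Javadoc (and its twin on TcpChannel further down) describes a per-channel property lookup whose storage is left to the transport engine. One plausible backing, sketched with hypothetical names:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Hypothetical channel sketch: one way a transport engine could manage the
    // named, typed contextual properties described in the Javadoc above.
    final class PropertyChannel {
        private final Map<String, Object> properties = new ConcurrentHashMap<>();

        void set(String name, Object value) {
            properties.put(name, value);
        }

        // Returns the property cast to the requested type, or null when it is
        // absent or of a different type.
        <T> T get(String name, Class<T> clazz) {
            Object value = properties.get(name);
            return clazz.isInstance(value) ? clazz.cast(value) : null;
        }
    }
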
@@ -540,7 +540,7 @@ void sendFiles(Store store, StoreFileMetadata[] files, IntSupplier translogOps,
 
     void createRetentionLease(final long startingSeqNo, ActionListener<RetentionLease> listener) {
         RunUnderPrimaryPermit.run(() -> {
-            // Clone the peer recovery retention lease belonging to the source shard. We are retaining history between the the local
+            // Clone the peer recovery retention lease belonging to the source shard. We are retaining history between the local
             // checkpoint of the safe commit we're creating and this lease's retained seqno with the retention lock, and by cloning an
             // existing lease we (approximately) know that all our peers are also retaining history as requested by the cloned lease. If
             // the recovery now fails before copying enough history over then a subsequent attempt will find this lease, determine it is

@@ -400,7 +400,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
     /**
      * Flag that is set to {@code true} if this instance is started with {@link #metadata} that has a higher value for
      * {@link RepositoryMetadata#pendingGeneration()} than for {@link RepositoryMetadata#generation()} indicating a full cluster restart
-     * potentially accounting for the the last {@code index-N} write in the cluster state.
+     * potentially accounting for the last {@code index-N} write in the cluster state.
      * Note: While it is true that this value could also be set to {@code true} for an instance on a node that is just joining the cluster
      * during a new {@code index-N} write, this does not present a problem. The node will still load the correct {@link RepositoryData} in
      * all cases and simply do a redundant listing of the repository contents if it tries to load {@link RepositoryData} and falls back

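The condition this Javadoc describes reduces to one comparison of the two generation counters; a hedged sketch (the helper and its name are made up, only the two accessors appear in the text above):

    import org.opensearch.cluster.metadata.RepositoryMetadata;

    final class GenerationCheck {
        // pendingGeneration runs ahead of generation while an index-N write is in
        // flight; seeing that gap at startup means the last write may or may not
        // have completed, so the repository must verify before trusting index-N.
        static boolean startedAfterPossiblyPartialWrite(RepositoryMetadata metadata) {
            return metadata.pendingGeneration() > metadata.generation();
        }
    }
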
@@ -131,7 +131,7 @@
  * snapshots, we load the {@link org.opensearch.snapshots.SnapshotInfo} for the source snapshot and check for shard snapshot
  * failures of the relevant indices.</li>
  * <li>Once all shard counts are known and the health of all source indices data has been verified, we populate the
- * {@code SnapshotsInProgress.Entry#clones} map for the clone operation with the the relevant shard clone tasks.</li>
+ * {@code SnapshotsInProgress.Entry#clones} map for the clone operation with the relevant shard clone tasks.</li>
  * <li>After the clone tasks have been added to the {@code SnapshotsInProgress.Entry}, cluster-manager executes them on its snapshot thread-pool
  * by invoking {@link org.opensearch.repositories.Repository#cloneShardSnapshot} for each shard that is to be cloned. Each completed
  * shard snapshot triggers a call to the {@link org.opensearch.snapshots.SnapshotsService#SHARD_STATE_EXECUTOR} which updates the

@@ -101,7 +101,7 @@ public interface TcpChannel extends CloseableChannel {
 
     /**
      * Returns the contextual property associated with this specific TCP channel (the
-     * implementation of how such properties are managed depends on the the particular
+     * implementation of how such properties are managed depends on the particular
      * transport engine).
      *
      * @param name the name of the property

@@ -52,7 +52,7 @@
 public class NodeInfoTests extends OpenSearchTestCase {
 
     /**
-     * Check that the the {@link NodeInfo#getInfo(Class)} method returns null
+     * Check that the {@link NodeInfo#getInfo(Class)} method returns null
      * for absent info objects, and returns the right thing for present info
      * objects.
      */

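The getInfo(Class) contract checked by this test is the classic class-keyed heterogeneous container; a minimal sketch of the pattern (hypothetical names, not the NodeInfo source):

    import java.util.HashMap;
    import java.util.Map;

    // Hedged sketch of the class-keyed container pattern behind a getInfo(Class)
    // style lookup: typed retrieval that yields null for absent entries.
    final class InfoContainer {
        private final Map<Class<?>, Object> infos = new HashMap<>();

        <T> void addInfo(T info) {
            infos.put(info.getClass(), info);
        }

        <T> T getInfo(Class<T> clazz) {
            return clazz.cast(infos.get(clazz)); // cast(null) simply returns null
        }
    }
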
@@ -302,7 +302,7 @@ private static long time(String time, ZoneId zone) {
     }
 
     /**
-     * The the last "fully defined" transitions in the provided {@linkplain ZoneId}.
+     * The last "fully defined" transitions in the provided {@linkplain ZoneId}.
      */
     private static ZoneOffsetTransition lastTransitionIn(ZoneId zone) {
         List<ZoneOffsetTransition> transitions = zone.getRules().getTransitions();

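The hunk cuts off after the method's first line; a plausible self-contained completion using only java.time (the real helper may differ):

    import java.time.ZoneId;
    import java.time.zone.ZoneOffsetTransition;
    import java.util.List;

    final class Transitions {
        // getTransitions() lists only the historical, fully specified transitions;
        // future changes are expressed as recurring rules and are not in this list.
        // Assumes the zone has at least one listed transition.
        static ZoneOffsetTransition lastTransitionIn(ZoneId zone) {
            List<ZoneOffsetTransition> transitions = zone.getRules().getTransitions();
            return transitions.get(transitions.size() - 1);
        }
    }
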
@@ -793,7 +793,7 @@ public void testAllocationBucketsBreaker() {
 
         // make sure used bytes is greater than the total circuit breaker limit
         breaker.addWithoutBreaking(200);
-        // make sure that we check on the the following call
+        // make sure that we check on the following call
        for (int i = 0; i < 1023; i++) {
            multiBucketConsumer.accept(0);
        }

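The loop runs exactly 1023 times because the consumer consults the breaker only periodically, so the next accept call is the one that performs the check. A hedged sketch of that pattern; the 1024-call interval is an assumption for illustration:

    import java.util.function.IntConsumer;

    // Hedged sketch (not the OpenSearch implementation): a consumer that only
    // runs its limit check on every 1024th call, which is why the test can loop
    // 1023 times without tripping and expect the following call to trip.
    final class PeriodicCheckConsumer implements IntConsumer {
        private static final int CHECK_INTERVAL = 1024; // assumed interval, must be a power of two
        private final long limit;
        private int callCount;
        private long total;

        PeriodicCheckConsumer(long limit) {
            this.limit = limit;
        }

        @Override
        public void accept(int value) {
            total += value;
            if ((++callCount & (CHECK_INTERVAL - 1)) == 0 && total > limit) {
                throw new IllegalStateException("limit of " + limit + " exceeded: " + total);
            }
        }
    }
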
