Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

HDDS-10608. Recon can't get full key when using Recon API. #6492

Merged
merged 22 commits into from
May 9, 2024
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
f66979f
Adding a new parameter to NSSummary and exposing it
ArafatKhan2198 Apr 7, 2024
4dbab9a
HDDS-10608. Recon can't get full key when using Recon API.
ArafatKhan2198 Apr 7, 2024
f75aac2
Fixed bugs and checkstyle issues
ArafatKhan2198 Apr 7, 2024
a14592c
Merge branch 'master' into HDDS-10608
ArafatKhan2198 Apr 17, 2024
ee97fa6
Added Integration Tests to ContainerEndpoint
ArafatKhan2198 Apr 17, 2024
7faa2e1
Rebuilding tree to prevent backwards compatibility
ArafatKhan2198 Apr 29, 2024
0d03da5
Finished the changes for preventing compatibility issues
ArafatKhan2198 Apr 29, 2024
dd1016d
Revised the backporting strategy
ArafatKhan2198 Apr 29, 2024
0e7baa8
Made review changes
ArafatKhan2198 Apr 29, 2024
76f0e41
Made 2nd review changes
ArafatKhan2198 May 3, 2024
89f5d1e
Added command to terminate the thread
ArafatKhan2198 May 6, 2024
1c41bc6
Removed the unnecessary synchronised block
ArafatKhan2198 May 6, 2024
5773c18
Made final review comments
ArafatKhan2198 May 7, 2024
415d315
Added more logs and removed the boolean flag
ArafatKhan2198 May 7, 2024
f2e8511
Fixed checkstyle issues
ArafatKhan2198 May 7, 2024
5cb1cc5
Fixed failing integration test
ArafatKhan2198 May 8, 2024
248a375
Added tests to check for backporting
ArafatKhan2198 May 8, 2024
047ac8b
Fixed checkstyle issues
ArafatKhan2198 May 8, 2024
fafc2e3
Fixed the missed checkstyle
ArafatKhan2198 May 8, 2024
50ac78f
Fixed the final review comments
ArafatKhan2198 May 8, 2024
d802e67
Changed the java doc as well
ArafatKhan2198 May 8, 2024
4c93221
Fixed find bugs
ArafatKhan2198 May 8, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -54,12 +54,17 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig;
import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR;
import static org.jooq.impl.DSL.currentTimestamp;
import static org.jooq.impl.DSL.select;
import static org.jooq.impl.DSL.using;

import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats;
Expand Down Expand Up @@ -244,25 +249,69 @@ public void untarCheckpointFile(File tarFile, Path destPath)
}
}


/**
* Constructs the full path of a key from its OmKeyInfo using a bottom-up approach, starting from the leaf node.
* <p>
* The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched
* via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from
* bottom to top, finally prepending the volume and bucket names to complete the full path.
*
* @param omKeyInfo The OmKeyInfo object for the key
* @return The constructed full path of the key as a String.
* @throws IOException
*/
public static String constructFullPath(OmKeyInfo omKeyInfo,
ReconNamespaceSummaryManager reconNamespaceSummaryManager)
throws IOException {
StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName());
long parentId = omKeyInfo.getParentObjectID();
boolean isDirectoryPresent = false;
while (parentId != -1) {
sumitagrawl marked this conversation as resolved.
Show resolved Hide resolved
NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId);
if (nsSummary == null) {
break;
}
// Prepend the directory name to the path
fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX);
sumitagrawl marked this conversation as resolved.
Show resolved Hide resolved

// Move to the parent ID of the current directory
parentId = nsSummary.getParentId();
isDirectoryPresent = true;
}

// Prepend the volume and bucket to the constructed path
String volumeName = omKeyInfo.getVolumeName();
sumitagrawl marked this conversation as resolved.
Show resolved Hide resolved
String bucketName = omKeyInfo.getBucketName();
fullPath.insert(0, volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX);
sumitagrawl marked this conversation as resolved.
Show resolved Hide resolved
if (isDirectoryPresent) {
return OmUtils.normalizeKey(fullPath.toString(), true);
}
return fullPath.toString();
}


/**
* Make HTTP GET call on the URL and return HttpURLConnection instance.
*
* @param connectionFactory URLConnectionFactory to use.
* @param url url to call
* @param isSpnego is SPNEGO enabled
* @param url url to call
* @param isSpnego is SPNEGO enabled
* @return HttpURLConnection instance of the HTTP call.
* @throws IOException, AuthenticationException While reading the response.
*/
public HttpURLConnection makeHttpCall(URLConnectionFactory connectionFactory,
String url, boolean isSpnego)
String url, boolean isSpnego)
throws IOException, AuthenticationException {
HttpURLConnection urlConnection = (HttpURLConnection)
connectionFactory.openConnection(new URL(url), isSpnego);
connectionFactory.openConnection(new URL(url), isSpnego);
urlConnection.connect();
return urlConnection;
}

/**
* Load last known DB in Recon.
*
* @param reconDbDir
* @param fileNamePrefix
* @return
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.recon.ReconUtils;
import org.apache.hadoop.ozone.recon.api.types.ContainerDiscrepancyInfo;
import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
Expand Down Expand Up @@ -144,8 +145,8 @@ public static DataFilter fromValue(String value) {

@Inject
public ContainerEndpoint(OzoneStorageContainerManager reconSCM,
ContainerHealthSchemaManager containerHealthSchemaManager,
ReconNamespaceSummaryManager reconNamespaceSummaryManager) {
ContainerHealthSchemaManager containerHealthSchemaManager,
ReconNamespaceSummaryManager reconNamespaceSummaryManager) {
this.containerManager =
(ReconContainerManager) reconSCM.getContainerManager();
this.pipelineManager = reconSCM.getPipelineManager();
Expand All @@ -158,19 +159,20 @@ public ContainerEndpoint(OzoneStorageContainerManager reconSCM,
* Return @{@link org.apache.hadoop.hdds.scm.container}
* for the containers starting from the given "prev-key" query param for the
* given "limit". The given "prev-key" is skipped from the results returned.
*
* @param prevKey the containerID after which results are returned.
* start containerID, >=0,
* start searching at the head if 0.
* @param limit max no. of containers to get.
* count must be >= 0
* Usually the count will be replace with a very big
* value instead of being unlimited in case the db is very big.
* @param limit max no. of containers to get.
* count must be >= 0
* Usually the count will be replace with a very big
* value instead of being unlimited in case the db is very big.
* @return {@link Response}
*/
@GET
public Response getContainers(
@DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
int limit,
int limit,
@DefaultValue(PREV_CONTAINER_ID_DEFAULT_VALUE)
@QueryParam(RECON_QUERY_PREVKEY) long prevKey) {
if (limit < 0 || prevKey < 0) {
Expand Down Expand Up @@ -212,8 +214,8 @@ public Response getContainers(
* starting from the given "prev-key" query param for the given "limit".
* The given prevKeyPrefix is skipped from the results returned.
*
* @param containerID the given containerID.
* @param limit max no. of keys to get.
* @param containerID the given containerID.
* @param limit max no. of keys to get.
* @param prevKeyPrefix the key prefix after which results are returned.
* @return {@link Response}
*/
Expand All @@ -226,7 +228,12 @@ public Response getKeysForContainer(
@DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY)
String prevKeyPrefix) {
Map<String, KeyMetadata> keyMetadataMap = new LinkedHashMap<>();

// Total count of keys in the container.
long totalCount;
// Last key prefix to be used for pagination. It will be exposed in the response.
String lastKey = "";

try {
Map<ContainerKeyPrefix, Integer> containerKeyPrefixMap =
reconContainerMetadataManager.getKeyPrefixesForContainer(containerID,
Expand Down Expand Up @@ -263,6 +270,7 @@ public Response getKeysForContainer(
omKeyInfo.getVolumeName(),
omKeyInfo.getBucketName(),
omKeyInfo.getKeyName());
lastKey = ozoneKey;
if (keyMetadataMap.containsKey(ozoneKey)) {
keyMetadataMap.get(ozoneKey).getVersions()
.add(containerKeyPrefix.getKeyVersion());
Expand All @@ -278,6 +286,8 @@ public Response getKeysForContainer(
keyMetadata.setBucket(omKeyInfo.getBucketName());
keyMetadata.setVolume(omKeyInfo.getVolumeName());
keyMetadata.setKey(omKeyInfo.getKeyName());
keyMetadata.setCompletePath(ReconUtils.constructFullPath(omKeyInfo,
sumitagrawl marked this conversation as resolved.
Show resolved Hide resolved
reconNamespaceSummaryManager));
keyMetadata.setCreationTime(
Instant.ofEpochMilli(omKeyInfo.getCreationTime()));
keyMetadata.setModificationTime(
Expand All @@ -298,7 +308,7 @@ public Response getKeysForContainer(
Response.Status.INTERNAL_SERVER_ERROR);
}
KeysResponse keysResponse =
new KeysResponse(totalCount, keyMetadataMap.values());
new KeysResponse(totalCount, keyMetadataMap.values(), lastKey);
return Response.ok(keysResponse).build();
}

Expand Down Expand Up @@ -334,7 +344,7 @@ public Response getMissingContainers(
) {
List<MissingContainerMetadata> missingContainers = new ArrayList<>();
containerHealthSchemaManager.getUnhealthyContainers(
UnHealthyContainerStates.MISSING, 0, limit)
UnHealthyContainerStates.MISSING, 0, limit)
.forEach(container -> {
long containerID = container.getContainerId();
try {
Expand Down Expand Up @@ -378,7 +388,7 @@ public Response getMissingContainers(
public Response getUnhealthyContainers(
@PathParam("state") String state,
@DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
int limit,
int limit,
@DefaultValue(DEFAULT_BATCH_NUMBER)
@QueryParam(RECON_QUERY_BATCH_PARAM) int batchNum) {
int offset = Math.max(((batchNum - 1) * limit), 0);
Expand All @@ -399,7 +409,8 @@ public Response getUnhealthyContainers(
.getUnhealthyContainers(internalState, offset, limit);
List<UnhealthyContainers> emptyMissingFiltered = containers.stream()
.filter(
container -> !container.getContainerState().equals(UnHealthyContainerStates.EMPTY_MISSING.toString()))
container -> !container.getContainerState()
.equals(UnHealthyContainerStates.EMPTY_MISSING.toString()))
.collect(
Collectors.toList());
for (UnhealthyContainers c : emptyMissingFiltered) {
Expand Down Expand Up @@ -433,7 +444,6 @@ public Response getUnhealthyContainers(
* Return
* {@link org.apache.hadoop.ozone.recon.api.types.UnhealthyContainerMetadata}
* for all unhealthy containers.

* @param limit The limit of unhealthy containers to return.
* @param batchNum The batch number (like "page number") of results to return.
* Passing 1, will return records 1 to limit. 2 will return
Expand All @@ -444,7 +454,7 @@ public Response getUnhealthyContainers(
@Path("/unhealthy")
public Response getUnhealthyContainers(
@DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
int limit,
int limit,
@DefaultValue(DEFAULT_BATCH_NUMBER)
@QueryParam(RECON_QUERY_BATCH_PARAM) int batchNum) {
return getUnhealthyContainers(null, limit, batchNum);
Expand Down Expand Up @@ -519,6 +529,7 @@ public Response getSCMDeletedContainers(
/**
* Helper function to extract the blocks for a given container from a given
* OM Key.
*
* @param matchedKeys List of OM Key Info locations
* @param containerID containerId.
* @return List of blocks.
Expand Down Expand Up @@ -703,7 +714,8 @@ public Response getContainerMisMatchInsights(
}


/** This API retrieves set of deleted containers in SCM which are present
/**
* This API retrieves set of deleted containers in SCM which are present
* in OM to find out list of keys mapped to such DELETED state containers.
*
* limit - limits the number of such SCM DELETED containers present in OM.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,6 @@ public DUResponse getDuResponse(
duResponse.setCount(subdirDUData.size());
duResponse.setSize(dirDataSize);
duResponse.setDuData(subdirDUData);

return duResponse;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,9 @@ public class KeyMetadata {
@XmlElement(name = "Key")
private String key;

@XmlElement(name = "CompletePath")
private String completePath;

@XmlElement(name = "DataSize")
private long dataSize;

Expand Down Expand Up @@ -126,6 +129,14 @@ public void setBlockIds(Map<Long, List<ContainerBlockMetadata>> blockIds) {
this.blockIds = blockIds;
}

/**
 * Returns the complete (volume/bucket/.../key) path of this key.
 *
 * @return the complete path, or {@code null} if it has not been set.
 */
public String getCompletePath() {
return completePath;
}

/**
 * Sets the complete (volume/bucket/.../key) path of this key.
 *
 * @param completePath the complete path of the key.
 */
public void setCompletePath(String completePath) {
this.completePath = completePath;
}

/**
* Class to hold ContainerID and BlockID.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,13 @@ public class KeysResponse {
@JsonProperty("keys")
private Collection<KeyMetadata> keys;

public KeysResponse(long totalCount, Collection<KeyMetadata> keys) {
@JsonProperty("lastKey")
private String lastKey;

/**
 * Creates a response describing the keys mapped to a container.
 *
 * @param totalCount total number of keys in the container.
 * @param keys       metadata for each key returned.
 * @param lastKey    last key included in this response; exposed so clients
 *                   can use it for pagination.
 */
public KeysResponse(long totalCount, Collection<KeyMetadata> keys, String lastKey) {
this.totalCount = totalCount;
this.keys = keys;
this.lastKey = lastKey;
}

public long getTotalCount() {
Expand All @@ -48,4 +52,7 @@ public long getTotalCount() {
/**
 * Returns the metadata of the keys in this response.
 *
 * @return the collection of key metadata.
 */
public Collection<KeyMetadata> getKeys() {
return keys;
}
/**
 * Returns the last key included in this response, intended for use as the
 * "prev-key" parameter when fetching the next page of results.
 *
 * @return the last key of this response.
 */
public String getLastKey() {
return lastKey;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -36,22 +36,25 @@ public class NSSummary {
private int[] fileSizeBucket;
private Set<Long> childDir;
private String dirName;
private long parentId = -1;

public NSSummary() {
this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS],
new HashSet<>(), "");
new HashSet<>(), "", -1); // -1 can be a default value indicating no parent
}

public NSSummary(int numOfFiles,
long sizeOfFiles,
int[] bucket,
Set<Long> childDir,
String dirName) {
String dirName,
long parentId) {
this.numOfFiles = numOfFiles;
this.sizeOfFiles = sizeOfFiles;
setFileSizeBucket(bucket);
this.childDir = childDir;
this.dirName = dirName;
this.parentId = parentId;
}

public int getNumOfFiles() {
Expand Down Expand Up @@ -107,4 +110,12 @@ public void removeChildDir(long childId) {
this.childDir.remove(childId);
}
}

/**
 * Returns the object ID of this entry's parent directory.
 *
 * @return the parent object ID, or -1 when no parent is recorded.
 */
public long getParentId() {
return parentId;
}

/**
 * Sets the object ID of this entry's parent directory.
 *
 * @param parentId the parent object ID; -1 indicates no parent.
 */
public void setParentId(long parentId) {
this.parentId = parentId;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -65,9 +65,10 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException {
int stringLen = dirName.getBytes(StandardCharsets.UTF_8).length;
int numOfChildDirs = childDirs.size();
final int resSize = NUM_OF_INTS * Integer.BYTES
+ (numOfChildDirs + 1) * Long.BYTES // 1 long field + list size
+ (numOfChildDirs + 1) * Long.BYTES // 1 long field for parentId + list size
+ Short.BYTES // 2 dummy shorts to track length
+ stringLen; // directory name length
+ stringLen // directory name length
+ Long.BYTES; // Added space for parentId serialization

ByteArrayOutputStream out = new ByteArrayOutputStream(resSize);
out.write(integerCodec.toPersistedFormat(object.getNumOfFiles()));
Expand All @@ -84,6 +85,8 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException {
}
out.write(integerCodec.toPersistedFormat(stringLen));
out.write(stringCodec.toPersistedFormat(dirName));
out.write(longCodec.toPersistedFormat(object.getParentId()));

return out.toByteArray();
}

Expand All @@ -110,13 +113,17 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException {

int strLen = in.readInt();
if (strLen == 0) {
long parentId = in.readLong(); // Deserialize parentId
res.setParentId(parentId);
return res;
}
byte[] buffer = new byte[strLen];
int bytesRead = in.read(buffer);
assert (bytesRead == strLen);
String dirName = stringCodec.fromPersistedFormat(buffer);
res.setDirName(dirName);
long parentId = in.readLong();
sumitagrawl marked this conversation as resolved.
Show resolved Hide resolved
res.setParentId(parentId);
return res;
}

Expand All @@ -128,6 +135,7 @@ public NSSummary copyObject(NSSummary object) {
copy.setFileSizeBucket(object.getFileSizeBucket());
copy.setChildDir(object.getChildDir());
copy.setDirName(object.getDirName());
copy.setParentId(object.getParentId());
return copy;
}
}
Loading