xBis7 commented on code in PR #3741:
URL: https://github.com/apache/ozone/pull/3741#discussion_r1015829690
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java:
##########
@@ -395,4 +416,174 @@ public static Path getMetadataDirectory(
return Paths.get(metadataPath);
}
+
+ /**
+ * Utilities for container_delete_service directory
+ * which is located under <volume>/hdds/<cluster-id>/tmp/.
+ * Containers will be moved under it before getting deleted
+ * to avoid, in case of failure, having artifact leftovers
+ * on the default container path on the disk.
+ */
+ public static class ContainerDeleteDirectory {
+
+ /**
+ * Delete all files under
+ * <volume>/hdds/<cluster-id>/tmp/container_delete_service.
+ */
+ public static synchronized void cleanTmpDir(HddsVolume hddsVolume)
+ throws IOException {
+ if (hddsVolume.getStorageState() != StorageVolume.VolumeState.NORMAL) {
+ LOG.debug("Call to clean tmp dir container_delete_service directory "
+ + "for {} while VolumeState {}",
+ hddsVolume.getStorageDir(),
+ hddsVolume.getStorageState().toString());
+ return;
+ }
+
+ if (!hddsVolume.getClusterID().isEmpty()) {
+ // Initialize delete directory
+ hddsVolume.createDeleteServiceDir();
+ } else {
+ throw new IOException("Volume has no ClusterId");
+ }
+
+ ListIterator<File> leftoversListIt = getDeleteLeftovers(hddsVolume);
+
+ while (leftoversListIt.hasNext()) {
+ File file = leftoversListIt.next();
+
+ // If SchemaV3 is enabled and we have a RocksDB
+ if (VersionedDatanodeFeatures.isFinalized(
+ HDDSLayoutFeature.DATANODE_SCHEMA_V3)) {
+ // Get container file
+ File containerFile = getContainerFile(file);
+
+ // If file exists
+ if (containerFile != null) {
+ ContainerData containerData = ContainerDataYaml
+ .readContainerFile(containerFile);
+ KeyValueContainerData keyValueContainerData =
+ (KeyValueContainerData) containerData;
+
+ // Remove container from Rocks DB
+ String dbPath = hddsVolume.getDbParentDir().getAbsolutePath();
+ DatanodeStoreSchemaThreeImpl store =
+ new DatanodeStoreSchemaThreeImpl(hddsVolume.getConf(),
+ dbPath, false);
+
+          store.removeKVContainerData(keyValueContainerData.getContainerID());
+ }
+ }
+
+ try {
+ if (file.isDirectory()) {
+ FileUtils.deleteDirectory(file);
+ } else {
+ FileUtils.delete(file);
+ }
+ } catch (IOException ex) {
+ LOG.error("Failed to delete directory or file inside " +
+ "{}", hddsVolume.getDeleteServiceDirPath().toString(), ex);
+ }
+ }
+ }
+
+ /**
+ * Search recursively for the container file under a
+ * directory. Return null if the file is not found.
+ * @param file
+ * @return container file or null if it doesn't exist
+ * @throws IOException
+ */
+ public static File getContainerFile(File file) throws IOException {
Review Comment:
@errose28 There is a chance that cleaning the tmp directory can fail and
then the container directory won't be in its normal structure and format. In
that case, ContainerUtils.getContainerFile() won't work as expected. That's why
I had created my own method that recursively searches the directory for the
container file.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]