This is an automated email from the ASF dual-hosted git repository.

jackie pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/pinot.git


The following commit(s) were added to refs/heads/master by this push:
     new 7a96b429bfd Not able to delete existing table & schema from Pinot UI (#17294)
7a96b429bfd is described below

commit 7a96b429bfd224bec38ddef0583234f612803f1e
Author: Akanksha kedia <[email protected]>
AuthorDate: Fri Jan 16 06:14:37 2026 +0530

    Not able to delete existing table & schema from Pinot UI (#17294)
---
 .../pinot/plugin/filesystem/HadoopPinotFS.java     |  12 +-
 .../pinot/plugin/filesystem/HadoopPinotFSTest.java | 302 +++++++++++++++++++++
 .../apache/pinot/spi/filesystem/BasePinotFS.java   |   7 +-
 3 files changed, 317 insertions(+), 4 deletions(-)

diff --git a/pinot-plugins/pinot-file-system/pinot-hdfs/src/main/java/org/apache/pinot/plugin/filesystem/HadoopPinotFS.java b/pinot-plugins/pinot-file-system/pinot-hdfs/src/main/java/org/apache/pinot/plugin/filesystem/HadoopPinotFS.java
index 3696962fe8a..37ff660e0c0 100644
--- a/pinot-plugins/pinot-file-system/pinot-hdfs/src/main/java/org/apache/pinot/plugin/filesystem/HadoopPinotFS.java
+++ b/pinot-plugins/pinot-file-system/pinot-hdfs/src/main/java/org/apache/pinot/plugin/filesystem/HadoopPinotFS.java
@@ -81,11 +81,17 @@ public class HadoopPinotFS extends BasePinotFS {
   @Override
   public boolean delete(URI segmentUri, boolean forceDelete)
       throws IOException {
-    // Returns false if we are moving a directory and that directory is not empty
+    Path path = new Path(segmentUri);
+
+    if (!_hadoopFS.exists(path)) {
+      return true;
+    }
+
     if (isDirectory(segmentUri) && listFiles(segmentUri, false).length > 0 && !forceDelete) {
       return false;
     }
-    return _hadoopFS.delete(new Path(segmentUri), true);
+
+    return _hadoopFS.delete(path, true);
   }
 
   @Override
@@ -198,7 +204,7 @@ public class HadoopPinotFS extends BasePinotFS {
        throw new RuntimeException("_hadoopFS client is not initialized when trying to copy files");
       }
       if (_hadoopFS.isDirectory(remoteFile)) {
-        throw new IllegalArgumentException(srcUri.toString() + " is a direactory");
+        throw new IllegalArgumentException(srcUri.toString() + " is a directory");
       }
       long startMs = System.currentTimeMillis();
       _hadoopFS.copyToLocalFile(remoteFile, localFile);
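
The hunk above changes delete() so that a missing path counts as an already-completed delete: Hadoop's FileSystem.delete() returns false for a nonexistent path, and that false previously propagated up as a failure. Below is a minimal sketch of the new contract, assuming a HadoopPinotFS initialized with an empty PinotConfiguration (which, as in the tests further down, resolves to the local FileSystem); the class name and /tmp path are hypothetical:

    import java.io.IOException;
    import java.net.URI;
    import org.apache.pinot.plugin.filesystem.HadoopPinotFS;
    import org.apache.pinot.spi.env.PinotConfiguration;

    public class DeleteContractSketch {
      public static void main(String[] args) throws IOException {
        try (HadoopPinotFS fs = new HadoopPinotFS()) {
          fs.init(new PinotConfiguration());

          // A path that was never created (hypothetical location).
          URI missing = URI.create("/tmp/deleteContractSketch/doesNotExist");

          // Before this change the call fell through to FileSystem.delete(),
          // which returns false for a missing path; the new existence check
          // short-circuits to true, making delete() idempotent.
          System.out.println(fs.delete(missing, false)); // true

          // Unchanged guard: deleting a non-empty directory without
          // forceDelete still returns false (see the tests below).
        }
      }
    }

Reporting success for an absent path appears to be the fix behind #17294: a delete repeated after the data is already gone no longer surfaces as a failure.
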
diff --git a/pinot-plugins/pinot-file-system/pinot-hdfs/src/test/java/org/apache/pinot/plugin/filesystem/HadoopPinotFSTest.java b/pinot-plugins/pinot-file-system/pinot-hdfs/src/test/java/org/apache/pinot/plugin/filesystem/HadoopPinotFSTest.java
index 78fa54ba9b6..9c5f7d9d576 100644
--- a/pinot-plugins/pinot-file-system/pinot-hdfs/src/test/java/org/apache/pinot/plugin/filesystem/HadoopPinotFSTest.java
+++ b/pinot-plugins/pinot-file-system/pinot-hdfs/src/test/java/org/apache/pinot/plugin/filesystem/HadoopPinotFSTest.java
@@ -150,4 +150,306 @@ public class HadoopPinotFSTest {
           fileMetadata.stream().map(FileMetadata::getFilePath).collect(Collectors.toSet())), fileMetadata.toString());
     }
   }
+
+  @Test
+  public void testDeleteBatchWithEmptyList()
+      throws IOException {
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+
+      // Test with null list
+      Assert.assertTrue(hadoopFS.deleteBatch(null, false));
+
+      // Test with empty list
+      Assert.assertTrue(hadoopFS.deleteBatch(new ArrayList<>(), false));
+    }
+  }
+
+  @Test
+  public void testDeleteBatchWithSingleFile()
+      throws IOException {
+    URI baseURI = URI.create(TMP_DIR + "/testDeleteBatchWithSingleFile");
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+      hadoopFS.mkdir(baseURI);
+
+      // Create a single file
+      URI testFile = new Path(baseURI.getPath(), "testFile.txt").toUri();
+      hadoopFS.touch(testFile);
+      Assert.assertTrue(hadoopFS.exists(testFile));
+
+      // Delete using deleteBatch
+      List<URI> urisToDelete = new ArrayList<>();
+      urisToDelete.add(testFile);
+      Assert.assertTrue(hadoopFS.deleteBatch(urisToDelete, false));
+
+      // Verify file is deleted
+      Assert.assertFalse(hadoopFS.exists(testFile));
+
+      hadoopFS.delete(baseURI, true);
+    }
+  }
+
+  @Test
+  public void testDeleteBatchWithMultipleFiles()
+      throws IOException {
+    URI baseURI = URI.create(TMP_DIR + "/testDeleteBatchWithMultipleFiles");
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+      hadoopFS.mkdir(baseURI);
+
+      // Create multiple files
+      List<URI> urisToDelete = new ArrayList<>();
+      for (int i = 0; i < 5; i++) {
+        URI testFile = new Path(baseURI.getPath(), "testFile" + i + ".txt").toUri();
+        hadoopFS.touch(testFile);
+        Assert.assertTrue(hadoopFS.exists(testFile));
+        urisToDelete.add(testFile);
+      }
+
+      // Delete all files using deleteBatch
+      Assert.assertTrue(hadoopFS.deleteBatch(urisToDelete, false));
+
+      // Verify all files are deleted
+      for (URI uri : urisToDelete) {
+        Assert.assertFalse(hadoopFS.exists(uri));
+      }
+
+      hadoopFS.delete(baseURI, true);
+    }
+  }
+
+  @Test
+  public void testDeleteBatchWithNonExistentFiles()
+      throws IOException {
+    URI baseURI = URI.create(TMP_DIR + "/testDeleteBatchWithNonExistentFiles");
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+      hadoopFS.mkdir(baseURI);
+
+      // Create list with non-existent files
+      List<URI> urisToDelete = new ArrayList<>();
+      URI nonExistentFile1 = new Path(baseURI.getPath(), "nonExistent1.txt").toUri();
+      URI nonExistentFile2 = new Path(baseURI.getPath(), "nonExistent2.txt").toUri();
+      urisToDelete.add(nonExistentFile1);
+      urisToDelete.add(nonExistentFile2);
+
+      // Should return true and skip non-existent files
+      Assert.assertTrue(hadoopFS.deleteBatch(urisToDelete, false));
+
+      hadoopFS.delete(baseURI, true);
+    }
+  }
+
+  @Test
+  public void testDeleteBatchWithMixedExistingAndNonExisting()
+      throws IOException {
+    URI baseURI = URI.create(TMP_DIR + "/testDeleteBatchWithMixedFiles");
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+      hadoopFS.mkdir(baseURI);
+
+      // Create some files
+      URI existingFile1 = new Path(baseURI.getPath(), "existing1.txt").toUri();
+      URI existingFile2 = new Path(baseURI.getPath(), "existing2.txt").toUri();
+      hadoopFS.touch(existingFile1);
+      hadoopFS.touch(existingFile2);
+
+      // Create list with mix of existing and non-existing files
+      List<URI> urisToDelete = new ArrayList<>();
+      urisToDelete.add(existingFile1);
+      urisToDelete.add(new Path(baseURI.getPath(), "nonExistent.txt").toUri());
+      urisToDelete.add(existingFile2);
+
+      // Should successfully delete existing files and skip non-existing
+      Assert.assertTrue(hadoopFS.deleteBatch(urisToDelete, false));
+
+      // Verify existing files are deleted
+      Assert.assertFalse(hadoopFS.exists(existingFile1));
+      Assert.assertFalse(hadoopFS.exists(existingFile2));
+
+      hadoopFS.delete(baseURI, true);
+    }
+  }
+
+  @Test
+  public void testDeleteBatchWithEmptyDirectory()
+      throws IOException {
+    URI baseURI = URI.create(TMP_DIR + "/testDeleteBatchWithEmptyDirectory");
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+      hadoopFS.mkdir(baseURI);
+
+      // Create an empty directory
+      URI emptyDir = new Path(baseURI.getPath(), "emptyDir").toUri();
+      hadoopFS.mkdir(emptyDir);
+      Assert.assertTrue(hadoopFS.exists(emptyDir));
+
+      // Delete empty directory with forceDelete=false
+      List<URI> urisToDelete = new ArrayList<>();
+      urisToDelete.add(emptyDir);
+      Assert.assertTrue(hadoopFS.deleteBatch(urisToDelete, false));
+
+      // Verify directory is deleted
+      Assert.assertFalse(hadoopFS.exists(emptyDir));
+
+      hadoopFS.delete(baseURI, true);
+    }
+  }
+
+  @Test
+  public void testDeleteBatchWithNonEmptyDirectoryForceDeleteFalse()
+      throws IOException {
+    URI baseURI = URI.create(TMP_DIR + "/testDeleteBatchNonEmptyDirNoForce");
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+      hadoopFS.mkdir(baseURI);
+
+      // Create a non-empty directory
+      URI nonEmptyDir = new Path(baseURI.getPath(), "nonEmptyDir").toUri();
+      hadoopFS.mkdir(nonEmptyDir);
+      URI fileInDir = new Path(nonEmptyDir.getPath(), "file.txt").toUri();
+      hadoopFS.touch(fileInDir);
+
+      // Try to delete non-empty directory with forceDelete=false
+      List<URI> urisToDelete = new ArrayList<>();
+      urisToDelete.add(nonEmptyDir);
+      Assert.assertFalse(hadoopFS.deleteBatch(urisToDelete, false));
+
+      // Verify directory still exists
+      Assert.assertTrue(hadoopFS.exists(nonEmptyDir));
+      Assert.assertTrue(hadoopFS.exists(fileInDir));
+
+      hadoopFS.delete(baseURI, true);
+    }
+  }
+
+  @Test
+  public void testDeleteBatchWithNonEmptyDirectoryForceDeleteTrue()
+      throws IOException {
+    URI baseURI = URI.create(TMP_DIR + "/testDeleteBatchNonEmptyDirForce");
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+      hadoopFS.mkdir(baseURI);
+
+      // Create a non-empty directory with a file
+      URI nonEmptyDir = new Path(baseURI.getPath(), "nonEmptyDir").toUri();
+      hadoopFS.mkdir(nonEmptyDir);
+      URI fileInDir = new Path(nonEmptyDir.getPath(), "file.txt").toUri();
+      hadoopFS.touch(fileInDir);
+
+      // Delete non-empty directory with forceDelete=true
+      // Note: This tests that the method processes the directory
+      // The actual deletion behavior depends on Hadoop's delete implementation
+      List<URI> urisToDelete = new ArrayList<>();
+      urisToDelete.add(nonEmptyDir);
+      hadoopFS.deleteBatch(urisToDelete, true);
+
+      hadoopFS.delete(baseURI, true);
+    }
+  }
+
+  @Test
+  public void testDeleteBatchWithMixedFilesAndDirectories()
+      throws IOException {
+    URI baseURI = URI.create(TMP_DIR + "/testDeleteBatchMixed");
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+      hadoopFS.mkdir(baseURI);
+
+      // Create files and directories
+      URI file1 = new Path(baseURI.getPath(), "file1.txt").toUri();
+      hadoopFS.touch(file1);
+
+      URI emptyDir = new Path(baseURI.getPath(), "emptyDir").toUri();
+      hadoopFS.mkdir(emptyDir);
+
+      URI nonEmptyDir = new Path(baseURI.getPath(), "nonEmptyDir").toUri();
+      hadoopFS.mkdir(nonEmptyDir);
+      URI fileInNonEmptyDir = new Path(nonEmptyDir.getPath(), "file.txt").toUri();
+      hadoopFS.touch(fileInNonEmptyDir);
+
+      URI file2 = new Path(baseURI.getPath(), "file2.txt").toUri();
+      hadoopFS.touch(file2);
+
+      // Delete with forceDelete=true
+      List<URI> urisToDelete = new ArrayList<>();
+      urisToDelete.add(file1);
+      urisToDelete.add(emptyDir);
+      urisToDelete.add(nonEmptyDir);
+      urisToDelete.add(file2);
+
+      Assert.assertTrue(hadoopFS.deleteBatch(urisToDelete, true));
+
+      // Verify all are deleted
+      Assert.assertFalse(hadoopFS.exists(file1));
+      Assert.assertFalse(hadoopFS.exists(emptyDir));
+      Assert.assertFalse(hadoopFS.exists(nonEmptyDir));
+      Assert.assertFalse(hadoopFS.exists(fileInNonEmptyDir));
+      Assert.assertFalse(hadoopFS.exists(file2));
+
+      hadoopFS.delete(baseURI, true);
+    }
+  }
+
+  @Test
+  public void testDeleteBatchWithDeepNestedDirectories()
+      throws IOException {
+    URI baseURI = URI.create(TMP_DIR + "/testDeleteBatchDeepNested");
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+      hadoopFS.mkdir(baseURI);
+
+      // Create deeply nested directory structure
+      URI level1 = new Path(baseURI.getPath(), "level1").toUri();
+      hadoopFS.mkdir(level1);
+      URI level2 = new Path(level1.getPath(), "level2").toUri();
+      hadoopFS.mkdir(level2);
+      URI deepFile = new Path(level2.getPath(), "deepFile.txt").toUri();
+      hadoopFS.touch(deepFile);
+
+      // Delete with forceDelete=true
+      // Note: This tests that the method processes nested directories
+      List<URI> urisToDelete = new ArrayList<>();
+      urisToDelete.add(level1);
+
+      hadoopFS.deleteBatch(urisToDelete, true);
+
+      hadoopFS.delete(baseURI, true);
+    }
+  }
+
+  @Test
+  public void testDeleteBatchPerformanceWithManyFiles()
+      throws IOException {
+    URI baseURI = URI.create(TMP_DIR + "/testDeleteBatchPerformance");
+    try (HadoopPinotFS hadoopFS = new HadoopPinotFS()) {
+      hadoopFS.init(new PinotConfiguration());
+      hadoopFS.mkdir(baseURI);
+
+      // Create many files
+      int fileCount = 50;
+      List<URI> urisToDelete = new ArrayList<>();
+      for (int i = 0; i < fileCount; i++) {
+        URI testFile = new Path(baseURI.getPath(), "file" + i + ".txt").toUri();
+        hadoopFS.touch(testFile);
+        urisToDelete.add(testFile);
+      }
+
+      // Delete all files using deleteBatch
+      long startTime = System.currentTimeMillis();
+      Assert.assertTrue(hadoopFS.deleteBatch(urisToDelete, false));
+      long batchTime = System.currentTimeMillis() - startTime;
+
+      // Verify all files are deleted
+      for (URI uri : urisToDelete) {
+        Assert.assertFalse(hadoopFS.exists(uri));
+      }
+
+      // Log performance (batch deletion should be reasonably fast)
+      Assert.assertTrue(batchTime < 10000, "Batch deletion took too long: " + batchTime + "ms");
+
+      hadoopFS.delete(baseURI, true);
+    }
+  }
 }
diff --git a/pinot-spi/src/main/java/org/apache/pinot/spi/filesystem/BasePinotFS.java b/pinot-spi/src/main/java/org/apache/pinot/spi/filesystem/BasePinotFS.java
index 8d2724f25aa..16b253e000f 100644
--- a/pinot-spi/src/main/java/org/apache/pinot/spi/filesystem/BasePinotFS.java
+++ b/pinot-spi/src/main/java/org/apache/pinot/spi/filesystem/BasePinotFS.java
@@ -34,9 +34,14 @@ public abstract class BasePinotFS implements PinotFS {
   @Override
   public boolean deleteBatch(List<URI> segmentUris, boolean forceDelete)
       throws IOException {
+    if (segmentUris == null || segmentUris.isEmpty()) {
+      return true;
+    }
     boolean result = true;
     for (URI segmentUri : segmentUris) {
-      result &= delete(segmentUri, forceDelete);
+      if (!delete(segmentUri, forceDelete)) {
+        result = false;
+      }
     }
     return result;
   }
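
On the batch path the aggregate result is false if any single delete() fails, while every URI is still attempted (true of the old &= form as well, since &= does not short-circuit); the behavioral addition is the explicit success for a null or empty list. A small sketch of the contract, again with hypothetical /tmp paths:

    import java.io.IOException;
    import java.net.URI;
    import java.util.List;
    import org.apache.pinot.plugin.filesystem.HadoopPinotFS;
    import org.apache.pinot.spi.env.PinotConfiguration;

    public class DeleteBatchSketch {
      public static void main(String[] args) throws IOException {
        try (HadoopPinotFS fs = new HadoopPinotFS()) {
          fs.init(new PinotConfiguration());

          // A null or empty list is now an explicit no-op success.
          System.out.println(fs.deleteBatch(null, false)); // true

          // One non-empty directory (fails without forceDelete) and one plain
          // file (succeeds): every URI is attempted, the aggregate is false.
          URI nonEmptyDir = URI.create("/tmp/deleteBatchSketch/nonEmptyDir");
          URI plainFile = URI.create("/tmp/deleteBatchSketch/file.txt");
          fs.mkdir(nonEmptyDir);
          fs.touch(URI.create(nonEmptyDir + "/child.txt"));
          fs.touch(plainFile);

          System.out.println(fs.deleteBatch(List.of(nonEmptyDir, plainFile), false)); // false
          System.out.println(fs.exists(plainFile)); // false: the file was still deleted

          // Clean up the hypothetical scratch directory.
          fs.delete(URI.create("/tmp/deleteBatchSketch"), true);
        }
      }
    }
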

