This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
     new a71288f2e2f HBASE-29370 TestHFileOutputFormat2 fails 100% on flaky dashboard (#7073)
a71288f2e2f is described below

commit a71288f2e2fe3f0b3fe254ca9a1174ce42e72d6a
Author: Duo Zhang <zhang...@apache.org>
AuthorDate: Fri Jun 6 20:59:47 2025 +0800

    HBASE-29370 TestHFileOutputFormat2 fails 100% on flaky dashboard (#7073)
    
    Signed-off-by: Wellington Chevreuil <wchevre...@apache.org>
---
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java |  4 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java    | 99 ++++++++++++----------
 2 files changed, 55 insertions(+), 48 deletions(-)

diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 51e23abc8ca..cc09904c826 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -889,8 +889,8 @@ public class HFileOutputFormat2 extends FileOutputFormat<ImmutableBytesWritable,
     FileSystem fs = FileSystem.get(conf);
     String hbaseTmpFsDir =
      conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging");
-    Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());
-    fs.makeQualified(partitionsPath);
+    Path partitionsPath =
+      fs.makeQualified(new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID()));
     writePartitions(conf, partitionsPath, splitPoints, writeMultipleTables);
     fs.deleteOnExit(partitionsPath);
 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 0ba5834e075..fb7dde1cc69 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -32,7 +32,6 @@ import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -1517,53 +1516,61 @@ public class TestHFileOutputFormat2 {
   }
 
   @Test
-  public void TestConfigurePartitioner() throws IOException {
-    Configuration conf = util.getConfiguration();
-    // Create a user who is not the current user
-    String fooUserName = "foo1234";
-    String fooGroupName = "group1";
-    UserGroupInformation ugi =
-      UserGroupInformation.createUserForTesting(fooUserName, new String[] { fooGroupName });
-    // Get user's home directory
-    Path fooHomeDirectory = ugi.doAs(new PrivilegedAction<Path>() {
-      @Override
-      public Path run() {
-        try (FileSystem fs = FileSystem.get(conf)) {
-          return fs.makeQualified(fs.getHomeDirectory());
-        } catch (IOException ioe) {
-          LOG.error("Failed to get foo's home directory", ioe);
+  public void testConfigurePartitioner() throws Exception {
+    util.startMiniDFSCluster(1);
+    try {
+      Configuration conf = util.getConfiguration();
+      // Create a user who is not the current user
+      String fooUserName = "foo1234";
+      String fooGroupName = "group1";
+      UserGroupInformation ugi =
+        UserGroupInformation.createUserForTesting(fooUserName, new String[] { fooGroupName });
+      // Get user's home directory
+      Path fooHomeDirectory = ugi.doAs(new PrivilegedAction<Path>() {
+        @Override
+        public Path run() {
+          try (FileSystem fs = FileSystem.get(conf)) {
+            return fs.makeQualified(fs.getHomeDirectory());
+          } catch (IOException ioe) {
+            LOG.error("Failed to get foo's home directory", ioe);
+          }
+          return null;
         }
-        return null;
-      }
-    });
-
-    Job job = Mockito.mock(Job.class);
-    Mockito.doReturn(conf).when(job).getConfiguration();
-    ImmutableBytesWritable writable = new ImmutableBytesWritable();
-    List<ImmutableBytesWritable> splitPoints = new LinkedList<ImmutableBytesWritable>();
-    splitPoints.add(writable);
-
-    ugi.doAs(new PrivilegedAction<Void>() {
-      @Override
-      public Void run() {
-        try {
-          HFileOutputFormat2.configurePartitioner(job, splitPoints, false);
-        } catch (IOException ioe) {
-          LOG.error("Failed to configure partitioner", ioe);
+      });
+      // create the home directory and chown
+      FileSystem fs = FileSystem.get(conf);
+      fs.mkdirs(fooHomeDirectory);
+      fs.setOwner(fooHomeDirectory, fooUserName, fooGroupName);
+
+      Job job = Mockito.mock(Job.class);
+      Mockito.doReturn(conf).when(job).getConfiguration();
+      ImmutableBytesWritable writable = new ImmutableBytesWritable();
+      List<ImmutableBytesWritable> splitPoints = new ArrayList<ImmutableBytesWritable>();
+      splitPoints.add(writable);
+
+      ugi.doAs(new PrivilegedAction<Void>() {
+        @Override
+        public Void run() {
+          try {
+            HFileOutputFormat2.configurePartitioner(job, splitPoints, false);
+          } catch (IOException ioe) {
+            LOG.error("Failed to configure partitioner", ioe);
+          }
+          return null;
         }
-        return null;
-      }
-    });
-    FileSystem fs = FileSystem.get(conf);
-    // verify that the job uses TotalOrderPartitioner
-    verify(job).setPartitionerClass(TotalOrderPartitioner.class);
-    // verify that TotalOrderPartitioner.setPartitionFile() is called.
-    String partitionPathString = conf.get("mapreduce.totalorderpartitioner.path");
-    Assert.assertNotNull(partitionPathString);
-    // Make sure the partition file is in foo1234's home directory, and that
-    // the file exists.
-    Assert.assertTrue(partitionPathString.startsWith(fooHomeDirectory.toString()));
-    Assert.assertTrue(fs.exists(new Path(partitionPathString)));
+      });
+      // verify that the job uses TotalOrderPartitioner
+      verify(job).setPartitionerClass(TotalOrderPartitioner.class);
+      // verify that TotalOrderPartitioner.setPartitionFile() is called.
+      String partitionPathString = conf.get("mapreduce.totalorderpartitioner.path");
+      Assert.assertNotNull(partitionPathString);
+      // Make sure the partition file is in foo1234's home directory, and that
+      // the file exists.
+      Assert.assertTrue(partitionPathString.startsWith(fooHomeDirectory.toString()));
+      Assert.assertTrue(fs.exists(new Path(partitionPathString)));
+    } finally {
+      util.shutdownMiniDFSCluster();
+    }
   }
 
   @Test

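A note on the one-line change in HFileOutputFormat2: FileSystem.makeQualified(Path) returns a new, fully qualified Path and does not modify its argument, so the previous code ignored the qualified result and kept using the unqualified path; the patch now captures the return value. The test change starts a single-node mini DFS cluster and creates and chowns foo1234's home directory before calling configurePartitioner. Below is a minimal, illustrative sketch of the makeQualified pattern; the class name and path are made up for the example and are not taken from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MakeQualifiedSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical staging path, analogous to the partitions file above.
        Path raw = new Path("/tmp/partitions_example");
        // makeQualified returns a new Path with this FileSystem's scheme and
        // authority filled in; 'raw' itself is left unchanged, so the return
        // value must be captured rather than discarded.
        Path qualified = fs.makeQualified(raw);
        System.out.println(qualified); // e.g. file:/tmp/partitions_example
      }
    }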