This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new 1e53a342711 HBASE-27530 Fix comment syntax errors (#4910)
1e53a342711 is described below

commit 1e53a342711cf6274db6bd7a7fc4c8a587a10939
Author: LYCJeff <116775163+lycj...@users.noreply.github.com>
AuthorDate: Wed Dec 14 20:11:21 2022 +0800

    HBASE-27530 Fix comment syntax errors (#4910)
    
    Signed-off-by: Duo Zhang <zhang...@apache.org>
    (cherry picked from commit f9518cc1a13becb669a3ca5c257850c4fcea989c)
---
 .../io/asyncfs/FanOutOneBlockAsyncDFSOutput.java   | 10 +++++-----
 .../hadoop/hbase/util/RecoverLeaseFSUtils.java     | 22 +++++++++++-----------
 .../hbase/master/balancer/BaseLoadBalancer.java    |  6 +++---
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
index 149dec431e0..5febcc8daa1 100644
--- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
@@ -84,7 +84,7 @@ import org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
  * An asynchronous HDFS output stream implementation which fans out data to datanode and only
  * supports writing file with only one block.
  * <p>
- * Use the createOutput method in {@link FanOutOneBlockAsyncDFSOutputHelper} to create. The mainly
+ * Use the createOutput method in {@link FanOutOneBlockAsyncDFSOutputHelper} to create. The main
  * usage of this class is implementing WAL, so we only expose a little HDFS configurations in the
  * method. And we place it here under io package because we want to make it independent of WAL
  * implementation thus easier to move it to HDFS project finally.
@@ -104,8 +104,8 @@ import org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
 @InterfaceAudience.Private
 public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
 
-  // The MAX_PACKET_SIZE is 16MB but it include the header size and checksum size. So here we set a
-  // smaller limit for data size.
+  // The MAX_PACKET_SIZE is 16MB, but it includes the header size and checksum size. So here we set
+  // a smaller limit for data size.
   private static final int MAX_DATA_LEN = 12 * 1024 * 1024;
 
   private final Configuration conf;
@@ -173,7 +173,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
   private long nextPacketOffsetInBlock = 0L;
 
   // the length of the trailing partial chunk, this is because the packet start offset must be
-  // aligned with the length of checksum chunk so we need to resend the same data.
+  // aligned with the length of checksum chunk, so we need to resend the same data.
   private int trailingPartialChunkLength = 0;
 
   private long nextPacketSeqno = 0L;
@@ -437,7 +437,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
       checksumBuf.release();
       headerBuf.release();
 
-      // This method takes ownership of the dataBuf so we need release it before returning.
+      // This method takes ownership of the dataBuf, so we need release it before returning.
       dataBuf.release();
       return;
     }
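
For context on the MAX_PACKET_SIZE comment in the hunk above: a DFS packet carries a header and per-chunk checksums alongside the data, so the usable data payload has to sit well below the 16MB packet ceiling, hence the 12MB cap. A minimal sketch of that budgeting follows; the class and method names are hypothetical, and the 512-byte chunk and 4-byte checksum sizes are assumed common HDFS defaults rather than values taken from this diff.

    // Hypothetical sketch of the packet-size budgeting described in the comment above.
    public final class PacketBudgetSketch {

      // Ceiling on a whole packet, header and checksums included (per the comment).
      static final int MAX_PACKET_SIZE = 16 * 1024 * 1024;

      // Conservative cap on the data portion, leaving headroom for the overhead.
      static final int MAX_DATA_LEN = 12 * 1024 * 1024;

      // Checks that data plus checksum overhead plus header fits in one packet.
      // Assumes a 4-byte checksum over each 512-byte chunk (common HDFS defaults).
      static boolean fitsInPacket(int dataLen, int headerLen) {
        int chunks = (dataLen + 511) / 512;   // checksum chunks, rounded up
        int checksumLen = chunks * 4;         // one 4-byte checksum per chunk
        return (long) headerLen + checksumLen + dataLen <= MAX_PACKET_SIZE;
      }

      public static void main(String[] args) {
        // 12MB of data plus ~96KB of checksums and a small header stays under 16MB.
        System.out.println(fitsInPacket(MAX_DATA_LEN, 64)); // prints true
      }
    }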
diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java
index 0014185b85c..ff457cb5074 100644
--- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java
@@ -72,14 +72,14 @@ public final class RecoverLeaseFSUtils {
   * file's primary node. If all is well, it should return near immediately. But, as is common, it
   * is the very primary node that has crashed and so the namenode will be stuck waiting on a socket
   * timeout before it will ask another datanode to start the recovery. It does not help if we call
-   * recoverLease in the meantime and in particular, subsequent to the socket timeout, a
-   * recoverLease invocation will cause us to start over from square one (possibly waiting on socket
-   * timeout against primary node). So, in the below, we do the following: 1. Call recoverLease. 2.
-   * If it returns true, break. 3. If it returns false, wait a few seconds and then call it again.
-   * 4. If it returns true, break. 5. If it returns false, wait for what we think the datanode
-   * socket timeout is (configurable) and then try again. 6. If it returns true, break. 7. If it
-   * returns false, repeat starting at step 5. above. If HDFS-4525 is available, call it every
-   * second and we might be able to exit early.
+   * recoverLease in the meantime and in particular, after the socket timeout, a recoverLease
+   * invocation will cause us to start over from square one (possibly waiting on socket timeout
+   * against primary node). So, in the below, we do the following: 1. Call recoverLease. 2. If it
+   * returns true, break. 3. If it returns false, wait a few seconds and then call it again. 4. If
+   * it returns true, break. 5. If it returns false, wait for what we think the datanode socket
+   * timeout is (configurable) and then try again. 6. If it returns true, break. 7. If it returns
+   * false, repeat starting at step 5. above. If HDFS-4525 is available, call it every second, and
+   * we might be able to exit early.
    */
   private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p,
     final Configuration conf, final CancelableProgressable reporter) throws IOException {
@@ -89,10 +89,10 @@ public final class RecoverLeaseFSUtils {
     // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
     // beyond that limit 'to be safe'.
     long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
-    // This setting should be a little bit above what the cluster dfs heartbeat is set to.
+    // This setting should be a little above what the cluster dfs heartbeat is set to.
     long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 4000);
     // This should be set to how long it'll take for us to timeout against primary datanode if it
-    // is dead. We set it to 64 seconds, 4 second than the default READ_TIMEOUT in HDFS, the
+    // is dead. We set it to 64 seconds, 4 seconds than the default READ_TIMEOUT in HDFS, the
     // default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY. If recovery is still failing after this
     // timeout, then further recovery will take liner backoff with this base, to avoid endless
     // preemptions when this value is not properly configured.
@@ -118,7 +118,7 @@ public final class RecoverLeaseFSUtils {
           Thread.sleep(firstPause);
         } else {
           // Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check
-          // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though.
+          // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though).
           long localStartWaiting = EnvironmentEdgeManager.currentTime();
           while (
             (EnvironmentEdgeManager.currentTime() - localStartWaiting)
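
The javadoc above boils down to a retry loop: one immediate recoverLease call, a short first pause, then linearly growing pauses based on the presumed datanode socket timeout (per the second hunk, 64 seconds: 4 seconds above the default 60-second READ_TIMEOUT in HDFS). Below is a minimal sketch of that schedule; recoverLeaseAttempt and the parameter names are hypothetical stand-ins for the real DistributedFileSystem.recoverLease call and configuration lookups, and the isFileClosed polling from the third hunk is omitted for brevity.

    // Hypothetical sketch of the recovery schedule the javadoc describes.
    final class LeaseRecoverySketch {

      // Stand-in for dfs.recoverLease(path); true means the lease was recovered.
      static boolean recoverLeaseAttempt() {
        return false;
      }

      // Steps 1-7: call immediately, pause briefly once, then back off linearly
      // on the socket-timeout base until recovery succeeds or the deadline passes.
      static boolean recoverWithBackoff(long firstPauseMs, long subsequentPauseMs,
          long timeoutMs) throws InterruptedException {
        long start = System.currentTimeMillis();
        for (int attempt = 0; System.currentTimeMillis() - start < timeoutMs; attempt++) {
          if (recoverLeaseAttempt()) { // steps 2, 4, 6: true -> done
            return true;
          }
          if (attempt == 0) {
            Thread.sleep(firstPauseMs); // step 3: wait a few seconds, then retry
          } else {
            Thread.sleep(subsequentPauseMs * attempt); // steps 5, 7: linear backoff
          }
        }
        return false; // recovery timeout elapsed without success
      }
    }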
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 084b97473e5..45c65e1fe58 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -56,7 +56,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 
 /**
- * The base class for load balancers. It provides the the functions used to by
+ * The base class for load balancers. It provides the functions used to by
  * {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign regions in the edge
  * cases. It doesn't provide an implementation of the actual balancing algorithm.
  */
@@ -456,7 +456,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
       return assignments;
     }
 
-    // Group all of the old assignments by their hostname.
+    // Group all the old assignments by their hostname.
     // We can't group directly by ServerName since the servers all have
     // new start-codes.
 
@@ -642,7 +642,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   }
 
   /**
-   * Round robin a list of regions to a list of servers
+   * Round-robin a list of regions to a list of servers
    */
   private void roundRobinAssignment(BalancerClusterState cluster, List<RegionInfo> regions,
     List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignments) {
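
For readers unfamiliar with the method the last hunk touches: round-robin assignment deals regions out to servers one at a time, wrapping around the server list so each server receives an even share. A minimal standalone sketch, using String placeholders for the real RegionInfo and ServerName types:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical sketch of round-robin assignment; not the balancer's actual code.
    final class RoundRobinSketch {
      static Map<String, List<String>> roundRobin(List<String> regions, List<String> servers) {
        Map<String, List<String>> assignments = new HashMap<>();
        for (String server : servers) {
          assignments.put(server, new ArrayList<>());
        }
        for (int i = 0; i < regions.size(); i++) {
          // Region i lands on server i mod |servers|, spreading regions evenly.
          assignments.get(servers.get(i % servers.size())).add(regions.get(i));
        }
        return assignments;
      }
    }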
