This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new a18f5b1   HBASE-24052 Add debug to TestMasterShutdown  Addendum
a18f5b1 is described below

commit a18f5b15174b42f899e00caa7c3c2d5acf44defb
Author: stack <st...@apache.org>
AuthorDate: Thu Mar 26 12:22:22 2020 -0700

     HBASE-24052 Add debug to TestMasterShutdown
     Addendum
---
 .../hadoop/hbase/regionserver/HRegionServer.java   | 39 ++++++++++++----------
 .../hadoop/hbase/master/TestMasterShutdown.java    |  7 ++--
 2 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 0de2c17..904447c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -951,6 +951,10 @@ public class HRegionServer extends HasThread implements
    */
   @Override
   public void run() {
+    if (isStopped()) {
+      LOG.info("Skipping run; stopped");
+      return;
+    }
     try {
       // Do pre-registration initializations; zookeeper, lease threads, etc.
       preRegistrationInitialization();
@@ -964,24 +968,25 @@ public class HRegionServer extends HasThread implements
         // Initialize the RegionServerCoprocessorHost now that our ephemeral
         // node was created, in case any coprocessors want to use ZooKeeper
         this.rsHost = new RegionServerCoprocessorHost(this, this.conf);
-      }
 
-      // Try and register with the Master; tell it we are here.  Break if server is stopped or the
-      // clusterup flag is down or hdfs went wacky. Once registered successfully, go ahead and start
-      // up all Services. Use RetryCounter to get backoff in case Master is struggling to come up.
-      LOG.debug("About to register with Master.");
-      RetryCounterFactory rcf = new RetryCounterFactory(Integer.MAX_VALUE,
-          this.sleeper.getPeriod(), 1000 * 60 * 5);
-      RetryCounter rc = rcf.create();
-      while (keepLooping()) {
-        RegionServerStartupResponse w = reportForDuty();
-        if (w == null) {
-          long sleepTime = rc.getBackoffTimeAndIncrementAttempts();
-          LOG.warn("reportForDuty failed; sleeping {} ms and then retrying.", sleepTime);
-          this.sleeper.sleep(sleepTime);
-        } else {
-          handleReportForDutyResponse(w);
-          break;
+        // Try and register with the Master; tell it we are here.  Break if server is stopped or
+        // the clusterup flag is down or hdfs went wacky. Once registered successfully, go ahead and
+        // start up all Services. Use RetryCounter to get backoff in case Master is struggling to
+        // come up.
+        LOG.debug("About to register with Master.");
+        RetryCounterFactory rcf =
+          new RetryCounterFactory(Integer.MAX_VALUE, this.sleeper.getPeriod(), 1000 * 60 * 5);
+        RetryCounter rc = rcf.create();
+        while (keepLooping()) {
+          RegionServerStartupResponse w = reportForDuty();
+          if (w == null) {
+            long sleepTime = rc.getBackoffTimeAndIncrementAttempts();
+            LOG.warn("reportForDuty failed; sleeping {} ms and then retrying.", sleepTime);
+            this.sleeper.sleep(sleepTime);
+          } else {
+            handleReportForDutyResponse(w);
+            break;
+          }
         }
       }
 
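The hunk above moves the Master-registration loop inside the initialization block but keeps the same behaviour: retry reportForDuty() with an increasing, capped sleep obtained from RetryCounter. The stand-alone Java sketch below is not part of the patch; it uses plain Java instead of HBase's RetryCounterFactory/RetryCounter, the names BackoffRetrySketch and attemptRegistration are made up for illustration, and the base period and cap are shrunk so the demo finishes quickly (the patch uses this.sleeper.getPeriod() and a five-minute cap, 1000 * 60 * 5).

import java.util.concurrent.ThreadLocalRandom;

// Stand-alone sketch of the capped, growing backoff used when registering with the
// Master. Illustrative only; the patch itself relies on RetryCounterFactory/RetryCounter.
public class BackoffRetrySketch {

  public static void main(String[] args) throws InterruptedException {
    final long basePeriodMs = 100;   // patch: this.sleeper.getPeriod()
    final long maxSleepMs = 3_000;   // patch: 1000 * 60 * 5 (five minutes)
    int attempts = 0;

    while (true) {
      if (attemptRegistration()) {   // stands in for reportForDuty() returning non-null
        System.out.println("Registered after " + attempts + " failed attempts.");
        break;
      }
      // Backoff grows with the attempt count and is capped, in the spirit of
      // RetryCounter.getBackoffTimeAndIncrementAttempts().
      long sleepMs = Math.min(maxSleepMs, basePeriodMs * (1L << Math.min(attempts, 20)));
      attempts++;
      System.out.println("Registration failed; sleeping " + sleepMs + " ms before retrying.");
      Thread.sleep(sleepMs);
    }
  }

  // Simulated registration attempt that succeeds roughly once in five tries.
  private static boolean attemptRegistration() {
    return ThreadLocalRandom.current().nextInt(5) == 0;
  }
}

As in the patched run() method, the loop only exits once registration succeeds; the real server additionally breaks out when keepLooping() turns false.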
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
index 4dc6262..4d586a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
@@ -168,8 +168,7 @@ public class TestMasterShutdown {
         final long timeout = TimeUnit.MINUTES.toMillis(10);
         assertNotEquals("timeout waiting for server manager to become available.",
           -1, Waiter.waitFor(htu.getConfiguration(), timeout,
-            () -> masterThread.getMaster().getServerManager() != null &&
-              !masterThread.getMaster().isStopping()));
+            () -> masterThread.getMaster().getServerManager() != null));
 
         // Master has come up far enough that we can terminate it without creating a zombie.
         final long result = Waiter.waitFor(htu.getConfiguration(), timeout, 500, () -> {
@@ -184,10 +183,10 @@ public class TestMasterShutdown {
               LOG.debug("Shutdown RPC sent.");
               return true;
             } catch (CompletionException e) {
-              LOG.debug("Failure sending shutdown RPC.");
+              LOG.debug("Failure sending shutdown RPC.", e);
             }
           } catch (IOException|CompletionException e) {
-            LOG.debug("Failed to establish connection.");
+            LOG.debug("Failed to establish connection.", e);
           } catch (Throwable e) {
             LOG.info("Something unexpected happened.", e);
           }
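The TestMasterShutdown change above passes the caught exception to LOG.debug instead of logging the message alone, so the stack trace is preserved in the test output. A minimal sketch of the difference, assuming an SLF4J logger (as HBase uses) with a binding such as slf4j-simple on the classpath; the class name DebugLoggingSketch is made up for illustration.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DebugLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(DebugLoggingSketch.class);

  public static void main(String[] args) {
    try {
      throw new java.io.IOException("simulated connection failure");
    } catch (java.io.IOException e) {
      LOG.debug("Failed to establish connection.");     // message only; cause is lost
      LOG.debug("Failed to establish connection.", e);  // message plus stack trace
    }
  }
}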
