Apache9 commented on a change in pull request #2574:
URL: https://github.com/apache/hbase/pull/2574#discussion_r509840637



##########
File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
##########
@@ -1174,7 +1178,7 @@ public HStore call() throws IOException {
           LOG.info("Setting FlushNonSloppyStoresFirstPolicy for the region=" + 
this);
         }
       } catch (InterruptedException e) {
-        throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
+        throwOnInterrupt(e);

Review comment:
       Better to make it return the exception and write 'throw 
throwOnInterrupt(e)' here, so the compiler will know that this is an exit point.

##########
File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
##########
@@ -6588,8 +6677,10 @@ protected RowLock getRowLockInternal(byte[] row, boolean 
readLock, final RowLock
       success = true;
       return result;
     } catch (InterruptedException ie) {
-      LOG.warn("Thread interrupted waiting for lock on row: {}, in region {}", 
rowKey,
-        getRegionInfo().getRegionNameAsString());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Thread interrupted waiting for lock on row: {}, in region 
{}", rowKey,

Review comment:
       Here the interrupt may come from the checkInterrupt call, not from 
acquiring the lock?

##########
File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
##########
@@ -8748,6 +8863,11 @@ public void startRegionOperation(Operation op) throws 
IOException {
       throw new 
NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is 
closing");
     }
     lock(lock.readLock());
+    // Update regionLockHolders ONLY for any startRegionOperation call that is 
invoked from an RPC handler
+    Thread thisThread = Thread.currentThread();
+    if (isInterruptableOp) {
+      regionLockHolders.put(thisThread.hashCode(), thisThread);

Review comment:
       Maybe we could just use Collections.newSetFromMap(new 
ConcurrentHashMap<>()) here?

##########
File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
##########
@@ -4550,6 +4619,10 @@ private void doMiniBatchMutate(BatchOperation<?> 
batchOp) throws IOException {
     /** Keep track of the locks we hold so we can release them in finally 
clause */
     List<RowLock> acquiredRowLocks = 
Lists.newArrayListWithCapacity(batchOp.size());
     try {
+      // Check for thread interrupt status in case we have been signaled from
+      // #interruptRegionOperation.
+      checkInterrupt();

Review comment:
       Pity we need to repeat this call many times in the same method...

##########
File path: 
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
##########
@@ -7364,4 +7367,154 @@ protected HStoreForTesting(final HRegion region,
       return super.doCompaction(cr, filesToCompact, user, compactionStartTime, 
newFiles);
     }
   }
+
+  @Test(timeout=20000)

Review comment:
       Usually we do not add a timeout to a single test, since we already have a 
timeout configured for the whole test file.

##########
File path: 
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
##########
@@ -7364,4 +7367,154 @@ protected HStoreForTesting(final HRegion region,
       return super.doCompaction(cr, filesToCompact, user, compactionStartTime, 
newFiles);
     }
   }
+
+  @Test(timeout=20000)
+  public void testCloseNoInterrupt() throws Exception {
+    byte[] cf1 = Bytes.toBytes("CF1");
+    byte[][] families = { cf1 };
+
+    Configuration conf = HBaseConfiguration.create();

Review comment:
       Use the configuration from the HBaseTestingUtility instead? For example: 
new Configuration(TEST_UTIL.getConfiguration())

##########
File path: 
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java
##########
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.filter.FilterBase;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category({RegionServerTests.class, LargeTests.class})
+public class TestRegionInterrupt {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRegionInterrupt.class);
+
+  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestRegionInterrupt.class);
+
+  static final int SLEEP_TIME = 10 * 1000;
+  static final byte[] FAMILY = Bytes.toBytes("info");
+
+  @Rule
+  public TestName name = new TestName();

Review comment:
       Just use TableNameTestRule.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to