[08/29] hbase-site git commit: Published site at 44dec60054d1c45880d591c74a023f7a534e6d73.

2018-12-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/70f4ddbc/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestScannersFromClientSide.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestScannersFromClientSide.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestScannersFromClientSide.html
index ba27788..fe6a726 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestScannersFromClientSide.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestScannersFromClientSide.html
@@ -26,924 +26,967 @@
 018package org.apache.hadoop.hbase.client;
 019
 020import static 
org.apache.hadoop.hbase.HConstants.RPC_CODEC_CONF_KEY;
-021import static 
org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_CODEC_CLASS;
-022import static 
org.junit.Assert.assertArrayEquals;
-023import static 
org.junit.Assert.assertEquals;
-024import static 
org.junit.Assert.assertFalse;
-025import static 
org.junit.Assert.assertNotNull;
-026import static 
org.junit.Assert.assertNull;
-027import static 
org.junit.Assert.assertTrue;
-028import static org.junit.Assert.fail;
-029
-030import java.io.IOException;
-031import java.util.ArrayList;
-032import java.util.List;
-033import java.util.concurrent.TimeUnit;
-034import java.util.function.Consumer;
-035import java.util.stream.IntStream;
-036import 
org.apache.hadoop.conf.Configuration;
-037import org.apache.hadoop.hbase.Cell;
-038import 
org.apache.hadoop.hbase.CompareOperator;
-039import 
org.apache.hadoop.hbase.HBaseClassTestRule;
-040import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-041import 
org.apache.hadoop.hbase.HColumnDescriptor;
-042import 
org.apache.hadoop.hbase.HConstants;
-043import 
org.apache.hadoop.hbase.HRegionInfo;
-044import 
org.apache.hadoop.hbase.HRegionLocation;
-045import 
org.apache.hadoop.hbase.HTestConst;
-046import 
org.apache.hadoop.hbase.KeyValue;
-047import 
org.apache.hadoop.hbase.MiniHBaseCluster;
-048import 
org.apache.hadoop.hbase.TableName;
-049import 
org.apache.hadoop.hbase.TableNotFoundException;
-050import 
org.apache.hadoop.hbase.filter.BinaryComparator;
-051import 
org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
-052import 
org.apache.hadoop.hbase.filter.ColumnRangeFilter;
-053import 
org.apache.hadoop.hbase.filter.QualifierFilter;
-054import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-055import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-056import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import org.junit.After;
-060import org.junit.AfterClass;
-061import org.junit.Before;
-062import org.junit.BeforeClass;
-063import org.junit.ClassRule;
-064import org.junit.Rule;
-065import org.junit.Test;
-066import 
org.junit.experimental.categories.Category;
-067import org.junit.rules.TestName;
-068import org.slf4j.Logger;
-069import org.slf4j.LoggerFactory;
-070
-071/**
-072 * A client-side test, mostly testing 
scanners with various parameters.
-073 */
-074@Category({MediumTests.class, 
ClientTests.class})
-075public class TestScannersFromClientSide 
{
-076
-077  @ClassRule
-078  public static final HBaseClassTestRule 
CLASS_RULE =
-079  
HBaseClassTestRule.forClass(TestScannersFromClientSide.class);
-080
-081  private static final Logger LOG = 
LoggerFactory.getLogger(TestScannersFromClientSide.class);
-082
-083  private final static 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-084  private static byte [] ROW = 
Bytes.toBytes("testRow");
-085  private static byte [] FAMILY = 
Bytes.toBytes("testFamily");
-086  private static byte [] QUALIFIER = 
Bytes.toBytes("testQualifier");
-087  private static byte [] VALUE = 
Bytes.toBytes("testValue");
-088
-089  @Rule
-090  public TestName name = new 
TestName();
-091
-092  /**
-093   * @throws java.lang.Exception
-094   */
-095  @BeforeClass
-096  public static void setUpBeforeClass() 
throws Exception {
-097Configuration conf = 
TEST_UTIL.getConfiguration();
-098
conf.setLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, 10 * 1024 * 
1024);
-099TEST_UTIL.startMiniCluster(3);
-100  }
-101
-102  /**
-103   * @throws java.lang.Exception
-104   */
-105  @AfterClass
-106  public static void tearDownAfterClass() 
throws Exception {
-107TEST_UTIL.shutdownMiniCluster();
-108  }
-109
-110  /**
-111   * @throws java.lang.Exception
-112   */
-113  @Before
-114  public void setUp() throws Exception 
{
-115// Nothing to do.
-116  }
-117
-118  /**
-119   * @throws java.lang.Exception
-120   */
-121  @After
-122  public void tearDown() throws Exception 
{
-123// Nothing to do.
-124  }
-125
-126  /**
-127   * Test from client side for batch of 
scan
-128   *
-129   * @throws Exception
-130   */
-131  @Test
-132  public void testScanBatch() throws 
Exception {
-133final 

[08/29] hbase-site git commit: Published site at 12786f80c14c6f2c3c111a55bbf431fb2e81e828.

2018-12-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/13ae5225/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
deleted file mode 100644
index 28e1b9b..000
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
+++ /dev/null
@@ -1,842 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html lang="en">
-<head>
-<title>Source code</title>
-</head>
-<body>
-001/**
-002 *
-003 * Licensed to the Apache Software 
Foundation (ASF) under one
-004 * or more contributor license 
agreements.  See the NOTICE file
-005 * distributed with this work for 
additional information
-006 * regarding copyright ownership.  The 
ASF licenses this file
-007 * to you under the Apache License, 
Version 2.0 (the
-008 * "License"); you may not use this file 
except in compliance
-009 * with the License.  You may obtain a 
copy of the License at
-010 *
-011 * 
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or 
agreed to in writing, software
-014 * distributed under the License is 
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-016 * See the License for the specific 
language governing permissions and
-017 * limitations under the License.
-018 */
-019package org.apache.hadoop.hbase;
-020
-021import static 
org.junit.Assert.assertEquals;
-022import static 
org.junit.Assert.assertFalse;
-023import static 
org.junit.Assert.assertNotEquals;
-024import static 
org.junit.Assert.assertNotNull;
-025import static 
org.junit.Assert.assertTrue;
-026
-027import java.io.ByteArrayInputStream;
-028import java.io.ByteArrayOutputStream;
-029import java.io.DataInputStream;
-030import java.io.DataOutputStream;
-031import java.io.IOException;
-032import java.util.Collections;
-033import java.util.Iterator;
-034import java.util.List;
-035import java.util.Set;
-036import java.util.TreeSet;
-037
-038import 
org.apache.hadoop.hbase.testclassification.SmallTests;
-039import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
-040import 
org.apache.hadoop.hbase.util.Bytes;
-041import org.junit.ClassRule;
-042import org.junit.Test;
-043import 
org.junit.experimental.categories.Category;
-044import org.slf4j.Logger;
-045import org.slf4j.LoggerFactory;
-046
-047@Category(SmallTests.class)
-048public class TestKeyValue {
-049  @ClassRule
-050  public static final HBaseClassTestRule 
CLASS_RULE =
-051  
HBaseClassTestRule.forClass(TestKeyValue.class);
-052  private static final Logger LOG = 
LoggerFactory.getLogger(TestKeyValue.class);
-053
-054  @Test
-055  public void testColumnCompare() throws 
Exception {
-056final byte [] a = 
Bytes.toBytes("aaa");
-057byte [] family1 = 
Bytes.toBytes("abc");
-058byte [] qualifier1 = 
Bytes.toBytes("def");
-059byte [] family2 = 
Bytes.toBytes("abcd");
-060byte [] qualifier2 = 
Bytes.toBytes("ef");
-061
-062KeyValue aaa = new KeyValue(a, 
family1, qualifier1, 0L, KeyValue.Type.Put, a);
-063
assertFalse(CellUtil.matchingColumn(aaa, family2, qualifier2));
-064
assertTrue(CellUtil.matchingColumn(aaa, family1, qualifier1));
-065aaa = new KeyValue(a, family2, 
qualifier2, 0L, KeyValue.Type.Put, a);
-066
assertFalse(CellUtil.matchingColumn(aaa, family1, qualifier1));
-067
assertTrue(CellUtil.matchingColumn(aaa, family2,qualifier2));
-068byte [] nullQualifier = new 
byte[0];
-069aaa = new KeyValue(a, family1, 
nullQualifier, 0L, KeyValue.Type.Put, a);
-070
assertTrue(CellUtil.matchingColumn(aaa, family1,null));
-071
assertFalse(CellUtil.matchingColumn(aaa, family2,qualifier2));
-072  }
-073
-074  /**
-075   * Test a corner case when the family 
qualifier is a prefix of the
-076   *  column qualifier.
-077   */
-078  @Test
-079  public void testColumnCompare_prefix() 
throws Exception {
-080final byte [] a = 
Bytes.toBytes("aaa");
-081byte [] family1 = 
Bytes.toBytes("abc");
-082byte [] qualifier1 = 
Bytes.toBytes("def");
-083byte [] family2 = 
Bytes.toBytes("ab");
-084byte [] qualifier2 = 
Bytes.toBytes("def");
-085
-086KeyValue aaa = new KeyValue(a, 
family1, qualifier1, 0L, KeyValue.Type.Put, a);
-087
assertFalse(CellUtil.matchingColumn(aaa, family2, qualifier2));
-088  }
-089
-090  @Test
-091  public void testBasics() throws 
Exception {
-092LOG.info("LOWKEY: " + 
KeyValue.LOWESTKEY.toString());
-093String name = "testBasics";
-094check(Bytes.toBytes(name),
-095  Bytes.toBytes(name), 
Bytes.toBytes(name), 1,
-096  Bytes.toBytes(name));
-097// Test empty value and empty column 
-- both should work. (not empty fam)
-098check(Bytes.toBytes(name), 
Bytes.toBytes(name), null, 1, null);
-099check(HConstants.EMPTY_BYTE_ARRAY, 
Bytes.toBytes(name), null, 

[08/29] hbase-site git commit: Published site at 79d90c87b5bc6d4aa50e6edc52a3f20da708ee29.

2018-12-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3defc75b/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
index 2fd8fae..536220f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
@@ -234,851 +234,852 @@
 226  }
 227
 228  @Override
-229  public void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName,
-230TableDescriptor currentDescriptor, TableDescriptor newDescriptor) throws IOException {
-231if (!authorizationEnabled) {
-232  return;
-233}
-234if (LABELS_TABLE_NAME.equals(tableName)) {
-235  throw new ConstraintException("Cannot alter " + LABELS_TABLE_NAME);
+229  public TableDescriptor preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+230  TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor)
+231  throws IOException {
+232if (authorizationEnabled) {
+233  if (LABELS_TABLE_NAME.equals(tableName)) {
+234throw new ConstraintException("Cannot alter " + LABELS_TABLE_NAME);
+235  }
 236}
-237  }
-238
-239  @Override
-240  public void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName)
-241  throws IOException {
-242if (!authorizationEnabled) {
-243  return;
-244}
-245if (LABELS_TABLE_NAME.equals(tableName)) {
-246  throw new ConstraintException("Cannot disable " + LABELS_TABLE_NAME);
-247}
-248  }
-249
-250  /** Region 
related hooks **/
-251
-252  @Override
-253  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
-254// Read the entire labels table and 
populate the zk
-255if 
(e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME))
 {
-256  this.labelsRegion = true;
-257  synchronized (this) {
-258this.accessControllerAvailable = 
CoprocessorHost.getLoadedCoprocessors()
-259  
.contains(AccessController.class.getName());
-260  }
-261  
initVisibilityLabelService(e.getEnvironment());
-262} else {
-263  checkAuths = 
e.getEnvironment().getConfiguration()
-264  
.getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false);
-265  
initVisibilityLabelService(e.getEnvironment());
-266}
-267  }
-268
-269  private void 
initVisibilityLabelService(RegionCoprocessorEnvironment env) {
-270try {
-271  
this.visibilityLabelService.init(env);
-272  this.initialized = true;
-273} catch (IOException ioe) {
-274  LOG.error("Error while initializing 
VisibilityLabelService..", ioe);
-275  throw new RuntimeException(ioe);
-276}
-277  }
-278
-279  @Override
-280  public void postSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
-281  final boolean newValue, final MasterSwitchType switchType) throws IOException {
-282  }
-283
-284  @Override
-285  public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
-286  MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
-287if 
(c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) {
-288  return;
-289}
-290// TODO this can be made as a global 
LRU cache at HRS level?
-291Map<String, List<Tag>> labelCache = new HashMap<>();
-292for (int i = 0; i < miniBatchOp.size(); i++) {
-293  Mutation m = 
miniBatchOp.getOperation(i);
-294  CellVisibility cellVisibility = 
null;
-295  try {
-296cellVisibility = 
m.getCellVisibility();
-297  } catch (DeserializationException 
de) {
-298
miniBatchOp.setOperationStatus(i,
-299new 
OperationStatus(SANITY_CHECK_FAILURE, de.getMessage()));
-300continue;
-301  }
-302  boolean sanityFailure = false;
-303  boolean modifiedTagFound = false;
-304  Pair<Boolean, Tag> pair = new Pair<>(false, null);
-305  for (CellScanner cellScanner = 
m.cellScanner(); cellScanner.advance();) {
-306pair = 
checkForReservedVisibilityTagPresence(cellScanner.current(), pair);
-307if (!pair.getFirst()) {
-308  // Don't disallow reserved tags 
if authorization is disabled
-309  if (authorizationEnabled) {
-310
miniBatchOp.setOperationStatus(i, new OperationStatus(SANITY_CHECK_FAILURE,
-311  "Mutation contains cell 
with reserved type tag"));
-312sanityFailure = true;
-313  }
-314  break;
-315} else {
-316  // Indicates that the cell has 

[08/29] hbase-site git commit: Published site at 640a5e390b525e1c42f3c46bcc5acc59786900f0.

2018-11-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bd8387f/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.MockRSProcedureDispatcher.MockRemoteCall.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.MockRSProcedureDispatcher.MockRemoteCall.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.MockRSProcedureDispatcher.MockRemoteCall.html
index 6d4122b..bd1a182 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.MockRSProcedureDispatcher.MockRemoteCall.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.MockRSProcedureDispatcher.MockRemoteCall.html
@@ -269,12 +269,12 @@
 261
 262  protected void 
sendTransitionReport(final ServerName serverName,
 263  final 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo 
regionInfo,
-264  final TransitionCode state) throws 
IOException {
+264  final TransitionCode state, long 
seqId) throws IOException {
 265
ReportRegionStateTransitionRequest.Builder req =
 266  
ReportRegionStateTransitionRequest.newBuilder();
 267
req.setServer(ProtobufUtil.toServerName(serverName));
 268
req.addTransition(RegionStateTransition.newBuilder().addRegionInfo(regionInfo)
-269  
.setTransitionCode(state).setOpenSeqNum(1).build());
+269  
.setTransitionCode(state).setOpenSeqNum(seqId).build());
 270
am.reportRegionStateTransition(req.build());
 271  }
 272
@@ -294,304 +294,307 @@
 286@Override
 287protected RegionOpeningState 
execOpenRegion(ServerName server, RegionOpenInfo openReq)
 288throws IOException {
-289  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.OPENED);
-290  // Concurrency?
-291  // Now update the state of our 
cluster in regionsToRegionServers.
-292  SortedSet<byte[]> regions = regionsToRegionServers.get(server);
-293  if (regions == null) {
-294regions = new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
-295
regionsToRegionServers.put(server, regions);
-296  }
-297  RegionInfo hri = 
ProtobufUtil.toRegionInfo(openReq.getRegion());
-298  if 
(regions.contains(hri.getRegionName())) {
-299throw new 
UnsupportedOperationException(hri.getRegionNameAsString());
+289  RegionInfo hri = 
ProtobufUtil.toRegionInfo(openReq.getRegion());
+290  long previousOpenSeqNum =
+291
am.getRegionStates().getOrCreateRegionStateNode(hri).getOpenSeqNum();
+292  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.OPENED,
+293previousOpenSeqNum + 2);
+294  // Concurrency?
+295  // Now update the state of our 
cluster in regionsToRegionServers.
+296  SortedSet<byte[]> regions = regionsToRegionServers.get(server);
+297  if (regions == null) {
+298regions = new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
+299
regionsToRegionServers.put(server, regions);
 300  }
-301  regions.add(hri.getRegionName());
-302  return RegionOpeningState.OPENED;
-303}
-304
-305@Override
-306protected CloseRegionResponse 
execCloseRegion(ServerName server, byte[] regionName)
-307throws IOException {
-308  RegionInfo hri = 
am.getRegionInfo(regionName);
-309  sendTransitionReport(server, 
ProtobufUtil.toRegionInfo(hri), TransitionCode.CLOSED);
-310  return 
CloseRegionResponse.newBuilder().setClosed(true).build();
-311}
-312  }
-313
-314  protected static class 
ServerNotYetRunningRsExecutor implements MockRSExecutor {
-315@Override
-316public ExecuteProceduresResponse 
sendRequest(ServerName server, ExecuteProceduresRequest req)
-317throws IOException {
-318  throw new 
ServerNotRunningYetException("wait on server startup");
-319}
-320  }
-321
-322  protected static class FaultyRsExecutor 
implements MockRSExecutor {
-323private final IOException 
exception;
+301  if 
(regions.contains(hri.getRegionName())) {
+302throw new 
UnsupportedOperationException(hri.getRegionNameAsString());
+303  }
+304  regions.add(hri.getRegionName());
+305  return RegionOpeningState.OPENED;
+306}
+307
+308@Override
+309protected CloseRegionResponse 
execCloseRegion(ServerName server, byte[] regionName)
+310throws IOException {
+311  RegionInfo hri = 
am.getRegionInfo(regionName);
+312  sendTransitionReport(server, 
ProtobufUtil.toRegionInfo(hri), TransitionCode.CLOSED, -1);
+313  return 
CloseRegionResponse.newBuilder().setClosed(true).build();
+314}
+315  }
+316
+317  protected static class 
ServerNotYetRunningRsExecutor implements MockRSExecutor {
+318@Override
+319public ExecuteProceduresResponse 
sendRequest(ServerName server, 

[08/29] hbase-site git commit: Published site at 5e84997f2ffdbcf5f849d70c30ddbe2db4039ca4.

2018-11-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27f5bfb5/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.RootProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.RootProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.RootProcedure.html
index 7eb899a..f89b826 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.RootProcedure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.RootProcedure.html
@@ -46,226 +46,262 @@
 038import org.slf4j.Logger;
 039import org.slf4j.LoggerFactory;
 040
-041
+041import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
 042
-043@Category({MasterTests.class, 
SmallTests.class})
-044public class TestProcedureBypass {
-045
-046  @ClassRule public static final 
HBaseClassTestRule CLASS_RULE = HBaseClassTestRule
-047  
.forClass(TestProcedureBypass.class);
-048
-049  private static final Logger LOG = 
LoggerFactory.getLogger(TestProcedureBypass.class);
-050
-051  private static final int 
PROCEDURE_EXECUTOR_SLOTS = 1;
-052
-053  private static TestProcEnv procEnv;
-054  private static ProcedureStore 
procStore;
-055
-056  private static ProcedureExecutor<TestProcEnv> procExecutor;
-057
-058  private static 
HBaseCommonTestingUtility htu;
-059
-060  private static FileSystem fs;
-061  private static Path testDir;
-062  private static Path logDir;
-063
-064  private static class TestProcEnv {
-065  }
-066
-067  @BeforeClass
-068  public static void setUp() throws 
Exception {
-069htu = new 
HBaseCommonTestingUtility();
-070
-071// NOTE: The executor will be created 
by each test
-072procEnv = new TestProcEnv();
-073testDir = htu.getDataTestDir();
-074fs = 
testDir.getFileSystem(htu.getConfiguration());
-075assertTrue(testDir.depth() > 1);
-076
-077logDir = new Path(testDir, 
"proc-logs");
-078procStore = 
ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir);
-079procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), procEnv,
-080procStore);
-081
procStore.start(PROCEDURE_EXECUTOR_SLOTS);
-082ProcedureTestingUtility
-083
.initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true);
-084  }
-085
-086  @Test
-087  public void 
testBypassSuspendProcedure() throws Exception {
-088final SuspendProcedure proc = new 
SuspendProcedure();
-089long id = 
procExecutor.submitProcedure(proc);
-090Thread.sleep(500);
-091//bypass the procedure
-092
assertTrue(procExecutor.bypassProcedure(id, 3, false, false));
-093htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass());
-094LOG.info("{} finished", proc);
-095  }
-096
-097  @Test
-098  public void testStuckProcedure() throws 
Exception {
-099final StuckProcedure proc = new 
StuckProcedure();
-100long id = 
procExecutor.submitProcedure(proc);
-101Thread.sleep(500);
-102//bypass the procedure
-103
assertTrue(procExecutor.bypassProcedure(id, 1000, true, false));
-104//Since the procedure is stuck there, 
we need to restart the executor to recovery.
-105
ProcedureTestingUtility.restart(procExecutor);
-106htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass());
-107LOG.info("{} finished", proc);
-108  }
-109
-110  @Test
-111  public void 
testBypassingProcedureWithParent() throws Exception {
-112final RootProcedure proc = new 
RootProcedure();
-113long rootId = 
procExecutor.submitProcedure(proc);
-114htu.waitFor(5000, () -> procExecutor.getProcedures().stream()
-115  .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList())
-116  .size() > 0);
-117SuspendProcedure suspendProcedure = (SuspendProcedure)procExecutor.getProcedures().stream()
-118.filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0);
-119assertTrue(procExecutor.bypassProcedure(suspendProcedure.getProcId(), 1000, false, false));
-120htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass());
-121LOG.info("{} finished", proc);
-122  }
-123
-124  @Test
-125  public void 
testBypassingStuckStateMachineProcedure() throws Exception {
-126final StuckStateMachineProcedure proc 
=
-127new 
StuckStateMachineProcedure(procEnv, StuckStateMachineState.START);
-128long id = 
procExecutor.submitProcedure(proc);
-129Thread.sleep(500);
-130// bypass the procedure
-131
assertFalse(procExecutor.bypassProcedure(id, 1000, false, false));
-132
assertTrue(procExecutor.bypassProcedure(id, 1000, true, false));
-133
-134htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass());
-135LOG.info("{} finished", proc);
-136  }
-137
-138  @Test
-139  public void 
testBypassingProcedureWithParentRecursive() throws Exception {
-140

[08/29] hbase-site git commit: Published site at 2997b6d0714d5542784baf830e7c16a9ef6b62d6.

2018-07-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/19303896/devapidocs/src-html/org/apache/hadoop/hbase/rest/model/ScannerModel.JaxbJsonProviderHolder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rest/model/ScannerModel.JaxbJsonProviderHolder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rest/model/ScannerModel.JaxbJsonProviderHolder.html
index 73545d5..cc42ba2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rest/model/ScannerModel.JaxbJsonProviderHolder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rest/model/ScannerModel.JaxbJsonProviderHolder.html
@@ -30,57 +30,57 @@
 022import java.io.IOException;
 023import java.io.Serializable;
 024import java.util.ArrayList;
-025import java.util.List;
-026import java.util.Map;
-027import java.util.NavigableSet;
-028
-029import javax.ws.rs.core.MediaType;
-030import 
javax.xml.bind.annotation.XmlAttribute;
-031import 
javax.xml.bind.annotation.XmlElement;
-032import 
javax.xml.bind.annotation.XmlRootElement;
-033
-034import 
org.apache.hadoop.hbase.CompareOperator;
-035import 
org.apache.hadoop.hbase.HConstants;
-036import 
org.apache.yetus.audience.InterfaceAudience;
-037import 
org.apache.hadoop.hbase.client.Scan;
-038import 
org.apache.hadoop.hbase.filter.BinaryComparator;
-039import 
org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-040import 
org.apache.hadoop.hbase.filter.BitComparator;
-041import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-042import 
org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
-043import 
org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
-044import 
org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
-045import 
org.apache.hadoop.hbase.filter.ColumnRangeFilter;
-046import 
org.apache.hadoop.hbase.filter.CompareFilter;
-047import 
org.apache.hadoop.hbase.filter.DependentColumnFilter;
-048import 
org.apache.hadoop.hbase.filter.FamilyFilter;
-049import 
org.apache.hadoop.hbase.filter.Filter;
-050import 
org.apache.hadoop.hbase.filter.FilterList;
-051import 
org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
-052import 
org.apache.hadoop.hbase.filter.InclusiveStopFilter;
-053import 
org.apache.hadoop.hbase.filter.KeyOnlyFilter;
-054import 
org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
-055import 
org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;
-056import 
org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
-057import 
org.apache.hadoop.hbase.filter.NullComparator;
-058import 
org.apache.hadoop.hbase.filter.PageFilter;
-059import 
org.apache.hadoop.hbase.filter.PrefixFilter;
-060import 
org.apache.hadoop.hbase.filter.QualifierFilter;
-061import 
org.apache.hadoop.hbase.filter.RandomRowFilter;
-062import 
org.apache.hadoop.hbase.filter.RegexStringComparator;
-063import 
org.apache.hadoop.hbase.filter.RowFilter;
-064import 
org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
-065import 
org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-066import 
org.apache.hadoop.hbase.filter.SkipFilter;
-067import 
org.apache.hadoop.hbase.filter.SubstringComparator;
-068import 
org.apache.hadoop.hbase.filter.TimestampsFilter;
-069import 
org.apache.hadoop.hbase.filter.ValueFilter;
-070import 
org.apache.hadoop.hbase.filter.WhileMatchFilter;
-071import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-072import 
org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-073import 
org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner;
-074import 
org.apache.hadoop.hbase.security.visibility.Authorizations;
-075import 
org.apache.hadoop.hbase.util.Base64;
+025import java.util.Base64;
+026import java.util.List;
+027import java.util.Map;
+028import java.util.NavigableSet;
+029
+030import javax.ws.rs.core.MediaType;
+031import 
javax.xml.bind.annotation.XmlAttribute;
+032import 
javax.xml.bind.annotation.XmlElement;
+033import 
javax.xml.bind.annotation.XmlRootElement;
+034
+035import 
org.apache.hadoop.hbase.CompareOperator;
+036import 
org.apache.hadoop.hbase.HConstants;
+037import 
org.apache.yetus.audience.InterfaceAudience;
+038import 
org.apache.hadoop.hbase.client.Scan;
+039import 
org.apache.hadoop.hbase.filter.BinaryComparator;
+040import 
org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+041import 
org.apache.hadoop.hbase.filter.BitComparator;
+042import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
+043import 
org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
+044import 
org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
+045import 
org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
+046import 
org.apache.hadoop.hbase.filter.ColumnRangeFilter;
+047import 
org.apache.hadoop.hbase.filter.CompareFilter;
+048import 
org.apache.hadoop.hbase.filter.DependentColumnFilter;
+049import 
org.apache.hadoop.hbase.filter.FamilyFilter;
+050import 
org.apache.hadoop.hbase.filter.Filter;
+051import 
org.apache.hadoop.hbase.filter.FilterList;
+052import 

[08/29] hbase-site git commit: Published site at d7561cee50acf2e3a52b8a38c71259d60b653ed3.

2018-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbe3a233/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionServerTracker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionServerTracker.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionServerTracker.html
index 06b90f3..256ed63 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionServerTracker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionServerTracker.html
@@ -135,64 +135,63 @@
 127ServerName serverName = 
pair.getFirst();
 128RegionServerInfo info = 
pair.getSecond();
 129regionServers.add(serverName);
-130ServerMetrics serverMetrics = 
info != null
-131  ? 
ServerMetricsBuilder.of(serverName,
-132
VersionInfoUtil.getVersionNumber(info.getVersionInfo()))
-133  : 
ServerMetricsBuilder.of(serverName);
-134
serverManager.checkAndRecordNewServer(serverName, serverMetrics);
-135  }
-136  
serverManager.findOutDeadServersAndProcess(deadServersFromPE, 
liveServersFromWALDir);
-137}
-138  }
-139
-140  public void stop() {
-141executor.shutdownNow();
-142  }
-143
-144  private synchronized void refresh() {
-145List<String> names;
-146try {
-147  names = 
ZKUtil.listChildrenAndWatchForNewChildren(watcher, 
watcher.getZNodePaths().rsZNode);
-148} catch (KeeperException e) {
-149  // here we need to abort as we 
failed to set watcher on the rs node which means that we can
-150  // not track the node deleted 
evetnt any more.
-151  server.abort("Unexpected zk 
exception getting RS nodes", e);
-152  return;
-153}
-154Set<ServerName> servers =
-155  names.stream().map(ServerName::parseServerName).collect(Collectors.toSet());
-156for (Iterator<ServerName> iter = regionServers.iterator(); iter.hasNext();) {
-157  ServerName sn = iter.next();
-158  if (!servers.contains(sn)) {
-159LOG.info("RegionServer ephemeral 
node deleted, processing expiration [{}]", sn);
-160serverManager.expireServer(sn);
-161iter.remove();
-162  }
-163}
-164// here we do not need to parse the 
region server info as it is useless now, we only need the
-165// server name.
-166boolean newServerAdded = false;
-167for (ServerName sn : servers) {
-168  if (regionServers.add(sn)) {
-169newServerAdded = true;
-170LOG.info("RegionServer ephemeral 
node created, adding [" + sn + "]");
-171  }
-172}
-173if (newServerAdded && server.isInitialized()) {
-174  // Only call the check to move 
servers if a RegionServer was added to the cluster; in this
-175  // case it could be a server with a 
new version so it makes sense to run the check.
-176  
server.checkIfShouldMoveSystemRegionAsync();
-177}
-178  }
-179
-180  @Override
-181  public void nodeChildrenChanged(String 
path) {
-182if (path.equals(watcher.getZNodePaths().rsZNode) && !server.isAborted() &&
-183  !server.isStopped()) {
-184  executor.execute(this::refresh);
-185}
-186  }
-187}
+130ServerMetrics serverMetrics = 
info != null ? ServerMetricsBuilder.of(serverName,
+131  
VersionInfoUtil.getVersionNumber(info.getVersionInfo()),
+132  
info.getVersionInfo().getVersion()) : ServerMetricsBuilder.of(serverName);
+133
serverManager.checkAndRecordNewServer(serverName, serverMetrics);
+134  }
+135  
serverManager.findOutDeadServersAndProcess(deadServersFromPE, 
liveServersFromWALDir);
+136}
+137  }
+138
+139  public void stop() {
+140executor.shutdownNow();
+141  }
+142
+143  private synchronized void refresh() {
+144List<String> names;
+145try {
+146  names = 
ZKUtil.listChildrenAndWatchForNewChildren(watcher, 
watcher.getZNodePaths().rsZNode);
+147} catch (KeeperException e) {
+148  // here we need to abort as we 
failed to set watcher on the rs node which means that we can
+149  // not track the node deleted 
evetnt any more.
+150  server.abort("Unexpected zk 
exception getting RS nodes", e);
+151  return;
+152}
+153Set<ServerName> servers =
+154  names.stream().map(ServerName::parseServerName).collect(Collectors.toSet());
+155for (Iterator<ServerName> iter = regionServers.iterator(); iter.hasNext();) {
+156  ServerName sn = iter.next();
+157  if (!servers.contains(sn)) {
+158LOG.info("RegionServer ephemeral 
node deleted, processing expiration [{}]", sn);
+159serverManager.expireServer(sn);
+160iter.remove();
+161  }
+162}
+163// here we do not need to parse the 
region server info as it is useless now, we only need the
+164// server name.
+165boolean newServerAdded = false;
+166for (ServerName sn : servers) {
+167  if (regionServers.add(sn)) {
+168newServerAdded = true;
+169

[08/29] hbase-site git commit: Published site at 40a73a5ca73c9e9e2ff9be1bf823056b108686af.

2018-05-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/46d8bc28/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
index 67da347..19ce3aa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
@@ -169,381 +169,405 @@
 161  
LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, 
group="
 162  + groupName);
 163  try {
-164
checkPermission("getRSGroupInfo");
-165RSGroupInfo rsGroupInfo = 
groupAdminServer.getRSGroupInfo(groupName);
-166if (rsGroupInfo != null) {
-167  
builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(rsGroupInfo));
-168}
-169  } catch (IOException e) {
-170
CoprocessorRpcUtils.setControllerException(controller, e);
-171  }
-172  done.run(builder.build());
-173}
-174
-175@Override
-176public void getRSGroupInfoOfTable(RpcController controller,
-177GetRSGroupInfoOfTableRequest request, RpcCallback<GetRSGroupInfoOfTableResponse> done) {
-178  
GetRSGroupInfoOfTableResponse.Builder builder = 
GetRSGroupInfoOfTableResponse.newBuilder();
-179  TableName tableName = 
ProtobufUtil.toTableName(request.getTableName());
-180  
LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, 
table="
-181  + tableName);
-182  try {
-183
checkPermission("getRSGroupInfoOfTable");
-184RSGroupInfo RSGroupInfo = 
groupAdminServer.getRSGroupInfoOfTable(tableName);
-185if (RSGroupInfo != null) {
-186  
builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo));
-187}
-188  } catch (IOException e) {
-189
CoprocessorRpcUtils.setControllerException(controller, e);
-190  }
-191  done.run(builder.build());
-192}
-193
-194@Override
-195public void moveServers(RpcController controller, MoveServersRequest request,
-196RpcCallback<MoveServersResponse> done) {
-197  MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
-198  Set<Address> hostPorts = Sets.newHashSet();
-199  for (HBaseProtos.ServerName el : 
request.getServersList()) {
-200
hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
-201  }
-202  
LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +" to 
rsgroup "
-203  + request.getTargetGroup());
-204  try {
-205if 
(master.getMasterCoprocessorHost() != null) {
-206  
master.getMasterCoprocessorHost().preMoveServers(hostPorts, 
request.getTargetGroup());
-207}
-208checkPermission("moveServers");
-209
groupAdminServer.moveServers(hostPorts, request.getTargetGroup());
-210if 
(master.getMasterCoprocessorHost() != null) {
-211  
master.getMasterCoprocessorHost().postMoveServers(hostPorts, 
request.getTargetGroup());
-212}
-213  } catch (IOException e) {
-214
CoprocessorRpcUtils.setControllerException(controller, e);
-215  }
-216  done.run(builder.build());
-217}
-218
-219@Override
-220public void moveTables(RpcController controller, MoveTablesRequest request,
-221RpcCallback<MoveTablesResponse> done) {
-222  MoveTablesResponse.Builder builder = MoveTablesResponse.newBuilder();
-223  Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
-224  for (HBaseProtos.TableName 
tableName : request.getTableNameList()) {
-225
tables.add(ProtobufUtil.toTableName(tableName));
-226  }
-227  
LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables +" to 
rsgroup "
-228  + request.getTargetGroup());
-229  try {
-230if 
(master.getMasterCoprocessorHost() != null) {
-231  
master.getMasterCoprocessorHost().preMoveTables(tables, 
request.getTargetGroup());
-232}
-233checkPermission("moveTables");
-234
groupAdminServer.moveTables(tables, request.getTargetGroup());
-235if 
(master.getMasterCoprocessorHost() != null) {
-236  
master.getMasterCoprocessorHost().postMoveTables(tables, 
request.getTargetGroup());
-237}
-238  } catch (IOException e) {
-239
CoprocessorRpcUtils.setControllerException(controller, e);
-240  }
-241  done.run(builder.build());
-242}
-243
-244@Override
-245public void addRSGroup(RpcController controller, AddRSGroupRequest request,
-246RpcCallback<AddRSGroupResponse> done) {
-247  AddRSGroupResponse.Builder builder 
= AddRSGroupResponse.newBuilder();
-248  

[08/29] hbase-site git commit: Published site at 477f9fdb32873387231c5fbbff130ba8bf7b5d68.

2018-05-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/621479e1/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
index 7f42212..d4bf03c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
@@ -1159,434 +1159,442 @@
 1151}
 1152  }
 1153
-1154  public void setRackManager(RackManager 
rackManager) {
-1155this.rackManager = rackManager;
-1156  }
-1157
-1158  protected boolean needsBalance(Cluster 
c) {
-1159ClusterLoadState cs = new 
ClusterLoadState(c.clusterState);
-1160if (cs.getNumServers() < MIN_SERVER_BALANCE) {
-1161  if (LOG.isDebugEnabled()) {
-1162LOG.debug("Not running balancer 
because only " + cs.getNumServers()
-1163+ " active 
regionserver(s)");
-1164  }
-1165  return false;
-1166}
-1167
if(areSomeRegionReplicasColocated(c)) return true;
-1168// Check if we even need to do any 
load balancing
-1169// HBASE-3681 check sloppiness 
first
-1170float average = cs.getLoadAverage(); 
// for logging
-1171int floor = (int) Math.floor(average 
* (1 - slop));
-1172int ceiling = (int) 
Math.ceil(average * (1 + slop));
-1173if (!(cs.getMaxLoad() > ceiling || cs.getMinLoad() < floor)) {
-1174  NavigableMap<ServerAndLoad, List<RegionInfo>> serversByLoad = cs.getServersByLoad();
-1175  if (LOG.isTraceEnabled()) {
-1176// If nothing to balance, then 
don't say anything unless trace-level logging.
-1177LOG.trace("Skipping load 
balancing because balanced cluster; " +
-1178  "servers=" + 
cs.getNumServers() +
-1179  " regions=" + 
cs.getNumRegions() + " average=" + average +
-1180  " mostloaded=" + 
serversByLoad.lastKey().getLoad() +
-1181  " leastloaded=" + 
serversByLoad.firstKey().getLoad());
-1182  }
-1183  return false;
-1184}
-1185return true;
-1186  }
-1187
-1188  /**
-1189   * Subclasses should implement this to 
return true if the cluster has nodes that hosts
-1190   * multiple replicas for the same 
region, or, if there are multiple racks and the same
-1191   * rack hosts replicas of the same 
region
-1192   * @param c Cluster information
-1193   * @return whether region replicas are 
currently co-located
-1194   */
-1195  protected boolean 
areSomeRegionReplicasColocated(Cluster c) {
-1196return false;
-1197  }
-1198
-1199  /**
-1200   * Generates a bulk assignment plan to 
be used on cluster startup using a
-1201   * simple round-robin assignment.
-1202   * <p>
-1203   * Takes a list of all the regions and 
all the servers in the cluster and
-1204   * returns a map of each server to the 
regions that it should be assigned.
-1205   * <p>
-1206   * Currently implemented as a 
round-robin assignment. Same invariant as load
-1207   * balancing, all servers holding 
floor(avg) or ceiling(avg).
-1208   *
-1209   * TODO: Use block locations from HDFS 
to place regions with their blocks
-1210   *
-1211   * @param regions all regions
-1212   * @param servers all servers
-1213   * @return map of server to the 
regions it should take, or null if no
-1214   * assignment is possible (ie. 
no regions or no servers)
-1215   */
-1216  @Override
-1217  public Map<ServerName, List<RegionInfo>> roundRobinAssignment(List<RegionInfo> regions,
-1218  List<ServerName> servers) throws HBaseIOException {
-1219
metricsBalancer.incrMiscInvocations();
-1220Map<ServerName, List<RegionInfo>> assignments = assignMasterSystemRegions(regions, servers);
-1221if (assignments != null && !assignments.isEmpty()) {
-1222  servers = new ArrayList<>(servers);
-1223  // Guarantee not to put other regions on master
-1224  servers.remove(masterServerName);
-1225  List<RegionInfo> masterRegions = assignments.get(masterServerName);
-1226  if (!masterRegions.isEmpty()) {
-1227regions = new ArrayList<>(regions);
-1228
regions.removeAll(masterRegions);
-1229  }
-1230}
-1231if (regions == null || 
regions.isEmpty()) {
-1232  return assignments;
-1233}
-1234
-1235int numServers = servers == null ? 0 
: servers.size();
-1236if (numServers == 0) {
-1237  LOG.warn("Wanted to do round robin 
assignment but no servers to assign to");
-1238  return null;
-1239}
-1240
-1241// TODO: instead of 
retainAssignment() and roundRobinAssignment(), we should just run the
-1242// normal LB.balancerCluster() with 
unassignedRegions. We only need to have a candidate
-1243// generator for AssignRegionAction. 
The LB 

[08/29] hbase-site git commit: Published site at c9f8c3436f6e38b5c7807677c5c3e7fc3e19e071.

2018-05-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ead846d7/devapidocs/src-html/org/apache/hadoop/hbase/util/FSUtils.HFileFilter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/FSUtils.HFileFilter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/FSUtils.HFileFilter.html
index ec995d2..01a18e6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/FSUtils.HFileFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/FSUtils.HFileFilter.html
@@ -51,1705 +51,1748 @@
 043import 
java.util.concurrent.ConcurrentHashMap;
 044import 
java.util.concurrent.ExecutionException;
 045import 
java.util.concurrent.ExecutorService;
-046import java.util.concurrent.Future;
-047import java.util.concurrent.FutureTask;
-048import 
java.util.concurrent.ThreadPoolExecutor;
-049import java.util.concurrent.TimeUnit;
-050import java.util.regex.Pattern;
-051import 
org.apache.hadoop.conf.Configuration;
-052import 
org.apache.hadoop.fs.BlockLocation;
-053import 
org.apache.hadoop.fs.FSDataInputStream;
-054import 
org.apache.hadoop.fs.FSDataOutputStream;
-055import org.apache.hadoop.fs.FileStatus;
-056import org.apache.hadoop.fs.FileSystem;
-057import org.apache.hadoop.fs.Path;
-058import org.apache.hadoop.fs.PathFilter;
-059import 
org.apache.hadoop.fs.permission.FsAction;
-060import 
org.apache.hadoop.fs.permission.FsPermission;
-061import 
org.apache.hadoop.hbase.ClusterId;
-062import 
org.apache.hadoop.hbase.HColumnDescriptor;
-063import 
org.apache.hadoop.hbase.HConstants;
-064import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-065import 
org.apache.hadoop.hbase.HRegionInfo;
-066import 
org.apache.hadoop.hbase.TableName;
-067import 
org.apache.hadoop.hbase.client.RegionInfo;
-068import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-069import 
org.apache.hadoop.hbase.fs.HFileSystem;
-070import 
org.apache.hadoop.hbase.io.HFileLink;
-071import 
org.apache.hadoop.hbase.master.HMaster;
-072import 
org.apache.hadoop.hbase.regionserver.HRegion;
-073import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-074import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-075import 
org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
-076import 
org.apache.hadoop.hdfs.DFSClient;
-077import 
org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
-078import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-079import 
org.apache.hadoop.hdfs.protocol.HdfsConstants;
-080import org.apache.hadoop.io.IOUtils;
-081import 
org.apache.hadoop.ipc.RemoteException;
-082import 
org.apache.hadoop.security.UserGroupInformation;
-083import 
org.apache.hadoop.util.Progressable;
-084import 
org.apache.hadoop.util.ReflectionUtils;
-085import 
org.apache.hadoop.util.StringUtils;
-086import 
org.apache.yetus.audience.InterfaceAudience;
-087import org.slf4j.Logger;
-088import org.slf4j.LoggerFactory;
-089
-090import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-091import 
org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-092import 
org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
-093import 
org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
-094
-095import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos;
-097
-098/**
-099 * Utility methods for interacting with 
the underlying file system.
-100 */
-101@InterfaceAudience.Private
-102public abstract class FSUtils extends 
CommonFSUtils {
-103  private static final Logger LOG = 
LoggerFactory.getLogger(FSUtils.class);
-104
-105  private static final String 
THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
-106  private static final int 
DEFAULT_THREAD_POOLSIZE = 2;
-107
-108  /** Set to true on Windows platforms 
*/
-109  @VisibleForTesting // currently only 
used in testing. TODO refactor into a test class
-110  public static final boolean WINDOWS = 
System.getProperty("os.name").startsWith("Windows");
-111
-112  protected FSUtils() {
-113super();
-114  }
-115
-116  /**
-117   * @return True is 
codefs/code is instance of DistributedFileSystem
-118   * @throws IOException
-119   */
-120  public static boolean 
isDistributedFileSystem(final FileSystem fs) throws IOException {
-121FileSystem fileSystem = fs;
-122// If passed an instance of 
HFileSystem, it fails instanceof DistributedFileSystem.
-123// Check its backing fs for 
dfs-ness.
-124if (fs instanceof HFileSystem) {
-125  fileSystem = 
((HFileSystem)fs).getBackingFs();
-126}
-127return fileSystem instanceof 
DistributedFileSystem;
-128  }
-129
-130  /**
-131   * Compare path component of the Path 
URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the
-132   * '/a/b/c' part. If you passed in 
'hdfs://a/b/c and b/c, it would return true.  Does not consider
-133   * schema; i.e. if schemas different 
but path or subpath 

[08/29] hbase-site git commit: Published site at .

2017-10-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e6ae7c3e/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStoreWithCustomCompactor.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStoreWithCustomCompactor.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStoreWithCustomCompactor.html
index f49cc1a..956e7fd 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStoreWithCustomCompactor.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStoreWithCustomCompactor.html
@@ -523,1289 +523,1320 @@
 515assertCheck();
 516  }
 517
-518  /**
-519   * Getting data from files only
-520   * @throws IOException
-521   */
-522  @Test
-523  public void testGet_FromFilesOnly() 
throws IOException {
-524init(this.name.getMethodName());
-525
-526//Put data in memstore
-527this.store.add(new KeyValue(row, 
family, qf1, 1, (byte[])null), null);
-528this.store.add(new KeyValue(row, 
family, qf2, 1, (byte[])null), null);
-529//flush
-530flush(1);
-531
-532//Add more data
-533this.store.add(new KeyValue(row, 
family, qf3, 1, (byte[])null), null);
-534this.store.add(new KeyValue(row, 
family, qf4, 1, (byte[])null), null);
-535//flush
-536flush(2);
-537
-538//Add more data
-539this.store.add(new KeyValue(row, 
family, qf5, 1, (byte[])null), null);
-540this.store.add(new KeyValue(row, 
family, qf6, 1, (byte[])null), null);
-541//flush
-542flush(3);
-543
-544//Get
-545result = 
HBaseTestingUtility.getFromStoreFile(store,
-546get.getRow(),
-547qualifiers);
-548//this.store.get(get, qualifiers, 
result);
-549
-550//Need to sort the result since 
multiple files
-551Collections.sort(result, 
CellComparator.COMPARATOR);
-552
-553//Compare
-554assertCheck();
-555  }
+518  @Test
+519  public void 
testTimeRangeIfSomeCellsAreDroppedInFlush() throws IOException {
+520
testTimeRangeIfSomeCellsAreDroppedInFlush(1);
+521
testTimeRangeIfSomeCellsAreDroppedInFlush(3);
+522
testTimeRangeIfSomeCellsAreDroppedInFlush(5);
+523  }
+524
+525  private void 
testTimeRangeIfSomeCellsAreDroppedInFlush(int maxVersion) throws IOException 
{
+526init(this.name.getMethodName(), 
TEST_UTIL.getConfiguration(),
+527
ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(maxVersion).build());
+528long currentTs = 100;
+529long minTs = currentTs;
+530// the extra cell won't be flushed to 
disk,
+531// so the min of timerange will be 
different between memStore and hfile.
+532for (int i = 0; i != (maxVersion + 
1); ++i) {
+533  this.store.add(new KeyValue(row, 
family, qf1, ++currentTs, (byte[])null), null);
+534  if (i == 1) {
+535minTs = currentTs;
+536  }
+537}
+538flushStore(store, id++);
+539
+540Collection<HStoreFile> files = store.getStorefiles();
+541assertEquals(1, files.size());
+542HStoreFile f = 
files.iterator().next();
+543f.initReader();
+544StoreFileReader reader = 
f.getReader();
+545assertEquals(minTs, 
reader.timeRange.getMin());
+546assertEquals(currentTs, 
reader.timeRange.getMax());
+547  }
+548
+549  /**
+550   * Getting data from files only
+551   * @throws IOException
+552   */
+553  @Test
+554  public void testGet_FromFilesOnly() 
throws IOException {
+555init(this.name.getMethodName());
 556
-557  /**
-558   * Getting data from memstore and 
files
-559   * @throws IOException
-560   */
-561  @Test
-562  public void 
testGet_FromMemStoreAndFiles() throws IOException {
-563init(this.name.getMethodName());
-564
-565//Put data in memstore
-566this.store.add(new KeyValue(row, 
family, qf1, 1, (byte[])null), null);
-567this.store.add(new KeyValue(row, 
family, qf2, 1, (byte[])null), null);
-568//flush
-569flush(1);
-570
-571//Add more data
-572this.store.add(new KeyValue(row, 
family, qf3, 1, (byte[])null), null);
-573this.store.add(new KeyValue(row, 
family, qf4, 1, (byte[])null), null);
-574//flush
-575flush(2);
-576
-577//Add more data
-578this.store.add(new KeyValue(row, 
family, qf5, 1, (byte[])null), null);
-579this.store.add(new KeyValue(row, 
family, qf6, 1, (byte[])null), null);
+557//Put data in memstore
+558this.store.add(new KeyValue(row, 
family, qf1, 1, (byte[])null), null);
+559this.store.add(new KeyValue(row, 
family, qf2, 1, (byte[])null), null);
+560//flush
+561flush(1);
+562
+563//Add more data
+564this.store.add(new KeyValue(row, 
family, qf3, 1, (byte[])null), null);
+565this.store.add(new KeyValue(row, 
family, qf4, 1, (byte[])null), null);
+566//flush
+567flush(2);
+568
+569//Add more data
+570