[21/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
index a7df46f..f0b26f3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
@@ -152,195 +152,219 @@
 144  }
 145
 146  /**
-147   * @return listing of ServerNames found in the filesystem under the WAL directory
-148   *   that COULD BE 'alive'; excludes those that have a '-splitting' suffix as these are already
-149   *   being split -- they cannot be 'alive'.
+147   * Get Servernames which are currently splitting; paths have a '-splitting' suffix.
+148   * @return ServerName
+149   * @throws IOException IOException
 150   */
-151  public Set<ServerName> getLiveServersFromWALDir() throws IOException {
-152    Path walDirPath = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
-153    FileStatus[] walDirForLiveServers = FSUtils.listStatus(fs, walDirPath,
-154      p -> !p.getName().endsWith(AbstractFSWALProvider.SPLITTING_EXT));
-155    if (walDirForLiveServers == null) {
-156      return Collections.emptySet();
-157    }
-158    return Stream.of(walDirForLiveServers).map(s -> {
-159      ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(s.getPath());
-160      if (serverName == null) {
-161        LOG.warn("Log folder {} doesn't look like its name includes a " +
-162          "region server name; leaving in place. If you see later errors about missing " +
-163          "write ahead logs they may be saved in this location.", s.getPath());
-164        return null;
-165      }
-166      return serverName;
-167    }).filter(s -> s != null).collect(Collectors.toSet());
-168  }
-169
-170  /**
-171   * Inspect the log directory to find dead servers which need recovery work
-172   * @return A set of ServerNames which aren't running but still have WAL files left in file system
-173   * @deprecated With proc-v2, we can record the crash server with procedure store, so do not need
-174   *             to scan the wal directory to find out the splitting wal directory any more. Leave
-175   *             it here only because {@code RecoverMetaProcedure}(which is also deprecated) uses
-176   *             it.
-177   */
-178  @Deprecated
-179  public Set<ServerName> getFailedServersFromLogFolders() {
-180    boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
-181      WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);
-182
-183    Set<ServerName> serverNames = new HashSet<>();
-184    Path logsDirPath = new Path(this.rootDir, HConstants.HREGION_LOGDIR_NAME);
-185
-186    do {
-187      if (services.isStopped()) {
-188        LOG.warn("Master stopped while trying to get failed servers.");
-189        break;
-190      }
-191      try {
-192        if (!this.fs.exists(logsDirPath)) return serverNames;
-193        FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null);
-194        // Get online servers after getting log folders to avoid log folder deletion of newly
-195        // checked in region servers . see HBASE-5916
-196        Set<ServerName> onlineServers = services.getServerManager().getOnlineServers().keySet();
-197
-198        if (logFolders == null || logFolders.length == 0) {
-199          LOG.debug("No log files to split, proceeding...");
-200          return serverNames;
-201        }
-202        for (FileStatus status : logFolders) {
-203          FileStatus[] curLogFiles = FSUtils.listStatus(this.fs, status.getPath(), null);
-204          if (curLogFiles == null || curLogFiles.length == 0) {
-205            // Empty log folder. No recovery needed
-206            continue;
-207          }
-208          final ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(
-209            status.getPath());
-210          if (null == serverName) {
-211            LOG.warn("Log folder " + status.getPath() + " doesn't look like its name includes a " +
-212              "region server name; leaving in place. If you see later errors about missing " +
-213              "write ahead logs they may be saved in this location.");
-214          } else if (!onlineServers.contains(serverName)) {
-215            LOG.info("Log folder " + status.getPath() + " doesn't belong "
-216              + "to a known region server, splitting");
-217            serverNames.add(serverName);
-218          } else {
-219            LOG.info("Log folder " + status.getPath() + " belongs to an existing region server");
-220          }
-221        }
-222        retrySplitting = false;
-223      } catch (IOException ioe) {
-224        LOG.warn("Failed

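The getLiveServersFromWALDir() body removed above boils down to: list the WAL directory, drop entries whose name ends with the '-splitting' suffix, and map what remains to server names. A minimal plain-Java sketch of that filtering, using strings in place of HBase's FileStatus/ServerName types (the sample directory names are hypothetical):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    public class WalDirFilterSketch {
      // Mirrors AbstractFSWALProvider.SPLITTING_EXT; directories with this suffix are being split.
      static final String SPLITTING_EXT = "-splitting";

      // Keep only WAL sub-directories that do NOT carry the '-splitting' suffix,
      // i.e. the ones that could still belong to live region servers.
      static Set<String> liveServerDirs(List<String> walSubDirs) {
        return walSubDirs.stream()
            .filter(name -> !name.endsWith(SPLITTING_EXT))
            .collect(Collectors.toSet());
      }

      public static void main(String[] args) {
        List<String> dirs = Arrays.asList(
            "rs1.example.org,16020,1537000000000",
            "rs2.example.org,16020,1537000000001-splitting");
        System.out.println(liveServerDirs(dirs)); // prints only the rs1 entry
      }
    }
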
[21/40] hbase-site git commit: Published site at 2aae247e3f8f8a393b403a82593bdc3a1ba81193.

2018-09-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/80652933/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
index af3b364..d974429 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
@@ -749,869 +749,894 @@
 741      int region = regionsToIndex.get(regionInfo);
 742
 743      int primary = regionIndexToPrimaryIndex[region];
-744
-745      // there is a subset relation for server < host < rack
-746      // check server first
-747
-748      if (contains(primariesOfRegionsPerServer[server], primary)) {
-749        // check for whether there are other servers that we can place this region
-750        for (int i = 0; i < primariesOfRegionsPerServer.length; i++) {
-751          if (i != server && !contains(primariesOfRegionsPerServer[i], primary)) {
-752            return true; // meaning there is a better server
-753          }
-754        }
-755        return false; // there is not a better server to place this
-756      }
-757
-758      // check host
-759      if (multiServersPerHost) { // these arrays would only be allocated if we have more than one server per host
-760        int host = serverIndexToHostIndex[server];
-761        if (contains(primariesOfRegionsPerHost[host], primary)) {
-762          // check for whether there are other hosts that we can place this region
-763          for (int i = 0; i < primariesOfRegionsPerHost.length; i++) {
-764            if (i != host && !contains(primariesOfRegionsPerHost[i], primary)) {
-765              return true; // meaning there is a better host
-766            }
-767          }
-768          return false; // there is not a better host to place this
-769        }
-770      }
-771
-772      // check rack
-773      if (numRacks > 1) {
-774        int rack = serverIndexToRackIndex[server];
-775        if (contains(primariesOfRegionsPerRack[rack], primary)) {
-776          // check for whether there are other racks that we can place this region
-777          for (int i = 0; i < primariesOfRegionsPerRack.length; i++) {
-778            if (i != rack && !contains(primariesOfRegionsPerRack[i], primary)) {
-779              return true; // meaning there is a better rack
-780            }
-781          }
-782          return false; // there is not a better rack to place this
-783        }
-784      }
-785      return false;
-786    }
-787
-788    void doAssignRegion(RegionInfo regionInfo, ServerName serverName) {
-789      if (!serversToIndex.containsKey(serverName.getHostAndPort())) {
-790        return;
-791      }
-792      int server = serversToIndex.get(serverName.getHostAndPort());
-793      int region = regionsToIndex.get(regionInfo);
-794      doAction(new AssignRegionAction(region, server));
-795    }
-796
-797    void regionMoved(int region, int oldServer, int newServer) {
-798      regionIndexToServerIndex[region] = newServer;
-799      if (initialRegionIndexToServerIndex[region] == newServer) {
-800        numMovedRegions--; //region moved back to original location
-801      } else if (oldServer >= 0 && initialRegionIndexToServerIndex[region] == oldServer) {
-802        numMovedRegions++; //region moved from original location
-803      }
-804      int tableIndex = regionIndexToTableIndex[region];
-805      if (oldServer >= 0) {
-806        numRegionsPerServerPerTable[oldServer][tableIndex]--;
-807      }
-808      numRegionsPerServerPerTable[newServer][tableIndex]++;
-809
-810      //check whether this caused maxRegionsPerTable in the new Server to be updated
-811      if (numRegionsPerServerPerTable[newServer][tableIndex] > numMaxRegionsPerTable[tableIndex]) {
-812        numMaxRegionsPerTable[tableIndex] = numRegionsPerServerPerTable[newServer][tableIndex];
-813      } else if (oldServer >= 0 && (numRegionsPerServerPerTable[oldServer][tableIndex] + 1)
-814          == numMaxRegionsPerTable[tableIndex]) {
-815        //recompute maxRegionsPerTable since the previous value was coming from the old server
-816        numMaxRegionsPerTable[tableIndex] = 0;
-817        for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) {
-818          if (aNumRegionsPerServerPerTable[tableIndex] > numMaxRegionsPerTable[tableIndex]) {
-819            numMaxRegionsPerTable[tableIndex] = aNumRegionsPerServerPerTable[tableIndex];
-820          }
-821        }
-822      }
-823
-824      // update for servers
-825      int primary = regionIndexToPrimaryIndex[region];
-826      if (oldServer >= 0) {
-827

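The block removed above walks a server -> host -> rack hierarchy: if the candidate location already holds another replica of the same primary region, it scans the other locations at that level for one that does not. A minimal sketch of the per-level test, assuming sorted int arrays as a stand-in for the balancer's internal primariesOfRegionsPer* indexes:

    import java.util.Arrays;

    public class BetterLocationSketch {
      // primariesPerLocation[i] holds the primary-region indexes already placed at location i (sorted).
      // Returns true if some other location does not yet hold this primary, i.e. a better spot exists.
      static boolean otherLocationAvailable(int[][] primariesPerLocation, int current, int primary) {
        if (Arrays.binarySearch(primariesPerLocation[current], primary) < 0) {
          return false; // current location has no colliding replica; nothing to improve
        }
        for (int i = 0; i < primariesPerLocation.length; i++) {
          if (i != current && Arrays.binarySearch(primariesPerLocation[i], primary) < 0) {
            return true; // found a location without this primary
          }
        }
        return false;
      }

      public static void main(String[] args) {
        int[][] servers = { {3, 7}, {5}, {7} };
        System.out.println(otherLocationAvailable(servers, 0, 7)); // true: server 1 is free of primary 7
      }
    }
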
[21/40] hbase-site git commit: Published site at 5fd16f38533591615aa9afa48bb89bcbd8313caf.

2018-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f0b7674/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
index b9a30c4..03c7a70 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -462,14 +462,14 @@
 454      RpcController controller, RegionServerReportRequest request) throws ServiceException {
 455    try {
 456      master.checkServiceStarted();
-457      ClusterStatusProtos.ServerLoad sl = request.getLoad();
-458      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
-459      ServerMetrics oldLoad = master.getServerManager().getLoad(serverName);
-460      ServerMetrics newLoad = ServerMetricsBuilder.toServerMetrics(serverName, sl);
-461      master.getServerManager().regionServerReport(serverName, newLoad);
-462      int version = VersionInfoUtil.getCurrentClientVersionNumber();
-463      master.getAssignmentManager().reportOnlineRegions(serverName,
-464        version, newLoad.getRegionMetrics().keySet());
+457      int version = VersionInfoUtil.getCurrentClientVersionNumber();
+458      ClusterStatusProtos.ServerLoad sl = request.getLoad();
+459      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
+460      ServerMetrics oldLoad = master.getServerManager().getLoad(serverName);
+461      ServerMetrics newLoad = ServerMetricsBuilder.toServerMetrics(serverName, version, sl);
+462      master.getServerManager().regionServerReport(serverName, newLoad);
+463      master.getAssignmentManager()
+464        .reportOnlineRegions(serverName, newLoad.getRegionMetrics().keySet());
 465      if (sl != null && master.metricsMaster != null) {
 466        // Up our metrics.
 467        master.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests()
@@ -487,1798 +487,1799 @@
 479    // Register with server manager
 480    try {
 481      master.checkServiceStarted();
-482      InetAddress ia = master.getRemoteInetAddress(
-483        request.getPort(), request.getServerStartCode());
-484      // if regionserver passed hostname to use,
-485      // then use it instead of doing a reverse DNS lookup
-486      ServerName rs = master.getServerManager().regionServerStartup(request, ia);
-487
-488      // Send back some config info
-489      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
-490      NameStringPair.Builder entry = NameStringPair.newBuilder()
-491        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)
-492        .setValue(rs.getHostname());
-493      resp.addMapEntries(entry.build());
-494
-495      return resp.build();
-496    } catch (IOException ioe) {
-497      throw new ServiceException(ioe);
-498    }
-499  }
-500
-501  @Override
-502  public ReportRSFatalErrorResponse reportRSFatalError(
-503      RpcController controller, ReportRSFatalErrorRequest request) throws ServiceException {
-504    String errorText = request.getErrorMessage();
-505    ServerName sn = ProtobufUtil.toServerName(request.getServer());
-506    String msg = "Region server " + sn
-507      + " reported a fatal error:\n" + errorText;
-508    LOG.error(msg);
-509    master.rsFatals.add(msg);
-510    return ReportRSFatalErrorResponse.newBuilder().build();
-511  }
-512
-513  @Override
-514  public AddColumnResponse addColumn(RpcController controller,
-515      AddColumnRequest req) throws ServiceException {
-516    try {
-517      long procId = master.addColumn(
-518        ProtobufUtil.toTableName(req.getTableName()),
-519        ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
-520        req.getNonceGroup(),
-521        req.getNonce());
-522      if (procId == -1) {
-523        // This mean operation was not performed in server, so do not set any procId
-524        return AddColumnResponse.newBuilder().build();
-525      } else {
-526        return AddColumnResponse.newBuilder().setProcId(procId).build();
-527      }
-528    } catch (IOException ioe) {
-529      throw new ServiceException(ioe);
-530    }
-531  }
-532
-533  @Override
-534  public AssignRegionResponse assignRegion(RpcController controller,
-535      AssignRegionRequest req) throws ServiceException {
-536    try {
-537      master.checkInitialized();
-538
-539      final RegionSpecifierType type = req.getRegion().getType();
-540      if (type != RegionSpecifierType.REGION_NAME) {
-541        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
-542          + " actual: " + type);
-543      }
-544
-545      final byte[] regionName = req.getRegion().getValue().toByteArray();
-546      final RegionInfo regionInfo =

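Every handler in this hunk follows the same shape: perform the master call inside a try block and rethrow any checked IOException as a ServiceException for the RPC layer, as reportRSFatalError, addColumn and assignRegion do above. A generic, hypothetical illustration of that wrapping pattern (the ServiceException class here is a local stand-in, not the protobuf-generated one):

    import java.io.IOException;

    public class RpcWrapSketch {
      // Local stand-in for the RPC framework's checked exception type.
      static class ServiceException extends Exception {
        ServiceException(Throwable cause) { super(cause); }
      }

      // The underlying master call can fail with a checked IOException...
      static long doAddColumn() throws IOException {
        return 42L; // pretend a procedure id was created
      }

      // ...so the RPC-facing handler converts it, exactly as the handlers above do.
      static long addColumn() throws ServiceException {
        try {
          return doAddColumn();
        } catch (IOException ioe) {
          throw new ServiceException(ioe);
        }
      }

      public static void main(String[] args) throws ServiceException {
        System.out.println(addColumn()); // 42
      }
    }
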
[21/40] hbase-site git commit: Published site at e2b0490d18f7cc03aa59475a1b423597ddc481fb.

2018-04-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6c67ddd7/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SyncTable.SyncMapper.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SyncTable.SyncMapper.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SyncTable.SyncMapper.html
index 3165a6c..b6817d9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SyncTable.SyncMapper.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SyncTable.SyncMapper.html
@@ -71,728 +71,754 @@
 063  static final String TARGET_TABLE_CONF_KEY = "sync.table.target.table.name";
 064  static final String SOURCE_ZK_CLUSTER_CONF_KEY = "sync.table.source.zk.cluster";
 065  static final String TARGET_ZK_CLUSTER_CONF_KEY = "sync.table.target.zk.cluster";
-066  static final String DRY_RUN_CONF_KEY="sync.table.dry.run";
-067
-068  Path sourceHashDir;
-069  String sourceTableName;
-070  String targetTableName;
-071
-072  String sourceZkCluster;
-073  String targetZkCluster;
-074  boolean dryRun;
-075
-076  Counters counters;
-077
-078  public SyncTable(Configuration conf) {
-079    super(conf);
-080  }
+066  static final String DRY_RUN_CONF_KEY = "sync.table.dry.run";
+067  static final String DO_DELETES_CONF_KEY = "sync.table.do.deletes";
+068  static final String DO_PUTS_CONF_KEY = "sync.table.do.puts";
+069
+070  Path sourceHashDir;
+071  String sourceTableName;
+072  String targetTableName;
+073
+074  String sourceZkCluster;
+075  String targetZkCluster;
+076  boolean dryRun;
+077  boolean doDeletes = true;
+078  boolean doPuts = true;
+079
+080  Counters counters;
 081
-082  public Job createSubmittableJob(String[] args) throws IOException {
-083    FileSystem fs = sourceHashDir.getFileSystem(getConf());
-084    if (!fs.exists(sourceHashDir)) {
-085      throw new IOException("Source hash dir not found: " + sourceHashDir);
-086    }
-087
-088    HashTable.TableHash tableHash = HashTable.TableHash.read(getConf(), sourceHashDir);
-089    LOG.info("Read source hash manifest: " + tableHash);
-090    LOG.info("Read " + tableHash.partitions.size() + " partition keys");
-091    if (!tableHash.tableName.equals(sourceTableName)) {
-092      LOG.warn("Table name mismatch - manifest indicates hash was taken from: "
-093        + tableHash.tableName + " but job is reading from: " + sourceTableName);
-094    }
-095    if (tableHash.numHashFiles != tableHash.partitions.size() + 1) {
-096      throw new RuntimeException("Hash data appears corrupt. The number of of hash files created"
-097        + " should be 1 more than the number of partition keys.  However, the manifest file "
-098        + " says numHashFiles=" + tableHash.numHashFiles + " but the number of partition keys"
-099        + " found in the partitions file is " + tableHash.partitions.size());
-100    }
-101
-102    Path dataDir = new Path(sourceHashDir, HashTable.HASH_DATA_DIR);
-103    int dataSubdirCount = 0;
-104    for (FileStatus file : fs.listStatus(dataDir)) {
-105      if (file.getPath().getName().startsWith(HashTable.OUTPUT_DATA_FILE_PREFIX)) {
-106        dataSubdirCount++;
-107      }
-108    }
-109
-110    if (dataSubdirCount != tableHash.numHashFiles) {
-111      throw new RuntimeException("Hash data appears corrupt. The number of of hash files created"
-112        + " should be 1 more than the number of partition keys.  However, the number of data dirs"
-113        + " found is " + dataSubdirCount + " but the number of partition keys"
-114        + " found in the partitions file is " + tableHash.partitions.size());
-115    }
-116
-117    Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name",
-118      "syncTable_" + sourceTableName + "-" + targetTableName));
-119    Configuration jobConf = job.getConfiguration();
-120    job.setJarByClass(HashTable.class);
-121    jobConf.set(SOURCE_HASH_DIR_CONF_KEY, sourceHashDir.toString());
-122    jobConf.set(SOURCE_TABLE_CONF_KEY, sourceTableName);
-123    jobConf.set(TARGET_TABLE_CONF_KEY, targetTableName);
-124    if (sourceZkCluster != null) {
-125      jobConf.set(SOURCE_ZK_CLUSTER_CONF_KEY, sourceZkCluster);
-126    }
-127    if (targetZkCluster != null) {
-128      jobConf.set(TARGET_ZK_CLUSTER_CONF_KEY, targetZkCluster);
-129    }
-130    jobConf.setBoolean(DRY_RUN_CONF_KEY, dryRun);
-131
-132    TableMapReduceUtil.initTableMapperJob(targetTableName, tableHash.initScan(),
-133      SyncMapper.class, null, null, job);
-134
-135    job.setNumReduceTasks(0);
-136
-137    if (dryRun) {
-138      job.setOutputFormatClass(NullOutputFormat.class);
-139    } else {
-140      // No reducers.  Just write straight to table.  Call initTableReducerJob
-141      // because it sets up the TableOutputFormat.
-142      TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null,
-143        targetZkCluster,

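The commit above introduces sync.table.do.deletes and sync.table.do.puts alongside the existing dry-run key, with both mapper fields defaulting to true. A hedged usage sketch of wiring these flags into a Hadoop Configuration the same way createSubmittableJob sets DRY_RUN_CONF_KEY (the surrounding setup is illustrative, not the SyncTable driver itself):

    import org.apache.hadoop.conf.Configuration;

    public class SyncTableFlagsSketch {
      public static void main(String[] args) {
        Configuration jobConf = new Configuration();
        // Mirrors jobConf.setBoolean(DRY_RUN_CONF_KEY, dryRun) in createSubmittableJob above.
        jobConf.setBoolean("sync.table.dry.run", false);
        // The two keys added by this change; the mapper fields doDeletes/doPuts default to true.
        jobConf.setBoolean("sync.table.do.deletes", false); // presumably: do not propagate deletes to the target
        jobConf.setBoolean("sync.table.do.puts", true);     // presumably: still copy missing/changed cells
        System.out.println(jobConf.getBoolean("sync.table.do.deletes", true)); // false
      }
    }
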
[21/40] hbase-site git commit: Published site at .

2017-08-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c0fcd7f3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
index c687435..f6f823c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
@@ -440,543 +440,549 @@
 432    serverMap.clear();
 433  }
 434
-435  // ==========================================================================
-436  //  RegionStateNode helpers
-437  // ==========================================================================
-438  protected RegionStateNode createRegionNode(final HRegionInfo regionInfo) {
-439    RegionStateNode newNode = new RegionStateNode(regionInfo);
-440    RegionStateNode oldNode = regionsMap.putIfAbsent(regionInfo.getRegionName(), newNode);
-441    return oldNode != null ? oldNode : newNode;
-442  }
-443
-444  protected RegionStateNode getOrCreateRegionNode(final HRegionInfo regionInfo) {
-445    RegionStateNode node = regionsMap.get(regionInfo.getRegionName());
-446    return node != null ? node : createRegionNode(regionInfo);
-447  }
-448
-449  RegionStateNode getRegionNodeFromName(final byte[] regionName) {
-450    return regionsMap.get(regionName);
-451  }
-452
-453  protected RegionStateNode getRegionNode(final HRegionInfo regionInfo) {
-454    return getRegionNodeFromName(regionInfo.getRegionName());
-455  }
-456
-457  RegionStateNode getRegionNodeFromEncodedName(final String encodedRegionName) {
-458    // TODO: Need a map <encodedName, ...> but it is just dispatch merge...
-459    for (RegionStateNode node: regionsMap.values()) {
-460      if (node.getRegionInfo().getEncodedName().equals(encodedRegionName)) {
-461        return node;
-462      }
-463    }
-464    return null;
-465  }
-466
-467  public void deleteRegion(final HRegionInfo regionInfo) {
-468    regionsMap.remove(regionInfo.getRegionName());
-469    // Remove from the offline regions map too if there.
-470    if (this.regionOffline.containsKey(regionInfo)) {
-471      if (LOG.isTraceEnabled()) LOG.trace("Removing from regionOffline Map: " + regionInfo);
-472      this.regionOffline.remove(regionInfo);
-473    }
-474  }
-475
-476  ArrayList<RegionStateNode> getTableRegionStateNodes(final TableName tableName) {
-477    final ArrayList<RegionStateNode> regions = new ArrayList<RegionStateNode>();
-478    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
-479      if (!node.getTable().equals(tableName)) break;
-480      regions.add(node);
-481    }
-482    return regions;
-483  }
-484
-485  ArrayList<RegionState> getTableRegionStates(final TableName tableName) {
-486    final ArrayList<RegionState> regions = new ArrayList<RegionState>();
-487    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
-488      if (!node.getTable().equals(tableName)) break;
-489      regions.add(createRegionState(node));
-490    }
-491    return regions;
-492  }
-493
-494  ArrayList<HRegionInfo> getTableRegionsInfo(final TableName tableName) {
-495    final ArrayList<HRegionInfo> regions = new ArrayList<HRegionInfo>();
-496    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
-497      if (!node.getTable().equals(tableName)) break;
-498      regions.add(node.getRegionInfo());
-499    }
-500    return regions;
-501  }
-502
-503  Collection<RegionStateNode> getRegionNodes() {
-504    return regionsMap.values();
-505  }
-506
-507  public ArrayList<RegionState> getRegionStates() {
-508    final ArrayList<RegionState> regions = new ArrayList<RegionState>(regionsMap.size());
-509    for (RegionStateNode node: regionsMap.values()) {
-510      regions.add(createRegionState(node));
-511    }
-512    return regions;
-513  }
-514
-515  // ==========================================================================
-516  //  RegionState helpers
-517  // ==========================================================================
-518  public RegionState getRegionState(final HRegionInfo regionInfo) {
-519    return createRegionState(getRegionNode(regionInfo));
-520  }
-521
-522  public RegionState getRegionState(final String encodedRegionName) {
-523    return createRegionState(getRegionNodeFromEncodedName(encodedRegionName));
-524  }
-525
-526  private RegionState createRegionState(final RegionStateNode node) {
-527    return node == null ? null :
-528      new RegionState(node.getRegionInfo(), node.getState(),
-529        node.getLastUpdate(), node.getRegionLocation());
+435  @VisibleForTesting
+436  public boolean

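createRegionNode(...) above uses the classic putIfAbsent get-or-create idiom: construct a candidate node, try to publish it, and keep whichever instance won the race. A small self-contained sketch of the same idiom with a ConcurrentHashMap and a placeholder node type:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class PutIfAbsentSketch {
      static final ConcurrentMap<String, StringBuilder> NODES = new ConcurrentHashMap<>();

      // Same shape as createRegionNode(...) above: build a candidate, publish it with
      // putIfAbsent, and keep whichever instance actually won the race.
      static StringBuilder getOrCreate(String regionName) {
        StringBuilder newNode = new StringBuilder(regionName);
        StringBuilder oldNode = NODES.putIfAbsent(regionName, newNode);
        return oldNode != null ? oldNode : newNode;
      }

      public static void main(String[] args) {
        StringBuilder a = getOrCreate("r1");
        StringBuilder b = getOrCreate("r1");
        System.out.println(a == b); // true: the second call returns the instance stored first
      }
    }
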
[21/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9db7c5d/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.html
index b09e20b..3c088c8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.html
@@ -155,937 +155,942 @@
 147  }
 148
 149  @Override
-150  public void insertNewFiles(Collection<StoreFile> sfs) throws IOException {
-151    CompactionOrFlushMergeCopy cmc = new CompactionOrFlushMergeCopy(true);
-152    // Passing null does not cause NPE??
-153    cmc.mergeResults(null, sfs);
-154    debugDumpState("Added new files");
-155  }
-156
-157  @Override
-158  public ImmutableCollection<StoreFile> clearFiles() {
-159    ImmutableCollection<StoreFile> result = state.allFilesCached;
-160    this.state = new State();
-161    this.fileStarts.clear();
-162    this.fileEnds.clear();
-163    return result;
-164  }
-165
-166  @Override
-167  public ImmutableCollection<StoreFile> clearCompactedFiles() {
-168    ImmutableCollection<StoreFile> result = state.allCompactedFilesCached;
-169    this.state = new State();
-170    return result;
-171  }
-172
-173  @Override
-174  public int getStorefileCount() {
-175    return state.allFilesCached.size();
+150  public int getCompactedFilesCount() {
+151    return state.allCompactedFilesCached.size();
+152  }
+153
+154  @Override
+155  public void insertNewFiles(Collection<StoreFile> sfs) throws IOException {
+156    CompactionOrFlushMergeCopy cmc = new CompactionOrFlushMergeCopy(true);
+157    // Passing null does not cause NPE??
+158    cmc.mergeResults(null, sfs);
+159    debugDumpState("Added new files");
+160  }
+161
+162  @Override
+163  public ImmutableCollection<StoreFile> clearFiles() {
+164    ImmutableCollection<StoreFile> result = state.allFilesCached;
+165    this.state = new State();
+166    this.fileStarts.clear();
+167    this.fileEnds.clear();
+168    return result;
+169  }
+170
+171  @Override
+172  public ImmutableCollection<StoreFile> clearCompactedFiles() {
+173    ImmutableCollection<StoreFile> result = state.allCompactedFilesCached;
+174    this.state = new State();
+175    return result;
 176  }
 177
-178  /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)}
-179   * for details on this methods. */
-180  @Override
-181  public Iterator<StoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey) {
-182    KeyBeforeConcatenatedLists result = new KeyBeforeConcatenatedLists();
-183    // Order matters for this call.
-184    result.addSublist(state.level0Files);
-185    if (!state.stripeFiles.isEmpty()) {
-186      int lastStripeIndex = findStripeForRow(CellUtil.cloneRow(targetKey), false);
-187      for (int stripeIndex = lastStripeIndex; stripeIndex >= 0; --stripeIndex) {
-188        result.addSublist(state.stripeFiles.get(stripeIndex));
-189      }
-190    }
-191    return result.iterator();
-192  }
-193
-194  /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and
-195   * {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)}
-196   * for details on this methods. */
-197  @Override
-198  public Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
-199      Iterator<StoreFile> candidateFiles, final KeyValue targetKey, final Cell candidate) {
-200    KeyBeforeConcatenatedLists.Iterator original =
-201      (KeyBeforeConcatenatedLists.Iterator)candidateFiles;
-202    assert original != null;
-203    ArrayList<List<StoreFile>> components = original.getComponents();
-204    for (int firstIrrelevant = 0; firstIrrelevant < components.size(); ++firstIrrelevant) {
-205      StoreFile sf = components.get(firstIrrelevant).get(0);
-206      byte[] endKey = endOf(sf);
-207      // Entries are ordered as such: L0, then stripes in reverse order. We never remove
-208      // level 0; we remove the stripe, and all subsequent ones, as soon as we find the
-209      // first one that cannot possibly have better candidates.
-210      if (!isInvalid(endKey) && !isOpen(endKey)
-211        && (nonOpenRowCompare(targetKey, endKey) >= 0)) {
-212        original.removeComponents(firstIrrelevant);
-213        break;
-214      }
-215    }
-216    return original;
-217  }
-218
-219  private byte[] getSplitPoint(Collection<StoreFile> sfs) throws IOException {
-220    Optional<StoreFile> largestFile = StoreUtils.getLargestFile(sfs);
-221    return largestFile.isPresent()
-222      ? StoreUtils.getFileSplitPoint(largestFile.get(), cellComparator).orElse(null) : null;
-223  }
-224
-225  /**
-226   * Override of getSplitPoint that determines the split point as the

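getSplitPoint(...) above picks the largest store file via Optional and derives the split point from it, falling back to null when there are no files. A toy sketch of that Optional shape with stand-in types (the "split point" derivation here is arbitrary, just to keep the example runnable):

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;

    public class SplitPointSketch {
      // Same shape as getSplitPoint(...) above: find the largest element, then derive a value
      // from it, falling back to null when the collection is empty.
      static String splitPointOf(List<String> files) {
        Optional<String> largest = files.stream().max(Comparator.comparingInt(String::length));
        return largest.isPresent() ? largest.get().substring(0, 1) : null;
      }

      public static void main(String[] args) {
        System.out.println(splitPointOf(Arrays.asList("abc", "abcdef"))); // "a" from the largest file
        System.out.println(splitPointOf(Arrays.<String>asList()));        // null
      }
    }
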
[21/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f8f0a032/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerRpcConnection.ByteBuffByteInput.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerRpcConnection.ByteBuffByteInput.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerRpcConnection.ByteBuffByteInput.html
index b363f06..790089d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerRpcConnection.ByteBuffByteInput.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerRpcConnection.ByteBuffByteInput.html
@@ -37,854 +37,810 @@
 029import java.nio.channels.Channels;
 030import java.nio.channels.ReadableByteChannel;
 031import java.security.GeneralSecurityException;
-032import java.security.PrivilegedExceptionAction;
-033import java.util.Properties;
-034
-035import javax.security.sasl.Sasl;
-036import javax.security.sasl.SaslException;
-037import javax.security.sasl.SaslServer;
-038
-039import org.apache.commons.crypto.cipher.CryptoCipherFactory;
-040import org.apache.commons.crypto.random.CryptoRandom;
-041import org.apache.commons.crypto.random.CryptoRandomFactory;
-042import org.apache.hadoop.hbase.CellScanner;
-043import org.apache.hadoop.hbase.DoNotRetryIOException;
-044import org.apache.hadoop.hbase.client.VersionInfoUtil;
-045import org.apache.hadoop.hbase.codec.Codec;
-046import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
-047import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES;
-048import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup;
-049import org.apache.hadoop.hbase.nio.ByteBuff;
-050import org.apache.hadoop.hbase.nio.SingleByteBuff;
-051import org.apache.hadoop.hbase.security.AccessDeniedException;
-052import org.apache.hadoop.hbase.security.AuthMethod;
-053import org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
-054import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler;
-055import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler;
-056import org.apache.hadoop.hbase.security.SaslStatus;
-057import org.apache.hadoop.hbase.security.SaslUtil;
-058import org.apache.hadoop.hbase.security.User;
-059import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-060import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteInput;
-061import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-062import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
-063import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-064import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-065import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-066import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-067import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-068import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
-069import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
-070import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-072import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader;
-073import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation;
-074import org.apache.hadoop.hbase.util.Bytes;
-075import org.apache.hadoop.io.BytesWritable;
-076import org.apache.hadoop.io.IntWritable;
-077import org.apache.hadoop.io.Writable;
-078import org.apache.hadoop.io.WritableUtils;
-079import org.apache.hadoop.io.compress.CompressionCodec;
-080import org.apache.hadoop.security.UserGroupInformation;
-081import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-082import org.apache.hadoop.security.authorize.AuthorizationException;
-083import org.apache.hadoop.security.authorize.ProxyUsers;
-084import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-085import org.apache.hadoop.security.token.TokenIdentifier;
-086import org.apache.htrace.TraceInfo;
-087
-088/** Reads calls from a connection and queues them for handling. */
-089@edu.umd.cs.findbugs.annotations.SuppressWarnings(
-090  value="VO_VOLATILE_INCREMENT",
-091  justification="False positive according to http://sourceforge.net/p/findbugs/bugs/1032/")
-092abstract class ServerRpcConnection implements Closeable {
-093  /**  */
-094  protected final RpcServer rpcServer;
-095  // If the connection header has been read or not.
-096  protected boolean connectionHeaderRead = false;
-097
-098  protected CallCleanup callCleanup;
-099
-100  // Cache the remote host & port info so that even if the socket is
-101  // disconnected, we can say where it used to connect to.
-102  protected String hostAddress;
-103  protected int remotePort;
-104