[25/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -54,1176 +54,1176 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.HRegionInfo;
-050import org.apache.hadoop.hbase.HRegionLocation;
-051import org.apache.hadoop.hbase.MetaTableAccessor;
-052import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-053import org.apache.hadoop.hbase.NotServingRegionException;
-054import org.apache.hadoop.hbase.ProcedureInfo;
-055import org.apache.hadoop.hbase.RegionLocations;
-056import org.apache.hadoop.hbase.ServerName;
-057import org.apache.hadoop.hbase.NamespaceDescriptor;
-058import org.apache.hadoop.hbase.HConstants;
-059import org.apache.hadoop.hbase.TableExistsException;
-060import org.apache.hadoop.hbase.TableName;
-061import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-062import org.apache.hadoop.hbase.TableNotDisabledException;
-063import org.apache.hadoop.hbase.TableNotEnabledException;
-064import org.apache.hadoop.hbase.TableNotFoundException;
-065import org.apache.hadoop.hbase.UnknownRegionException;
-066import org.apache.hadoop.hbase.classification.InterfaceAudience;
-067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import org.apache.hadoop.hbase.client.Scan.ReadType;
-070import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-071import org.apache.hadoop.hbase.client.replication.TableCFs;
-072import org.apache.hadoop.hbase.exceptions.DeserializationException;
-073import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-074import org.apache.hadoop.hbase.quotas.QuotaFilter;
-075import org.apache.hadoop.hbase.quotas.QuotaSettings;
-076import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-077import org.apache.hadoop.hbase.replication.ReplicationException;
-078import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-079import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-080import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-081import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-082import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-103import 
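
The page diffed above is the generated source view of RawAsyncHBaseAdmin, the internals behind the public AsyncAdmin client that these imports serve. As a minimal caller-side sketch (assuming the HBase 2.x GA signatures; the exact builder and method names were still shifting in the 2.0 alphas this snapshot covers, and "my_table"/"cf2" are placeholder names):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AddColumnFamilySketch {
  public static void main(String[] args) throws Exception {
    // createAsyncConnection returns a CompletableFuture rather than blocking.
    CompletableFuture<AsyncConnection> future =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create());
    try (AsyncConnection conn = future.get()) {
      // addColumnFamily drives a master-side procedure; the returned future
      // completes when that procedure finishes, which is what the
      // AddColumnFamilyProcedureBiConsumer named in this page tracks.
      conn.getAdmin()
          .addColumnFamily(TableName.valueOf("my_table"),   // placeholder table
              ColumnFamilyDescriptorBuilder.of("cf2"))      // placeholder family
          .join();
    }
  }
}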

[25/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9db7c5d/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.CompactionOrFlushMergeCopy.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.CompactionOrFlushMergeCopy.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.CompactionOrFlushMergeCopy.html
index b09e20b..3c088c8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.CompactionOrFlushMergeCopy.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.CompactionOrFlushMergeCopy.html
@@ -155,937 +155,942 @@
 147  }
 148
 149  @Override
-150  public void insertNewFiles(Collection<StoreFile> sfs) throws IOException {
-151    CompactionOrFlushMergeCopy cmc = new CompactionOrFlushMergeCopy(true);
-152    // Passing null does not cause NPE??
-153    cmc.mergeResults(null, sfs);
-154    debugDumpState("Added new files");
-155  }
-156
-157  @Override
-158  public ImmutableCollection<StoreFile> clearFiles() {
-159    ImmutableCollection<StoreFile> result = state.allFilesCached;
-160    this.state = new State();
-161    this.fileStarts.clear();
-162    this.fileEnds.clear();
-163    return result;
-164  }
-165
-166  @Override
-167  public ImmutableCollection<StoreFile> clearCompactedFiles() {
-168    ImmutableCollection<StoreFile> result = state.allCompactedFilesCached;
-169    this.state = new State();
-170    return result;
-171  }
-172
-173  @Override
-174  public int getStorefileCount() {
-175    return state.allFilesCached.size();
+150  public int getCompactedFilesCount() {
+151    return state.allCompactedFilesCached.size();
+152  }
+153
+154  @Override
+155  public void insertNewFiles(Collection<StoreFile> sfs) throws IOException {
+156    CompactionOrFlushMergeCopy cmc = new CompactionOrFlushMergeCopy(true);
+157    // Passing null does not cause NPE??
+158    cmc.mergeResults(null, sfs);
+159    debugDumpState("Added new files");
+160  }
+161
+162  @Override
+163  public ImmutableCollection<StoreFile> clearFiles() {
+164    ImmutableCollection<StoreFile> result = state.allFilesCached;
+165    this.state = new State();
+166    this.fileStarts.clear();
+167    this.fileEnds.clear();
+168    return result;
+169  }
+170
+171  @Override
+172  public ImmutableCollection<StoreFile> clearCompactedFiles() {
+173    ImmutableCollection<StoreFile> result = state.allCompactedFilesCached;
+174    this.state = new State();
+175    return result;
 176  }
 177
-178  /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)}
-179   * for details on this method. */
-180  @Override
-181  public Iterator<StoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey) {
-182    KeyBeforeConcatenatedLists result = new KeyBeforeConcatenatedLists();
-183    // Order matters for this call.
-184    result.addSublist(state.level0Files);
-185    if (!state.stripeFiles.isEmpty()) {
-186      int lastStripeIndex = findStripeForRow(CellUtil.cloneRow(targetKey), false);
-187      for (int stripeIndex = lastStripeIndex; stripeIndex >= 0; --stripeIndex) {
-188        result.addSublist(state.stripeFiles.get(stripeIndex));
-189      }
-190    }
-191    return result.iterator();
-192  }
-193
-194  /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and
-195   * {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)}
-196   * for details on these methods. */
-197  @Override
-198  public Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
-199      Iterator<StoreFile> candidateFiles, final KeyValue targetKey, final Cell candidate) {
-200    KeyBeforeConcatenatedLists.Iterator original =
-201        (KeyBeforeConcatenatedLists.Iterator)candidateFiles;
-202    assert original != null;
-203    ArrayList<List<StoreFile>> components = original.getComponents();
-204    for (int firstIrrelevant = 0; firstIrrelevant < components.size(); ++firstIrrelevant) {
-205      StoreFile sf = components.get(firstIrrelevant).get(0);
-206      byte[] endKey = endOf(sf);
-207      // Entries are ordered as such: L0, then stripes in reverse order. We never remove
-208      // level 0; we remove the stripe, and all subsequent ones, as soon as we find the
-209      // first one that cannot possibly have better candidates.
-210      if (!isInvalid(endKey) && !isOpen(endKey)
-211          && (nonOpenRowCompare(targetKey, endKey) >= 0)) {
-212        original.removeComponents(firstIrrelevant);
-213        break;
-214      }
-215    }
-216    return original;
-217  }
-218
-219  private byte[] getSplitPoint(Collection<StoreFile> sfs) throws IOException {
-220    Optional<StoreFile> largestFile = StoreUtils.getLargestFile(sfs);
-221    return largestFile.isPresent()
-222        ? StoreUtils.getFileSplitPoint(largestFile.get(), 
[25/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f8f0a032/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index ac13492..ce4327f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -1101,284 +1101,280 @@
 1093      "hbase.regionserver.wal.enablecompression";
 1094
 1095  /** Configuration name of WAL storage policy
-1096   * Valid values are:
-1097   *  NONE: no preference in destination of block replicas
-1098   *  ONE_SSD: place only one block replica in SSD and the remaining in default storage
-1099   *  and ALL_SSD: place all block replicas on SSD
-1100   *
-1101   * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html*/
-1102  public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
-1103  public static final String DEFAULT_WAL_STORAGE_POLICY = "NONE";
+1096   * Valid values are: HOT, COLD, WARM, ALL_SSD, ONE_SSD, LAZY_PERSIST
+1097   * See http://hadoop.apache.org/docs/r2.7.3/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html*/
+1098  public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
+1099  public static final String DEFAULT_WAL_STORAGE_POLICY = "HOT";
+1100
+1101  /** Region in Transition metrics threshold time */
+1102  public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =
+1103      "hbase.metrics.rit.stuck.warning.threshold";
 1104
-1105  /** Region in Transition metrics threshold time */
-1106  public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =
-1107      "hbase.metrics.rit.stuck.warning.threshold";
-1108
-1109  public static final String LOAD_BALANCER_SLOP_KEY = "hbase.regions.slop";
-1110
-1111  /** delimiter used between portions of a region name */
-1112  public static final int DELIMITER = ',';
-1113
-1114  /**
-1115   * QOS attributes: these attributes are used to demarcate RPC call processing
-1116   * by different sets of handlers. For example, HIGH_QOS tagged methods are
-1117   * handled by high priority handlers.
-1118   */
-1119  // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < high_QOS
-1120  public static final int NORMAL_QOS = 0;
-1121  public static final int REPLICATION_QOS = 5;
-1122  public static final int REPLAY_QOS = 6;
-1123  public static final int QOS_THRESHOLD = 10;
-1124  public static final int ADMIN_QOS = 100;
-1125  public static final int HIGH_QOS = 200;
-1126  public static final int SYSTEMTABLE_QOS = HIGH_QOS;
-1127
-1128  /** Directory under /hbase where archived hfiles are stored */
-1129  public static final String HFILE_ARCHIVE_DIRECTORY = "archive";
-1130
-1131  /**
-1132   * Name of the directory to store all snapshots. See SnapshotDescriptionUtils for
-1133   * remaining snapshot constants; this is here to keep HConstants dependencies at a minimum and
-1134   * uni-directional.
-1135   */
-1136  public static final String SNAPSHOT_DIR_NAME = ".hbase-snapshot";
-1137
-1138  /* Name of old snapshot directory. See HBASE-8352 for details on why it needs to be renamed */
-1139  public static final String OLD_SNAPSHOT_DIR_NAME = ".snapshot";
-1140
-1141  /** Temporary directory used for table creation and deletion */
-1142  public static final String HBASE_TEMP_DIRECTORY = ".tmp";
-1143  /**
-1144   * The period (in milliseconds) between computing region server point in time metrics
-1145   */
-1146  public static final String REGIONSERVER_METRICS_PERIOD = "hbase.regionserver.metrics.period";
-1147  public static final long DEFAULT_REGIONSERVER_METRICS_PERIOD = 5000;
-1148  /** Directories that are not HBase table directories */
-1149  public static final List<String> HBASE_NON_TABLE_DIRS =
-1150    Collections.unmodifiableList(Arrays.asList(new String[] {
-1151      HBCK_SIDELINEDIR_NAME, HBASE_TEMP_DIRECTORY, MIGRATION_NAME
-1152    }));
-1153
-1154  /** Directories that are not HBase user table directories */
-1155  public static final List<String> HBASE_NON_USER_TABLE_DIRS =
-1156    Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
-1157      new String[] { TableName.META_TABLE_NAME.getNameAsString() },
-1158      HBASE_NON_TABLE_DIRS.toArray())));
-1159
-1160  /** Health script related settings. */
-1161  public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location";
-1162  public static final String HEALTH_SCRIPT_TIMEOUT = "hbase.node.health.script.timeout";
-1163  public static final String HEALTH_CHORE_WAKE_FREQ =
-1164      "hbase.node.health.script.frequency";
-1165  public static final long DEFAULT_HEALTH_SCRIPT_TIMEOUT = 60000;
-1166  /**
-1167   * The maximum number of health check failures a server can encounter
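
The first hunk in this page is the behavioral change: hbase.wal.storage.policy stops being limited to NONE/ONE_SSD/ALL_SSD and accepts the full HDFS storage-policy set, with the default moving from NONE to HOT. A minimal sketch of overriding it programmatically (ONE_SSD is just an example value from the javadoc's list; per the removed text it keeps one WAL block replica on SSD and the rest on default storage):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class WalStoragePolicySketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // After this change, leaving the key unset means
    // DEFAULT_WAL_STORAGE_POLICY = "HOT" (previously "NONE").
    conf.set(HConstants.WAL_STORAGE_POLICY, "ONE_SSD");
    System.out.println(conf.get(HConstants.WAL_STORAGE_POLICY,
        HConstants.DEFAULT_WAL_STORAGE_POLICY));
  }
}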