[27/49] hbase-site git commit: Published site at 3810ba2c6edfc531181ffc9e6c68396a0c2d2027.

2018-09-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/705d69c4/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter.html
index 7462d5b..63a00a7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter.html
@@ -122,294 +122,339 @@
 114  /** Temporary directory under the snapshot directory to store in-progress snapshots */
 115  public static final String SNAPSHOT_TMP_DIR_NAME = ".tmp";
 116
-117  /** This tag will be created in in-progress snapshots */
-118  public static final String SNAPSHOT_IN_PROGRESS = ".inprogress";
-119  // snapshot operation values
-120  /** Default value if no start time is specified */
-121  public static final long NO_SNAPSHOT_START_TIME_SPECIFIED = 0;
+117  /**
+118   * The configuration property that determines the filepath of the snapshot
+119   * base working directory
+120   */
+121  public static final String SNAPSHOT_WORKING_DIR = "hbase.snapshot.working.dir";
 122
-123
-124  public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = "hbase.snapshot.master.timeout.millis";
-125
-126  /** By default, wait 300 seconds for a snapshot to complete */
-127  public static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5 ;
+123  /** This tag will be created in in-progress snapshots */
+124  public static final String SNAPSHOT_IN_PROGRESS = ".inprogress";
+125  // snapshot operation values
+126  /** Default value if no start time is specified */
+127  public static final long NO_SNAPSHOT_START_TIME_SPECIFIED = 0;
 128
 129
-130  /**
-131   * By default, check to see if the snapshot is complete (ms)
-132   * @deprecated Use {@link #DEFAULT_MAX_WAIT_TIME} instead.
-133   * */
-134  @Deprecated
-135  public static final int SNAPSHOT_TIMEOUT_MILLIS_DEFAULT = 60000 * 5;
-136
-137  /**
-138   * Conf key for # of ms elapsed before injecting a snapshot timeout error when waiting for
-139   * completion.
-140   * @deprecated Use {@link #MASTER_SNAPSHOT_TIMEOUT_MILLIS} instead.
-141   */
-142  @Deprecated
-143  public static final String SNAPSHOT_TIMEOUT_MILLIS_KEY = "hbase.snapshot.master.timeoutMillis";
-144
-145  private SnapshotDescriptionUtils() {
-146    // private constructor for utility class
-147  }
-148
-149  /**
-150   * @param conf {@link Configuration} from which to check for the timeout
-151   * @param type type of snapshot being taken
-152   * @param defaultMaxWaitTime Default amount of time to wait, if none is in the configuration
-153   * @return the max amount of time the master should wait for a snapshot to complete
-154   */
-155  public static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type,
-156      long defaultMaxWaitTime) {
-157    String confKey;
-158    switch (type) {
-159    case DISABLED:
-160    default:
-161      confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS;
-162    }
-163    return Math.max(conf.getLong(confKey, defaultMaxWaitTime),
-164        conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, defaultMaxWaitTime));
-165  }
-166
-167  /**
-168   * Get the snapshot root directory. All the snapshots are kept under this directory, i.e.
-169   * ${hbase.rootdir}/.snapshot
-170   * @param rootDir hbase root directory
-171   * @return the base directory in which all snapshots are kept
-172   */
-173  public static Path getSnapshotRootDir(final Path rootDir) {
-174    return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
-175  }
-176
-177  /**
-178   * Get the directory for a specified snapshot. This directory is a sub-directory of snapshot root
-179   * directory and all the data files for a snapshot are kept under this directory.
-180   * @param snapshot snapshot being taken
-181   * @param rootDir hbase root directory
-182   * @return the final directory for the completed snapshot
-183   */
-184  public static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, final Path rootDir) {
-185    return getCompletedSnapshotDir(snapshot.getName(), rootDir);
-186  }
-187
-188  /**
-189   * Get the directory for a completed snapshot. This directory is a sub-directory of snapshot root
-190   * directory and all the data files for a snapshot are kept under this directory.
-191   * @param snapshotName name of the snapshot being taken
-192   * @param rootDir hbase root directory
-193   * @return the final directory for the completed snapshot
-194   */
-195  public static Path getCompletedSnapshotDir(final String snapshotName, final Path

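For context, the removed block above shows getMaxMasterTimeout resolving the snapshot timeout as the larger of the new hbase.snapshot.master.timeout.millis key and the deprecated hbase.snapshot.master.timeoutMillis key. Below is a minimal, self-contained sketch of that resolution logic; it is not part of the patch, the class and method names are illustrative, and the per-snapshot-type switch is omitted.

import org.apache.hadoop.conf.Configuration;

// Illustrative helper; mirrors the timeout resolution shown in the hunk above.
public final class SnapshotTimeoutExample {
  // Keys copied from the SnapshotDescriptionUtils hunk above.
  static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = "hbase.snapshot.master.timeout.millis";
  static final String SNAPSHOT_TIMEOUT_MILLIS_KEY = "hbase.snapshot.master.timeoutMillis"; // deprecated key
  static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5; // wait up to 300 seconds by default

  private SnapshotTimeoutExample() {}

  /** Returns the larger of the new and the deprecated timeout settings. */
  static long maxMasterTimeout(Configuration conf, long defaultMaxWaitTime) {
    return Math.max(conf.getLong(MASTER_SNAPSHOT_TIMEOUT_MILLIS, defaultMaxWaitTime),
        conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, defaultMaxWaitTime));
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong(MASTER_SNAPSHOT_TIMEOUT_MILLIS, 120000L);
    // Only the new key is set and it is smaller than the default, so the default wins.
    System.out.println(maxMasterTimeout(conf, DEFAULT_MAX_WAIT_TIME)); // prints 300000
  }
}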
[27/49] hbase-site git commit: Published site at .

2017-09-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e6bd0774/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
index 06c94bc..2725449 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
@@ -50,8 +50,8 @@
 042import org.apache.hadoop.hbase.procedure2.LockedResource;
 043import org.apache.hadoop.hbase.procedure2.Procedure;
 044import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-045import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-046import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+045import org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
+046import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 047import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 048import org.apache.yetus.audience.InterfaceAudience;
 049import org.apache.yetus.audience.InterfaceStability;
@@ -1083,389 +1083,393 @@
 1075   * Called before the quota for the user is stored.
 1076   * @param ctx the environment to interact with the framework and master
 1077   * @param userName the name of user
-1078   * @param quotas the quota settings
+1078   * @param quotas the current quota for the user
 1079   */
 1080  default void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
-1081      final String userName, final Quotas quotas) throws IOException {}
+1081      final String userName, final GlobalQuotaSettings quotas) throws IOException {}
 1082
 1083  /**
 1084   * Called after the quota for the user is stored.
 1085   * @param ctx the environment to interact with the framework and master
 1086   * @param userName the name of user
-1087   * @param quotas the quota settings
+1087   * @param quotas the resulting quota for the user
 1088   */
 1089  default void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
-1090      final String userName, final Quotas quotas) throws IOException {}
+1090      final String userName, final GlobalQuotaSettings quotas) throws IOException {}
 1091
 1092  /**
 1093   * Called before the quota for the user on the specified table is stored.
 1094   * @param ctx the environment to interact with the framework and master
 1095   * @param userName the name of user
 1096   * @param tableName the name of the table
-1097   * @param quotas the quota settings
+1097   * @param quotas the current quota for the user on the table
 1098   */
-1099  default void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
-1100      final String userName, final TableName tableName, final Quotas quotas) throws IOException {}
-1101
-1102  /**
-1103   * Called after the quota for the user on the specified table is stored.
-1104   * @param ctx the environment to interact with the framework and master
-1105   * @param userName the name of user
-1106   * @param tableName the name of the table
-1107   * @param quotas the quota settings
-1108   */
-1109  default void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
-1110      final String userName, final TableName tableName, final Quotas quotas) throws IOException {}
-1111
-1112  /**
-1113   * Called before the quota for the user on the specified namespace is stored.
-1114   * @param ctx the environment to interact with the framework and master
-1115   * @param userName the name of user
-1116   * @param namespace the name of the namespace
-1117   * @param quotas the quota settings
-1118   */
-1119  default void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
-1120      final String userName, final String namespace, final Quotas quotas) throws IOException {}
-1121
-1122  /**
-1123   * Called after the quota for the user on the specified namespace is stored.
-1124   * @param ctx the environment to interact with the framework and master
-1125   * @param userName the name of user
-1126   * @param namespace the name of the namespace
-1127   * @param quotas the quota settings
-1128   */
-1129  default void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
-1130      final String userName, final String namespace, final Quotas quotas) throws IOException {}
-1131
-1132  /**
-1133   * Called before the quota for the table is stored.
-1134   * @param ctx the environment to interact with the framework and master
-1135   * @param tableName the name of the table
-1136   * @param quotas the quota settings
-1137   */
-1138  default void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
-1139      final TableName tableName,

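For context, the hunk above switches the master quota hooks from the protobuf Quotas message to GlobalQuotaSettings. Below is a minimal sketch of a coprocessor that overrides one of the new default methods, assuming the post-patch signature shown in the diff; the class name and log message are illustrative, and how the class is registered as a master coprocessor (for example via hbase.coprocessor.master.classes) is version-specific and omitted.

import java.io.IOException;

import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;

/** Illustrative observer that overrides the new user-quota hook. */
public class QuotaLoggingObserver implements MasterObserver {

  @Override
  public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
      final String userName, final GlobalQuotaSettings quotas) throws IOException {
    // Called before the quota for the user is stored (see the javadoc in the diff above).
    System.out.println("Quota about to change for user " + userName + ": " + quotas);
  }
}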
[27/49] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b17bf22b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 99a09f9..eb9099e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -42,2811 +42,2861 @@
 034import java.util.concurrent.TimeUnit;
 035import java.util.concurrent.atomic.AtomicReference;
 036import java.util.function.BiConsumer;
-037import java.util.regex.Pattern;
-038import java.util.stream.Collectors;
-039
-040import com.google.common.annotations.VisibleForTesting;
-041
-042import io.netty.util.Timeout;
-043import io.netty.util.TimerTask;
+037import java.util.function.Function;
+038import java.util.regex.Pattern;
+039import java.util.stream.Collectors;
+040
+041import com.google.common.annotations.VisibleForTesting;
+042import com.google.protobuf.Message;
+043import com.google.protobuf.RpcChannel;
 044
-045import java.util.stream.Stream;
-046
-047import org.apache.commons.io.IOUtils;
-048import org.apache.commons.logging.Log;
-049import org.apache.commons.logging.LogFactory;
-050import org.apache.hadoop.hbase.ClusterStatus;
-051import org.apache.hadoop.hbase.HRegionInfo;
-052import org.apache.hadoop.hbase.HRegionLocation;
-053import org.apache.hadoop.hbase.MetaTableAccessor;
-054import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-055import org.apache.hadoop.hbase.NotServingRegionException;
-056import org.apache.hadoop.hbase.ProcedureInfo;
-057import org.apache.hadoop.hbase.RegionLoad;
-058import org.apache.hadoop.hbase.RegionLocations;
-059import org.apache.hadoop.hbase.ServerName;
-060import org.apache.hadoop.hbase.NamespaceDescriptor;
-061import org.apache.hadoop.hbase.HConstants;
-062import org.apache.hadoop.hbase.TableExistsException;
-063import org.apache.hadoop.hbase.TableName;
-064import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-065import org.apache.hadoop.hbase.TableNotDisabledException;
-066import org.apache.hadoop.hbase.TableNotEnabledException;
-067import org.apache.hadoop.hbase.TableNotFoundException;
-068import org.apache.hadoop.hbase.UnknownRegionException;
-069import org.apache.hadoop.hbase.classification.InterfaceAudience;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-072import org.apache.hadoop.hbase.client.Scan.ReadType;
-073import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-074import org.apache.hadoop.hbase.client.replication.TableCFs;
-075import org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import org.apache.hadoop.hbase.exceptions.DeserializationException;
-077import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-078import org.apache.hadoop.hbase.procedure2.LockInfo;
-079import org.apache.hadoop.hbase.quotas.QuotaFilter;
-080import org.apache.hadoop.hbase.quotas.QuotaSettings;
-081import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-082import org.apache.hadoop.hbase.replication.ReplicationException;
-083import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-084import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-085import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-086import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-087import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-098import

[27/49] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cd7ae54c/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 959e3ef..ac32645 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -1401,6821 +1401,6830 @@
 1393  @Override
 1394  public boolean isSplittable() {
 1395    boolean result = isAvailable() && !hasReferences();
-1396    LOG.info("ASKED IF SPLITTABLE " + result, new Throwable("LOGGING"));
-1397    return result;
-1398  }
-1399
-1400  @Override
-1401  public boolean isMergeable() {
-1402    if (!isAvailable()) {
-1403      LOG.debug("Region " + this
-1404          + " is not mergeable because it is closing or closed");
-1405      return false;
-1406    }
-1407    if (hasReferences()) {
-1408      LOG.debug("Region " + this
-1409          + " is not mergeable because it has references");
-1410      return false;
-1411    }
-1412
-1413    return true;
-1414  }
-1415
-1416  public boolean areWritesEnabled() {
-1417    synchronized(this.writestate) {
-1418      return this.writestate.writesEnabled;
-1419    }
-1420  }
+1396    LOG.info("ASKED IF SPLITTABLE " + result + " " + getRegionInfo().getShortNameToLog(),
+1397      new Throwable("LOGGING: REMOVE"));
+1398    // REMOVE BELOW
+1399    LOG.info("DEBUG LIST ALL FILES");
+1400    for (Store store: this.stores.values()) {
+1401      LOG.info("store " + store.getColumnFamilyName());
+1402      for (StoreFile sf: store.getStorefiles()) {
+1403        LOG.info(sf.toStringDetailed());
+1404      }
+1405    }
+1406    return result;
+1407  }
+1408
+1409  @Override
+1410  public boolean isMergeable() {
+1411    if (!isAvailable()) {
+1412      LOG.debug("Region " + this
+1413          + " is not mergeable because it is closing or closed");
+1414      return false;
+1415    }
+1416    if (hasReferences()) {
+1417      LOG.debug("Region " + this
+1418          + " is not mergeable because it has references");
+1419      return false;
+1420    }
 1421
-1422  @VisibleForTesting
-1423  public MultiVersionConcurrencyControl getMVCC() {
-1424    return mvcc;
-1425  }
-1426
-1427  @Override
-1428  public long getMaxFlushedSeqId() {
-1429    return maxFlushedSeqId;
-1430  }
-1431
-1432  @Override
-1433  public long getReadPoint(IsolationLevel isolationLevel) {
-1434    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1435      // This scan can read even uncommitted transactions
-1436      return Long.MAX_VALUE;
-1437    }
-1438    return mvcc.getReadPoint();
+1422    return true;
+1423  }
+1424
+1425  public boolean areWritesEnabled() {
+1426    synchronized(this.writestate) {
+1427      return this.writestate.writesEnabled;
+1428    }
+1429  }
+1430
+1431  @VisibleForTesting
+1432  public MultiVersionConcurrencyControl getMVCC() {
+1433    return mvcc;
+1434  }
+1435
+1436  @Override
+1437  public long getMaxFlushedSeqId() {
+1438    return maxFlushedSeqId;
 1439  }
 1440
 1441  @Override
-1442  public long getReadpoint(IsolationLevel isolationLevel) {
-1443    return getReadPoint(isolationLevel);
-1444  }
-1445
-1446  @Override
-1447  public boolean isLoadingCfsOnDemandDefault() {
-1448    return this.isLoadingCfsOnDemandDefault;
-1449  }
-1450
-1451  /**
-1452   * Close down this HRegion.  Flush the cache, shut down each HStore, don't
-1453   * service any more calls.
-1454   *
-1455   * <p>This method could take some time to execute, so don't call it from a
-1456   * time-sensitive thread.
-1457   *
-1458   * @return Vector of all the storage files that the HRegion's component
-1459   * HStores make use of.  It's a list of all StoreFile objects. Returns empty
-1460   * vector if already closed and null if judged that it should not close.
-1461   *
-1462   * @throws IOException e
-1463   * @throws DroppedSnapshotException Thrown when replay of wal is required
-1464   * because a Snapshot was not properly persisted. The region is put in closing mode, and the
-1465   * caller MUST abort after this.
-1466   */
-1467  public Map<byte[], List<StoreFile>> close() throws IOException {
-1468    return close(false);
-1469  }
-1470
-1471  private final Object closeLock = new Object();
-1472
-1473  /** Conf key for the periodic flush interval */
-1474  public static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL =
-1475      "hbase.regionserver.optionalcacheflushinterval";
-1476  /** Default interval for the memstore flush */
-1477  public static final int DEFAULT_CACHE_FLUSH_INTERVAL = 3600000;
-1478  /** Default interval for System tables memstore

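For context, the tail of the hunk above shows the periodic memstore flush settings (MEMSTORE_PERIODIC_FLUSH_INTERVAL, i.e. "hbase.regionserver.optionalcacheflushinterval"). Below is a minimal sketch of reading that key from a Hadoop Configuration, not part of the patch; the class name is illustrative and the default assumes the conventional one-hour value (3600000 ms) that the garbled line above appears to encode.

import org.apache.hadoop.conf.Configuration;

// Illustrative: reading the periodic memstore flush interval shown in the hunk above.
public final class FlushIntervalExample {
  // Conf key copied from the HRegion hunk above; default assumed to be one hour in ms.
  static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL =
      "hbase.regionserver.optionalcacheflushinterval";
  static final int DEFAULT_CACHE_FLUSH_INTERVAL = 3600000;

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    int flushIntervalMs = conf.getInt(MEMSTORE_PERIODIC_FLUSH_INTERVAL, DEFAULT_CACHE_FLUSH_INTERVAL);
    // A value of 0 disables periodic flushing; a positive value is the flush period in ms.
    System.out.println("Periodic memstore flush interval: " + flushIntervalMs + " ms");
  }
}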
[27/49] hbase-site git commit: Published site at 4b3e38705cb24aee82615b1b9af47ed549ea1358.

2016-03-03 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4ce8323f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.html
index 2ee77a0..4251c6f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.html
@@ -143,163 +143,170 @@
 135        partitionFilesToBuckets(candidatesInWindow, comConf.getBaseWindowMillis(),
 136          comConf.getWindowsPerTier(), now);
 137    LOG.debug("Compaction buckets are: " + buckets);
-138
-139    return newestBucket(buckets, comConf.getIncomingWindowMin(), now,
-140      comConf.getBaseWindowMillis(), mayUseOffPeak);
-141  }
-142
-143  /**
-144   * @param buckets the list of buckets, sorted from newest to oldest, from which to return the
-145   *          newest bucket within thresholds.
-146   * @param incomingWindowThreshold minimum number of storeFiles in a bucket to qualify.
-147   * @param maxThreshold maximum number of storeFiles to compact at once (the returned bucket will
-148   *          be trimmed down to this).
-149   * @return a bucket (a list of store files within a window to be compacted).
-150   * @throws IOException error
-151   */
-152  private ArrayList<StoreFile> newestBucket(List<ArrayList<StoreFile>> buckets,
-153      int incomingWindowThreshold, long now, long baseWindowMillis, boolean mayUseOffPeak)
-154      throws IOException {
-155    Window incomingWindow = getInitialWindow(now, baseWindowMillis);
-156    for (ArrayList<StoreFile> bucket : buckets) {
-157      int minThreshold =
-158          incomingWindow.compareToTimestamp(bucket.get(0).getMaximumTimestamp()) <= 0 ? comConf
-159              .getIncomingWindowMin() : comConf.getMinFilesToCompact();
-160      compactionPolicyPerWindow.setMinThreshold(minThreshold);
-161      ArrayList<StoreFile> candidates =
-162          compactionPolicyPerWindow.applyCompactionPolicy(bucket, mayUseOffPeak, false);
-163      if (candidates != null && !candidates.isEmpty()) {
-164        return candidates;
-165      }
-166    }
-167    return null;
-168  }
-169
-170  /**
-171   * We receive store files sorted in ascending order by seqId then scan the list of files. If the
-172   * current file has a maxTimestamp older than last known maximum, treat this file as it carries
-173   * the last known maximum. This way both seqId and timestamp are in the same order. If files carry
-174   * the same maxTimestamps, they are ordered by seqId. We then reverse the list so they are ordered
-175   * by seqId and maxTimestamp in descending order and build the time windows. All the out-of-order
-176   * data goes into the same compaction windows, guaranteeing contiguous compaction based on sequence id.
-177   */
-178  private static List<ArrayList<StoreFile>> partitionFilesToBuckets(Iterable<StoreFile> storeFiles,
-179      long baseWindowSizeMillis, int windowsPerTier, long now) {
-180    List<ArrayList<StoreFile>> buckets = Lists.newArrayList();
-181    Window window = getInitialWindow(now, baseWindowSizeMillis);
-182
-183    List<Pair<StoreFile, Long>> storefileMaxTimestampPairs =
-184        Lists.newArrayListWithCapacity(Iterables.size(storeFiles));
-185    long maxTimestampSeen = Long.MIN_VALUE;
-186    for (StoreFile storeFile : storeFiles) {
-187      // if there is out-of-order data,
-188      // we put them in the same window as the last file in increasing order
-189      maxTimestampSeen = Math.max(maxTimestampSeen, storeFile.getMaximumTimestamp());
-190      storefileMaxTimestampPairs.add(new Pair<StoreFile, Long>(storeFile, maxTimestampSeen));
-191    }
-192
-193    Collections.reverse(storefileMaxTimestampPairs);
-194    PeekingIterator<Pair<StoreFile, Long>> it =
-195        Iterators.peekingIterator(storefileMaxTimestampPairs.iterator());
-196
-197    while (it.hasNext()) {
-198      int compResult = window.compareToTimestamp(it.peek().getSecond());
-199      if (compResult > 0) {
-200        // If the file is too old for the window, switch to the next window
-201        window = window.nextWindow(windowsPerTier);
-202      } else {
-203        // The file is within the target window
-204        ArrayList<StoreFile> bucket = Lists.newArrayList();
-205        // Add all files in the same window to current bucket. For incoming window
-206        // we tolerate files with future data although it is sub-optimal
-207        while (it.hasNext() && window.compareToTimestamp(it.peek().getSecond()) <= 0) {
-208          bucket.add(it.next().getFirst());
-209        }
-210        if (!bucket.isEmpty()) {
-211
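For context, the removed javadoc above (old lines 170-177) describes how partitionFilesToBuckets pairs each store file with a running maximum timestamp and then reverses the list so sequence-id order and timestamp order agree. Below is a small self-contained sketch of that pairing step on plain longs; it is not part of the patch and the numbers are made up.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Illustrative: the "running max timestamp" trick described in the javadoc above,
// shown on plain longs instead of StoreFiles.
public final class RunningMaxTimestampExample {
  public static void main(String[] args) {
    // Max timestamps of store files, already sorted by ascending seqId.
    long[] maxTimestampsBySeqId = {100L, 250L, 180L, 300L};

    // Carry the last known maximum forward so seqId order and timestamp order agree.
    List<Long> effectiveTimestamps = new ArrayList<>();
    long maxTimestampSeen = Long.MIN_VALUE;
    for (long ts : maxTimestampsBySeqId) {
      maxTimestampSeen = Math.max(maxTimestampSeen, ts);
      effectiveTimestamps.add(maxTimestampSeen);
    }
    // Prints [100, 250, 250, 300]: the out-of-order 180 is folded into the same window as 250.
    System.out.println(effectiveTimestamps);

    // Reverse so both seqId and effective timestamp run in descending order,
    // matching how the policy walks windows from newest to oldest.
    Collections.reverse(effectiveTimestamps);
    System.out.println(effectiveTimestamps); // prints [300, 250, 250, 100]
  }
}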