[15/30] hbase-site git commit: Published site at 931156f66b1decc19d89f8bb3ce9e5f355fb4fb2.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b8b907f/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureIterator.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureIterator.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureIterator.html index 2945c58..8de499d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureIterator.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureIterator.html @@ -93,8 +93,8 @@ 085boolean hasNext(); 086 087/** -088 * Calling this method does not need to converting the protobuf message to the Procedure class, -089 * so if it returns true we can call {@link #skipNext()} to skip the procedure without +088 * Calling this method does not need to convert the protobuf message to the Procedure class, so +089 * if it returns true we can call {@link #skipNext()} to skip the procedure without 090 * deserializing. This could increase the performance. 091 * @return true if the iterator next element is a completed procedure. 092 */ http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b8b907f/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureLoader.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureLoader.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureLoader.html index 2945c58..8de499d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureLoader.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureLoader.html @@ -93,8 +93,8 @@ 085boolean hasNext(); 086 087/** -088 * Calling this method does not need to converting the protobuf message to the Procedure class, -089 * so if it returns true we can call {@link #skipNext()} to skip the procedure without +088 * Calling this method does not need to convert the protobuf message to the Procedure class, so +089 * if it returns true we can call {@link #skipNext()} to skip the procedure without 090 * deserializing. This could increase the performance. 091 * @return true if the iterator next element is a completed procedure. 092 */ http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b8b907f/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html index 2945c58..8de499d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html @@ -93,8 +93,8 @@ 085boolean hasNext(); 086 087/** -088 * Calling this method does not need to converting the protobuf message to the Procedure class, -089 * so if it returns true we can call {@link #skipNext()} to skip the procedure without +088 * Calling this method does not need to convert the protobuf message to the Procedure class, so +089 * if it returns true we can call {@link #skipNext()} to skip the procedure without 090 * deserializing. This could increase the performance. 
091 * @return true if the iterator next element is a completed procedure. 092 */ http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b8b907f/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.html index 2945c58..8de499d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.html @@ -93,8 +93,8 @@ 085boolean hasNext(); 086 087/** -088 * Calling this method does not need to converting the protobuf message to the Procedure class, -089 * so if it returns true we can call {@link #skipNext()} to skip the procedure without +088 * Calling this method does not need to convert the protobuf message to the Procedure class, so +089 * if it returns true we can call {@link #skipNext()} to skip the
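The javadoc corrected in this commit describes an iterator contract where completed procedures can be skipped during load without converting their protobuf form into Procedure objects. A minimal, hypothetical sketch of that pattern follows; hasNext() and skipNext() appear in the diff, while isNextFinished() and next() are assumed method names on ProcedureStore.ProcedureIterator and may differ by version.

```java
// Hypothetical sketch of the skip-without-deserializing pattern described in the
// corrected javadoc. isNextFinished() and next() are assumed names; only
// hasNext() and skipNext() are shown in the diff above.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;

public final class ProcedureReplaySketch {

  /** Collects only still-active procedures, skipping finished ones cheaply. */
  static List<Procedure> loadActive(ProcedureIterator iter) throws IOException {
    List<Procedure> active = new ArrayList<>();
    while (iter.hasNext()) {
      if (iter.isNextFinished()) {
        // Completed procedure: skip it without protobuf -> Procedure deserialization.
        iter.skipNext();
      } else {
        // Live procedure: pay the deserialization cost only here.
        active.add(iter.next());
      }
    }
    return active;
  }
}
```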
[15/30] hbase-site git commit: Published site at a8e184dc77470bdf9d62e19c5d36bc1de7cf4c6d.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5fd895c6/devapidocs/org/apache/hadoop/hbase/client/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html index a4111b8..d0c7850 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html @@ -552,24 +552,24 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.client.CompactionState +org.apache.hadoop.hbase.client.TableState.State +org.apache.hadoop.hbase.client.RegionLocateType org.apache.hadoop.hbase.client.CompactType -org.apache.hadoop.hbase.client.Durability +org.apache.hadoop.hbase.client.IsolationLevel +org.apache.hadoop.hbase.client.MasterSwitchType +org.apache.hadoop.hbase.client.ScannerCallable.MoreResults +org.apache.hadoop.hbase.client.SnapshotType +org.apache.hadoop.hbase.client.AbstractResponse.ResponseType +org.apache.hadoop.hbase.client.RequestController.ReturnCode org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry -org.apache.hadoop.hbase.client.Scan.ReadType +org.apache.hadoop.hbase.client.Durability org.apache.hadoop.hbase.client.MobCompactPartitionPolicy -org.apache.hadoop.hbase.client.AbstractResponse.ResponseType -org.apache.hadoop.hbase.client.RegionLocateType -org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState -org.apache.hadoop.hbase.client.SnapshotType -org.apache.hadoop.hbase.client.ScannerCallable.MoreResults org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState -org.apache.hadoop.hbase.client.MasterSwitchType -org.apache.hadoop.hbase.client.IsolationLevel org.apache.hadoop.hbase.client.Consistency -org.apache.hadoop.hbase.client.TableState.State +org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows -org.apache.hadoop.hbase.client.RequestController.ReturnCode +org.apache.hadoop.hbase.client.CompactionState +org.apache.hadoop.hbase.client.Scan.ReadType http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5fd895c6/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html index ce950b0..ae0124e 100644 --- a/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html @@ -104,8 +104,8 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.executor.EventType org.apache.hadoop.hbase.executor.ExecutorType 
+org.apache.hadoop.hbase.executor.EventType http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5fd895c6/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html index 2e7de6e..1d258d9 100644 --- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html @@ -183,14 +183,14 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.filter.FilterList.Operator -org.apache.hadoop.hbase.filter.CompareFilter.CompareOp org.apache.hadoop.hbase.filter.FuzzyRowFilter.Order -org.apache.hadoop.hbase.filter.Filter.ReturnCode +org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
[15/30] hbase-site git commit: Published site at 59867eeeebd28fcc49f338ef36769fb6a9bff4dc.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67e3bccd/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html index 6cd6a17..85dd23b 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html @@ -2119,6526 +2119,6532 @@ 2111 } 2112 2113 status = TaskMonitor.get().createStatus("Compacting " + store + " in " + this); -2114 if (this.closed.get()) { -2115String msg = "Skipping compaction on " + this + " because closed"; -2116LOG.debug(msg); -2117status.abort(msg); -2118return false; -2119 } -2120 boolean wasStateSet = false; -2121 try { -2122synchronized (writestate) { -2123 if (writestate.writesEnabled) { -2124wasStateSet = true; -2125 writestate.compacting.incrementAndGet(); -2126 } else { -2127String msg = "NOT compacting region " + this + ". Writes disabled."; -2128LOG.info(msg); -2129status.abort(msg); -2130return false; -2131 } -2132} -2133LOG.info("Starting compaction of {} in {}{}", store, this, -2134 (compaction.getRequest().isOffPeak()?" as an off-peak compaction":"")); -2135doRegionCompactionPrep(); -2136try { -2137 status.setStatus("Compacting store " + store); -2138 // We no longer need to cancel the request on the way out of this -2139 // method because Store#compact will clean up unconditionally -2140 requestNeedsCancellation = false; -2141 store.compact(compaction, throughputController, user); -2142} catch (InterruptedIOException iioe) { -2143 String msg = "compaction interrupted"; -2144 LOG.info(msg, iioe); -2145 status.abort(msg); -2146 return false; -2147} -2148 } finally { -2149if (wasStateSet) { -2150 synchronized (writestate) { -2151 writestate.compacting.decrementAndGet(); -2152if (writestate.compacting.get() = 0) { -2153 writestate.notifyAll(); -2154} -2155 } -2156} -2157 } -2158 status.markComplete("Compaction complete"); -2159 return true; -2160} finally { -2161 if (requestNeedsCancellation) store.cancelRequestedCompaction(compaction); -2162 if (status != null) status.cleanup(); -2163} -2164 } -2165 -2166 /** -2167 * Flush the cache. -2168 * -2169 * pWhen this method is called the cache will be flushed unless: -2170 * ol -2171 * lithe cache is empty/li -2172 * lithe region is closed./li -2173 * lia flush is already in progress/li -2174 * liwrites are disabled/li -2175 * /ol -2176 * -2177 * pThis method may block for some time, so it should not be called from a -2178 * time-sensitive thread. -2179 * @param force whether we want to force a flush of all stores -2180 * @return FlushResult indicating whether the flush was successful or not and if -2181 * the region needs compacting -2182 * -2183 * @throws IOException general io exceptions -2184 * because a snapshot was not properly persisted. -2185 */ -2186 // TODO HBASE-18905. We might have to expose a requestFlush API for CPs -2187 public FlushResult flush(boolean force) throws IOException { -2188return flushcache(force, false, FlushLifeCycleTracker.DUMMY); -2189 } -2190 -2191 public interface FlushResult { -2192enum Result { -2193 FLUSHED_NO_COMPACTION_NEEDED, -2194 FLUSHED_COMPACTION_NEEDED, -2195 // Special case where a flush didn't run because there's nothing in the memstores. 
Used when -2196 // bulk loading to know when we can still load even if a flush didn't happen. -2197 CANNOT_FLUSH_MEMSTORE_EMPTY, -2198 CANNOT_FLUSH -2199} -2200 -2201/** @return the detailed result code */ -2202Result getResult(); -2203 -2204/** @return true if the memstores were flushed, else false */ -2205boolean isFlushSucceeded(); -2206 -2207/** @return True if the flush requested a compaction, else false */ -2208boolean isCompactionNeeded(); -2209 } +2114 status.enableStatusJournal(false); +2115 if (this.closed.get()) { +2116String msg = "Skipping compaction on " + this + " because closed"; +2117LOG.debug(msg); +2118status.abort(msg); +2119return false; +2120 } +2121 boolean wasStateSet = false; +2122 try { +2123synchronized (writestate) { +2124 if
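The flush-related hunk above documents HRegion.flush(boolean force) and the FlushResult accessors (getResult(), isFlushSucceeded(), isCompactionNeeded()). Below is a minimal caller-side sketch of interpreting that result; the logging and the forced flush are illustrative only, not code from this commit.

```java
// Caller-side sketch of the FlushResult contract documented above. Only
// flush(boolean) and the three FlushResult accessors come from the diff;
// the surrounding handling is illustrative.
import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class FlushResultSketch {
  private static final Logger LOG = LoggerFactory.getLogger(FlushResultSketch.class);

  static void flushAndReport(HRegion region) throws IOException {
    HRegion.FlushResult result = region.flush(true); // force: flush all stores
    if (!result.isFlushSucceeded()) {
      // CANNOT_FLUSH or CANNOT_FLUSH_MEMSTORE_EMPTY: nothing was written out.
      LOG.info("Flush did not run: {}", result.getResult());
      return;
    }
    if (result.isCompactionNeeded()) {
      // FLUSHED_COMPACTION_NEEDED: the flush asked for a follow-up compaction.
      LOG.info("Flush completed and requested a compaction for {}", region);
    }
  }
}
```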
[15/30] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e80e3339/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html index 2bbae5e..9ddbcd4 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html @@ -33,32 +33,32 @@ 025import java.net.BindException; 026import java.net.InetAddress; 027import java.net.InetSocketAddress; -028import java.util.ArrayList; -029import java.util.Collection; -030import java.util.Collections; -031import java.util.Comparator; -032import java.util.HashSet; -033import java.util.Iterator; -034import java.util.List; -035import java.util.Map; -036import java.util.Map.Entry; -037import java.util.Objects; -038import java.util.Set; -039import java.util.SortedMap; -040import java.util.TreeMap; -041import java.util.TreeSet; -042import java.util.concurrent.ConcurrentHashMap; -043import java.util.concurrent.ConcurrentMap; -044import java.util.concurrent.ConcurrentSkipListMap; -045import java.util.concurrent.CountDownLatch; -046import java.util.concurrent.TimeUnit; -047import java.util.concurrent.atomic.AtomicBoolean; -048import java.util.concurrent.locks.ReentrantReadWriteLock; -049import java.util.function.Function; -050import javax.management.MalformedObjectNameException; -051import javax.management.ObjectName; -052import javax.servlet.http.HttpServlet; -053import org.apache.commons.lang3.RandomUtils; +028import java.time.Duration; +029import java.util.ArrayList; +030import java.util.Collection; +031import java.util.Collections; +032import java.util.Comparator; +033import java.util.HashSet; +034import java.util.Iterator; +035import java.util.List; +036import java.util.Map; +037import java.util.Map.Entry; +038import java.util.Objects; +039import java.util.Set; +040import java.util.SortedMap; +041import java.util.TreeMap; +042import java.util.TreeSet; +043import java.util.concurrent.ConcurrentHashMap; +044import java.util.concurrent.ConcurrentMap; +045import java.util.concurrent.ConcurrentSkipListMap; +046import java.util.concurrent.atomic.AtomicBoolean; +047import java.util.concurrent.locks.ReentrantReadWriteLock; +048import java.util.function.Function; +049import javax.management.MalformedObjectNameException; +050import javax.management.ObjectName; +051import javax.servlet.http.HttpServlet; +052import org.apache.commons.lang3.RandomUtils; +053import org.apache.commons.lang3.StringUtils; 054import org.apache.commons.lang3.SystemUtils; 055import org.apache.hadoop.conf.Configuration; 056import org.apache.hadoop.fs.FileSystem; @@ -177,16 +177,16 @@ 169import org.apache.hadoop.ipc.RemoteException; 170import org.apache.hadoop.metrics2.util.MBeans; 171import org.apache.hadoop.util.ReflectionUtils; -172import org.apache.hadoop.util.StringUtils; -173import org.apache.yetus.audience.InterfaceAudience; -174import org.apache.zookeeper.KeeperException; -175import org.slf4j.Logger; -176import org.slf4j.LoggerFactory; -177import sun.misc.Signal; -178import sun.misc.SignalHandler; -179 -180import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -181import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +172import 
org.apache.yetus.audience.InterfaceAudience; +173import org.apache.zookeeper.KeeperException; +174import org.slf4j.Logger; +175import org.slf4j.LoggerFactory; +176import sun.misc.Signal; +177import sun.misc.SignalHandler; +178 +179import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +180import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +181import org.apache.hbase.thirdparty.com.google.common.base.Throwables; 182import org.apache.hbase.thirdparty.com.google.common.collect.Maps; 183import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel; 184import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; @@ -393,13 +393,13 @@ 385 final AtomicBoolean online = new AtomicBoolean(false); 386 387 // zookeeper connection and watcher -388 protected ZKWatcher zooKeeper; +388 protected final ZKWatcher zooKeeper; 389 390 // master address tracker -391 private MasterAddressTracker masterAddressTracker; +391 private final MasterAddressTracker masterAddressTracker; 392 393 // Cluster Status Tracker -394 protected ClusterStatusTracker clusterStatusTracker; +394 protected final ClusterStatusTracker clusterStatusTracker; 395 396 // Log Splitting Worker 397 private SplitLogWorker splitLogWorker; @@ -532,3227 +532,3219 @@ 524 private final boolean
[15/30] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc5c2985/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html index 6848d28..69caaf6 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html @@ -212,7941 +212,7899 @@ 204 public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY = 205 "hbase.hregion.scan.loadColumnFamiliesOnDemand"; 206 -207 public static final String HREGION_UNASSIGN_FOR_FNFE = "hbase.hregion.unassign.for.fnfe"; -208 public static final boolean DEFAULT_HREGION_UNASSIGN_FOR_FNFE = true; +207 public static final String HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize"; +208 public static final int DEFAULT_MAX_CELL_SIZE = 10485760; 209 -210 public static final String HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize"; -211 public static final int DEFAULT_MAX_CELL_SIZE = 10485760; -212 -213 /** -214 * This is the global default value for durability. All tables/mutations not -215 * defining a durability or using USE_DEFAULT will default to this value. -216 */ -217 private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL; -218 -219 final AtomicBoolean closed = new AtomicBoolean(false); -220 -221 /* Closing can take some time; use the closing flag if there is stuff we don't -222 * want to do while in closing state; e.g. like offer this region up to the -223 * master as a region to close if the carrying regionserver is overloaded. -224 * Once set, it is never cleared. -225 */ -226 final AtomicBoolean closing = new AtomicBoolean(false); -227 -228 /** -229 * The max sequence id of flushed data on this region. There is no edit in memory that is -230 * less that this sequence id. -231 */ -232 private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM; -233 -234 /** -235 * Record the sequence id of last flush operation. Can be in advance of -236 * {@link #maxFlushedSeqId} when flushing a single column family. In this case, -237 * {@link #maxFlushedSeqId} will be older than the oldest edit in memory. -238 */ -239 private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM; -240 -241 /** -242 * The sequence id of the last replayed open region event from the primary region. This is used -243 * to skip entries before this due to the possibility of replay edits coming out of order from -244 * replication. -245 */ -246 protected volatile long lastReplayedOpenRegionSeqId = -1L; -247 protected volatile long lastReplayedCompactionSeqId = -1L; -248 -249 // -250 // Members -251 // -252 -253 // map from a locked row to the context for that lock including: -254 // - CountDownLatch for threads waiting on that row -255 // - the thread that owns the lock (allow reentrancy) -256 // - reference count of (reentrant) locks held by the thread -257 // - the row itself -258 private final ConcurrentHashMapHashedBytes, RowLockContext lockedRows = -259 new ConcurrentHashMap(); +210 /** +211 * This is the global default value for durability. All tables/mutations not +212 * defining a durability or using USE_DEFAULT will default to this value. 
+213 */ +214 private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL; +215 +216 final AtomicBoolean closed = new AtomicBoolean(false); +217 +218 /* Closing can take some time; use the closing flag if there is stuff we don't +219 * want to do while in closing state; e.g. like offer this region up to the +220 * master as a region to close if the carrying regionserver is overloaded. +221 * Once set, it is never cleared. +222 */ +223 final AtomicBoolean closing = new AtomicBoolean(false); +224 +225 /** +226 * The max sequence id of flushed data on this region. There is no edit in memory that is +227 * less that this sequence id. +228 */ +229 private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM; +230 +231 /** +232 * Record the sequence id of last flush operation. Can be in advance of +233 * {@link #maxFlushedSeqId} when flushing a single column family. In this case, +234 * {@link #maxFlushedSeqId} will be older than the oldest edit in memory. +235 */ +236 private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM; +237 +238 /** +239 * The sequence id of the last replayed open region event from the primary region. This is used +240 * to skip entries before this due to the possibility of replay edits
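The constants hunk above keeps the DEFAULT_DURABILITY javadoc: mutations that do not set a durability, or that use USE_DEFAULT, fall back to SYNC_WAL. A small client-side illustration of overriding that default per mutation, with made-up table/row values:

```java
// Client-side illustration of the durability fallback described above.
// Row, family and qualifier values are made up for the example.
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public final class DurabilitySketch {
  static Put putWithExplicitDurability() {
    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Without this call the Put stays at Durability.USE_DEFAULT and resolves to
    // the table's durability, and ultimately to DEFAULT_DURABILITY (SYNC_WAL).
    put.setDurability(Durability.SKIP_WAL); // explicit per-mutation override
    return put;
  }
}
```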
[15/30] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-annotations/integration.html -- diff --git a/hbase-annotations/integration.html b/hbase-annotations/integration.html index ee2a4ea..e9cdad1 100644 --- a/hbase-annotations/integration.html +++ b/hbase-annotations/integration.html @@ -7,7 +7,7 @@ - + Apache HBase - Annotations CI Management @@ -126,7 +126,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2017-09-17 + Last Published: 2017-09-18 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-annotations/issue-tracking.html -- diff --git a/hbase-annotations/issue-tracking.html b/hbase-annotations/issue-tracking.html index cc6788f..b7498ad 100644 --- a/hbase-annotations/issue-tracking.html +++ b/hbase-annotations/issue-tracking.html @@ -7,7 +7,7 @@ - + Apache HBase - Annotations Issue Management @@ -123,7 +123,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2017-09-17 + Last Published: 2017-09-18 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-annotations/license.html -- diff --git a/hbase-annotations/license.html b/hbase-annotations/license.html index bf60e96..c97e5fb 100644 --- a/hbase-annotations/license.html +++ b/hbase-annotations/license.html @@ -7,7 +7,7 @@ - + Apache HBase - Annotations Project Licenses @@ -326,7 +326,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2017-09-17 + Last Published: 2017-09-18 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-annotations/mail-lists.html -- diff --git a/hbase-annotations/mail-lists.html b/hbase-annotations/mail-lists.html index 3330e84..6bb481a 100644 --- a/hbase-annotations/mail-lists.html +++ b/hbase-annotations/mail-lists.html @@ -7,7 +7,7 @@ - + Apache HBase - Annotations Project Mailing Lists @@ -176,7 +176,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2017-09-17 + Last Published: 2017-09-18 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-annotations/plugin-management.html -- diff --git a/hbase-annotations/plugin-management.html b/hbase-annotations/plugin-management.html index 2409f30..8c765e6 100644 --- a/hbase-annotations/plugin-management.html +++ b/hbase-annotations/plugin-management.html @@ -7,7 +7,7 @@ - + Apache HBase - Annotations Project Plugin Management @@ -271,7 +271,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2017-09-17 + Last Published: 2017-09-18 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-annotations/plugins.html -- diff --git a/hbase-annotations/plugins.html b/hbase-annotations/plugins.html index 0bdecbb..7c17a82 100644 --- a/hbase-annotations/plugins.html +++ b/hbase-annotations/plugins.html @@ -7,7 +7,7 @@ - + Apache HBase - Annotations Project Plugins @@ -222,7 +222,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2017-09-17 + Last Published: 2017-09-18 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-annotations/project-info.html -- diff --git a/hbase-annotations/project-info.html b/hbase-annotations/project-info.html index dfb1494..2b1fd6c 100644 --- a/hbase-annotations/project-info.html +++ b/hbase-annotations/project-info.html @@ -7,7
[15/30] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/495ddb86/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html index cf26433..07c9f18 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html @@ -298,122 +298,135 @@ 290 public abstract boolean moreRowsMayExistAfter(Cell cell); 291 292 public Cell getKeyForNextColumn(Cell cell) { -293ColumnCount nextColumn = columns.getColumnHint(); -294if (nextColumn == null) { -295 return CellUtil.createLastOnRowCol(cell); -296} else { -297 return CellUtil.createFirstOnRowCol(cell, nextColumn.getBuffer(), nextColumn.getOffset(), -298nextColumn.getLength()); -299} -300 } -301 -302 /** -303 * @param nextIndexed the key of the next entry in the block index (if any) -304 * @param currentCell The Cell we're using to calculate the seek key -305 * @return result of the compare between the indexed key and the key portion of the passed cell -306 */ -307 public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) { -308return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell, 0, 0, null, 0, 0, -309 HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); -310 } -311 -312 /** -313 * @param nextIndexed the key of the next entry in the block index (if any) -314 * @param currentCell The Cell we're using to calculate the seek key -315 * @return result of the compare between the indexed key and the key portion of the passed cell -316 */ -317 public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) { -318ColumnCount nextColumn = columns.getColumnHint(); -319if (nextColumn == null) { -320 return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell, 0, 0, null, 0, 0, -321HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); -322} else { -323 return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell, -324currentCell.getFamilyOffset(), currentCell.getFamilyLength(), nextColumn.getBuffer(), -325nextColumn.getOffset(), nextColumn.getLength(), HConstants.LATEST_TIMESTAMP, -326Type.Maximum.getCode()); -327} -328 } -329 -330 /** -331 * @return the Filter -332 */ -333 public abstract Filter getFilter(); -334 -335 /** -336 * Delegate to {@link Filter#getNextCellHint(Cell)}. If no filter, return {@code null}. 
-337 */ -338 public abstract Cell getNextKeyHint(Cell cell) throws IOException; -339 -340 @Override -341 public void beforeShipped() throws IOException { -342if (this.currentRow != null) { -343 this.currentRow = CellUtil.createFirstOnRow(CellUtil.copyRow(this.currentRow)); -344} -345if (columns != null) { -346 columns.beforeShipped(); -347} -348 } -349 -350 protected static Cell createStartKeyFromRow(byte[] startRow, ScanInfo scanInfo) { -351return CellUtil.createFirstDeleteFamilyCellOnRow(startRow, scanInfo.getFamily()); -352 } -353 -354 protected static PairDeleteTracker, ColumnTracker getTrackers(RegionCoprocessorHost host, -355 NavigableSetbyte[] columns, ScanInfo scanInfo, long oldestUnexpiredTS, Scan userScan) -356 throws IOException { -357int resultMaxVersion = scanInfo.getMaxVersions(); -358int maxVersionToCheck = resultMaxVersion; -359if (userScan != null) { -360 if (userScan.isRaw()) { -361resultMaxVersion = userScan.getMaxVersions(); -362 } else { -363resultMaxVersion = Math.min(userScan.getMaxVersions(), scanInfo.getMaxVersions()); -364 } -365 maxVersionToCheck = userScan.hasFilter() ? scanInfo.getMaxVersions() : resultMaxVersion; -366} -367 -368DeleteTracker deleteTracker; -369if (scanInfo.isNewVersionBehavior() (userScan == null || !userScan.isRaw())) { -370 deleteTracker = new NewVersionBehaviorTracker(columns, scanInfo.getMinVersions(), -371 scanInfo.getMaxVersions(), resultMaxVersion, oldestUnexpiredTS); -372} else { -373 deleteTracker = new ScanDeleteTracker(); -374} -375if (host != null) { -376 deleteTracker = host.postInstantiateDeleteTracker(deleteTracker); -377 if (deleteTracker instanceof VisibilityScanDeleteTracker scanInfo.isNewVersionBehavior()) { -378deleteTracker = new VisibilityNewVersionBehaivorTracker(columns, scanInfo.getMinVersions(), -379scanInfo.getMaxVersions(), resultMaxVersion, oldestUnexpiredTS); -380 } -381} -382 -383ColumnTracker
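The hunk above documents that ScanQueryMatcher.getNextKeyHint(Cell) delegates to Filter#getNextCellHint(Cell) and returns null when there is no filter. The sketch below shows a hypothetical custom filter feeding such a hint; it is not an HBase-shipped filter, and the filterKeyValue/ReturnCode signatures can differ between HBase versions.

```java
// Hypothetical filter showing the seek-hint contract that getNextKeyHint()
// delegates to. Not shipped with HBase; signatures may vary by version.
import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipToRowFilter extends FilterBase {
  private final byte[] targetRow;

  public SkipToRowFilter(byte[] targetRow) {
    this.targetRow = targetRow;
  }

  @Override
  public ReturnCode filterKeyValue(Cell cell) throws IOException {
    boolean beforeTarget = Bytes.compareTo(
        cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
        targetRow, 0, targetRow.length) < 0;
    // Asking for SEEK_NEXT_USING_HINT is what makes the matcher consult
    // getNextCellHint() via getNextKeyHint().
    return beforeTarget ? ReturnCode.SEEK_NEXT_USING_HINT : ReturnCode.INCLUDE;
  }

  @Override
  public Cell getNextCellHint(Cell currentCell) throws IOException {
    // First possible cell on the target row; returning null would mean "no hint".
    return CellUtil.createFirstOnRow(targetRow);
  }
}
```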
[15/30] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cef8af03/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html index 7070913..244b038 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html @@ -123,3372 +123,3373 @@ 115import org.apache.hadoop.hbase.regionserver.wal.WALEdit; 116import org.apache.hadoop.hbase.security.Superusers; 117import org.apache.hadoop.hbase.security.User; -118import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -119import org.apache.hadoop.hbase.shaded.com.google.common.cache.Cache; -120import org.apache.hadoop.hbase.shaded.com.google.common.cache.CacheBuilder; -121import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString; -122import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; -123import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController; -124import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; -125import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; -126import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; -127import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -128import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -129import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; -138import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; -139import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -140import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -141import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -142import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -143import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; -144import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; -145import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest; -146import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse; -147import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest; -148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; -149import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest; -150import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse; -151import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; -152import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo; -153import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; -154import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState; -155import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; -156import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; -157import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; -158import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; -159import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; -160import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse; -161import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; -162import
[15/30] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7f23ee04/devapidocs/org/apache/hadoop/hbase/wal/class-use/WAL.Entry.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/wal/class-use/WAL.Entry.html b/devapidocs/org/apache/hadoop/hbase/wal/class-use/WAL.Entry.html index efbfe0b..d88f15d 100644 --- a/devapidocs/org/apache/hadoop/hbase/wal/class-use/WAL.Entry.html +++ b/devapidocs/org/apache/hadoop/hbase/wal/class-use/WAL.Entry.html @@ -421,7 +421,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListWAL.Entry -ReplicationSourceWALReaderThread.WALEntryBatch.walEntries +ReplicationSourceWALReader.WALEntryBatch.walEntries @@ -438,7 +438,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. private WAL.Entry -ReplicationSourceWALReaderThread.filterEntry(WAL.Entryentry) +ReplicationSourceWALReader.filterEntry(WAL.Entryentry) WAL.Entry @@ -459,7 +459,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListWAL.Entry -ReplicationSourceWALReaderThread.WALEntryBatch.getWalEntries() +ReplicationSourceWALReader.WALEntryBatch.getWalEntries() http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true; title="class or interface in java.util">IteratorWAL.Entry @@ -476,7 +476,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. void -ReplicationSourceWALReaderThread.WALEntryBatch.addEntry(WAL.Entryentry) +ReplicationSourceWALReader.WALEntryBatch.addEntry(WAL.Entryentry) WAL.Entry @@ -484,11 +484,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. private WAL.Entry -ReplicationSourceWALReaderThread.filterEntry(WAL.Entryentry) +ReplicationSourceWALReader.filterEntry(WAL.Entryentry) private long -ReplicationSourceWALReaderThread.getEntrySize(WAL.Entryentry) +ReplicationSourceWALReader.getEntrySize(WAL.Entryentry) boolean @@ -496,14 +496,14 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. 
private void -ReplicationSourceWALReaderThread.updateBatchStats(ReplicationSourceWALReaderThread.WALEntryBatchbatch, +ReplicationSourceWALReader.updateBatchStats(ReplicationSourceWALReader.WALEntryBatchbatch, WAL.Entryentry, longentryPosition, longentrySize) private boolean -ReplicationSourceWALReaderThread.updateSerialReplPos(ReplicationSourceWALReaderThread.WALEntryBatchbatch, +ReplicationSourceWALReader.updateSerialReplPos(ReplicationSourceWALReader.WALEntryBatchbatch, WAL.Entryentry) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7f23ee04/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html index 3bd3900..53d258e 100644 --- a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html @@ -166,8 +166,8 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.wal.WALFactory.Providers org.apache.hadoop.hbase.wal.RegionGroupingProvider.Strategies +org.apache.hadoop.hbase.wal.WALFactory.Providers http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7f23ee04/devapidocs/overview-tree.html -- diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html index 364b144..964a9f1 100644 --- a/devapidocs/overview-tree.html +++ b/devapidocs/overview-tree.html @@ -3012,7 +3012,7 @@ org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.LogsComparator (implements java.util.http://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true; title="class or interface in java.util">ComparatorT) org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceFactory org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager (implements org.apache.hadoop.hbase.replication.ReplicationListener) -org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReaderThread.WALEntryBatch
[15/30] hbase-site git commit: Published site at 28cd48b673ca743d193874b2951bc995699e8e89.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/89b638a4/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html index 52d47c0..b8ef76a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html @@ -67,15 +67,15 @@ 059 requiredArguments = { 060@org.jamon.annotations.Argument(name = "master", type = "HMaster")}, 061 optionalArguments = { -062@org.jamon.annotations.Argument(name = "frags", type = "MapString,Integer"), -063@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"), -064@org.jamon.annotations.Argument(name = "filter", type = "String"), -065@org.jamon.annotations.Argument(name = "deadServers", type = "SetServerName"), -066@org.jamon.annotations.Argument(name = "format", type = "String"), -067@org.jamon.annotations.Argument(name = "servers", type = "ListServerName"), -068@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"), -069@org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"), -070@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager")}) +062@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"), +063@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"), +064@org.jamon.annotations.Argument(name = "frags", type = "MapString,Integer"), +065@org.jamon.annotations.Argument(name = "filter", type = "String"), +066@org.jamon.annotations.Argument(name = "deadServers", type = "SetServerName"), +067@org.jamon.annotations.Argument(name = "format", type = "String"), +068@org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"), +069@org.jamon.annotations.Argument(name = "servers", type = "ListServerName"), +070@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean")}) 071public class MasterStatusTmpl 072 extends org.jamon.AbstractTemplateProxy 073{ @@ -116,159 +116,159 @@ 108 return m_master; 109} 110private HMaster m_master; -111// 21, 1 -112public void setFrags(MapString,Integer frags) +111// 28, 1 +112public void setServerManager(ServerManager serverManager) 113{ -114 // 21, 1 -115 m_frags = frags; -116 m_frags__IsNotDefault = true; +114 // 28, 1 +115 m_serverManager = serverManager; +116 m_serverManager__IsNotDefault = true; 117} -118public MapString,Integer getFrags() +118public ServerManager getServerManager() 119{ -120 return m_frags; +120 return m_serverManager; 121} -122private MapString,Integer m_frags; -123public boolean getFrags__IsNotDefault() +122private ServerManager m_serverManager; +123public boolean getServerManager__IsNotDefault() 124{ -125 return m_frags__IsNotDefault; +125 return m_serverManager__IsNotDefault; 126} -127private boolean m_frags__IsNotDefault; -128// 25, 1 -129public void setCatalogJanitorEnabled(boolean catalogJanitorEnabled) +127private boolean m_serverManager__IsNotDefault; +128// 22, 1 +129public void setMetaLocation(ServerName metaLocation) 130{ -131 // 25, 1 -132 m_catalogJanitorEnabled = catalogJanitorEnabled; -133 m_catalogJanitorEnabled__IsNotDefault = true; +131 // 22, 1 +132 m_metaLocation = metaLocation; +133 m_metaLocation__IsNotDefault = true; 134} -135public boolean getCatalogJanitorEnabled() +135public ServerName 
getMetaLocation() 136{ -137 return m_catalogJanitorEnabled; +137 return m_metaLocation; 138} -139private boolean m_catalogJanitorEnabled; -140public boolean getCatalogJanitorEnabled__IsNotDefault() +139private ServerName m_metaLocation; +140public boolean getMetaLocation__IsNotDefault() 141{ -142 return m_catalogJanitorEnabled__IsNotDefault; +142 return m_metaLocation__IsNotDefault; 143} -144private boolean m_catalogJanitorEnabled__IsNotDefault; -145// 26, 1 -146public void setFilter(String filter) +144private boolean m_metaLocation__IsNotDefault; +145// 21, 1 +146public void setFrags(MapString,Integer frags) 147{ -148 // 26, 1 -149 m_filter = filter; -150 m_filter__IsNotDefault = true; +148 // 21, 1 +149 m_frags = frags; +150 m_frags__IsNotDefault = true; 151} -152public String getFilter() +152public MapString,Integer getFrags() 153{ -154 return m_filter; +154 return m_frags; 155
[15/30] hbase-site git commit: Published site at 845d00a16bc22cced0a2eead3d0ba48989968fb6.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6d411951/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html index 4975851..bf27873 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html @@ -193,2475 +193,2411 @@ 185 */ 186 private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024; 187 -188 private static final String WARN_DELAYED_CALLS = "hbase.ipc.warn.delayedrpc.number"; +188 private final IPCUtil ipcUtil; 189 -190 private static final int DEFAULT_WARN_DELAYED_CALLS = 1000; -191 -192 private final int warnDelayedCalls; -193 -194 private AtomicInteger delayedCalls; -195 private final IPCUtil ipcUtil; +190 private static final String AUTH_FAILED_FOR = "Auth failed for "; +191 private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for "; +192 private static final Log AUDITLOG = LogFactory.getLog("SecurityLogger." + +193Server.class.getName()); +194 protected SecretManagerTokenIdentifier secretManager; +195 protected ServiceAuthorizationManager authManager; 196 -197 private static final String AUTH_FAILED_FOR = "Auth failed for "; -198 private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for "; -199 private static final Log AUDITLOG = LogFactory.getLog("SecurityLogger." + -200Server.class.getName()); -201 protected SecretManagerTokenIdentifier secretManager; -202 protected ServiceAuthorizationManager authManager; -203 -204 /** This is set to Call object before Handler invokes an RPC and ybdie -205 * after the call returns. -206 */ -207 protected static final ThreadLocalCall CurCall = new ThreadLocalCall(); -208 -209 /** Keeps MonitoredRPCHandler per handler thread. */ -210 static final ThreadLocalMonitoredRPCHandler MONITORED_RPC -211 = new ThreadLocalMonitoredRPCHandler(); -212 -213 protected final InetSocketAddress bindAddress; -214 protected int port; // port we listen on -215 protected InetSocketAddress address; // inet address we listen on -216 private int readThreads; // number of read threads -217 protected int maxIdleTime; // the maximum idle time after -218 // which a client may be -219 // disconnected -220 protected int thresholdIdleConnections; // the number of idle -221 // connections after which we -222 // will start cleaning up idle -223 // connections -224 int maxConnectionsToNuke; // the max number of -225 // connections to nuke -226 // during a cleanup -227 -228 protected MetricsHBaseServer metrics; -229 -230 protected final Configuration conf; -231 -232 private int maxQueueSize; -233 protected int socketSendBufferSize; -234 protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm -235 protected final boolean tcpKeepAlive; // if T then use keepalives -236 protected final long purgeTimeout; // in milliseconds +197 /** This is set to Call object before Handler invokes an RPC and ybdie +198 * after the call returns. +199 */ +200 protected static final ThreadLocalCall CurCall = new ThreadLocalCall(); +201 +202 /** Keeps MonitoredRPCHandler per handler thread. 
*/ +203 static final ThreadLocalMonitoredRPCHandler MONITORED_RPC +204 = new ThreadLocalMonitoredRPCHandler(); +205 +206 protected final InetSocketAddress bindAddress; +207 protected int port; // port we listen on +208 protected InetSocketAddress address; // inet address we listen on +209 private int readThreads; // number of read threads +210 protected int maxIdleTime; // the maximum idle time after +211 // which a client may be +212 // disconnected +213 protected int thresholdIdleConnections; // the number of idle +214 // connections after which we +215 // will start cleaning up idle +216 // connections +217 int maxConnectionsToNuke; // the max number of +218 // connections to nuke
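The RpcServer hunk above carries the field comments for tcpNoDelay (disable Nagle's algorithm), tcpKeepAlive, and the idle-connection thresholds. As a generic illustration only, and not HBase's actual accept path, these two flags map to standard java.net.Socket options:

```java
// Generic java.net illustration (not HBase code) of the two socket flags the
// RpcServer field comments describe.
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;

public final class SocketFlagsSketch {
  static Socket acceptTuned(ServerSocket server, boolean tcpNoDelay, boolean tcpKeepAlive)
      throws IOException {
    Socket s = server.accept();
    s.setTcpNoDelay(tcpNoDelay);  // true disables Nagle's algorithm (batching of small writes)
    s.setKeepAlive(tcpKeepAlive); // true enables TCP keepalive probes on the connection
    return s;
  }
}
```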