This is an automated email from the ASF dual-hosted git repository.
wchevreuil pushed a commit to branch HBASE-29585
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/HBASE-29585 by this push:
new 7c5fa67fb72 HBASE-29668 Add row cache framework (#7398)
7c5fa67fb72 is described below
commit 7c5fa67fb72ad753e7807e419f147c508deb79c6
Author: EungsopYoo <[email protected]>
AuthorDate: Mon Feb 23 19:34:17 2026 +0900
HBASE-29668 Add row cache framework (#7398)
Co-authored-by: terence.yoo <[email protected]>
---
.../hadoop/hbase/client/TableDescriptor.java | 8 ++
.../hbase/client/TableDescriptorBuilder.java | 25 +++++
.../java/org/apache/hadoop/hbase/HConstants.java | 12 ++
.../hadoop/hbase/MockRegionServerServices.java | 6 +
.../hadoop/hbase/io/util/MemorySizeUtil.java | 25 ++++-
.../apache/hadoop/hbase/regionserver/HRegion.java | 82 +++++++++++++-
.../hadoop/hbase/regionserver/HRegionServer.java | 10 ++
.../hadoop/hbase/regionserver/RSRpcServices.java | 20 +++-
.../apache/hadoop/hbase/regionserver/Region.java | 6 +
.../hbase/regionserver/RegionServerServices.java | 2 +
.../apache/hadoop/hbase/regionserver/RowCache.java | 125 +++++++++++++++++++++
.../hadoop/hbase/regionserver/RowCacheKey.java | 71 ++++++++++++
.../hbase/regionserver/RowCacheStrategy.java | 98 ++++++++++++++++
.../apache/hadoop/hbase/regionserver/RowCells.java | 57 ++++++++++
.../hadoop/hbase/io/util/TestMemorySizeUtil.java | 18 +++
.../hadoop/hbase/master/MockRegionServer.java | 6 +
.../hadoop/hbase/regionserver/TestRowCacheKey.java | 113 +++++++++++++++++++
.../hadoop/hbase/regionserver/TestRowCells.java | 74 ++++++++++++
hbase-shell/src/main/ruby/hbase/admin.rb | 1 +
19 files changed, 748 insertions(+), 11 deletions(-)
diff --git
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
index 817f9e2d4b1..69871d9dc2f 100644
---
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
+++
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
@@ -316,4 +316,12 @@ public interface TableDescriptor {
}
return !enabled;
}
+
+ /**
+ * Checks whether row caching is enabled for this table. Note that row
caching is applied at the
+ * entire row level, not at the column family level.
+ * @return {@code true} if row caching is enabled, {@code false} if
disabled, or {@code null} if
+ * not explicitly set
+ */
+ Boolean getRowCacheEnabled();
}
diff --git
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index 8636b006e83..99692b36fa4 100644
---
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -227,6 +227,15 @@ public class TableDescriptorBuilder {
private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();
+ /**
+ * Used by HBase Shell interface to access this metadata attribute which
denotes if the row cache
+ * is enabled.
+ */
+ @InterfaceAudience.Private
+ public static final String ROW_CACHE_ENABLED = "ROW_CACHE_ENABLED";
+ private static final Bytes ROW_CACHE_ENABLED_KEY = new
Bytes(Bytes.toBytes(ROW_CACHE_ENABLED));
+ private static final boolean DEFAULT_ROW_CACHE_ENABLED = false;
+
static {
DEFAULT_VALUES.put(MAX_FILESIZE,
String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
@@ -236,6 +245,7 @@ public class TableDescriptorBuilder {
DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
// Setting ERASURE_CODING_POLICY to NULL so that it is not considered as
metadata
DEFAULT_VALUES.put(ERASURE_CODING_POLICY,
String.valueOf(DEFAULT_ERASURE_CODING_POLICY));
+ DEFAULT_VALUES.put(ROW_CACHE_ENABLED,
String.valueOf(DEFAULT_ROW_CACHE_ENABLED));
DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s)))
.forEach(RESERVED_KEYWORDS::add);
RESERVED_KEYWORDS.add(IS_META_KEY);
@@ -565,6 +575,11 @@ public class TableDescriptorBuilder {
return new ModifyableTableDescriptor(desc);
}
+ public TableDescriptorBuilder setRowCacheEnabled(boolean rowCacheEnabled) {
+ desc.setRowCacheEnabled(rowCacheEnabled);
+ return this;
+ }
+
private static final class ModifyableTableDescriptor
implements TableDescriptor, Comparable<ModifyableTableDescriptor> {
@@ -1510,6 +1525,16 @@ public class TableDescriptorBuilder {
return Optional.empty();
}
}
+
+ @Override
+ public Boolean getRowCacheEnabled() {
+ Bytes value = getValue(ROW_CACHE_ENABLED_KEY);
+ return value == null ? null :
Boolean.valueOf(Bytes.toString(value.get()));
+ }
+
+ public ModifyableTableDescriptor setRowCacheEnabled(boolean enabled) {
+ return setValue(ROW_CACHE_ENABLED_KEY, Boolean.toString(enabled));
+ }
}
/**
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 1051686d32e..6a51172e9a7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1017,6 +1017,18 @@ public final class HConstants {
public static final float HFILE_BLOCK_CACHE_SIZE_DEFAULT = 0.4f;
+ /**
+ * Configuration key for the size of the row cache
+ */
+ public static final String ROW_CACHE_SIZE_KEY = "row.cache.size";
+ public static final float ROW_CACHE_SIZE_DEFAULT = 0.0f;
+
+ /**
+ * Configuration key for enabling the row cache
+ */
+ public static final String ROW_CACHE_ENABLED_KEY = "row.cache.enabled";
+ public static final boolean ROW_CACHE_ENABLED_DEFAULT = false;
+
/**
* Configuration key for the memory size of the block cache
*/
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 32ad587ad96..d13900258e6 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -50,6 +50,7 @@ import
org.apache.hadoop.hbase.regionserver.MetricsRegionServer;
import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.ReplicationSourceService;
+import org.apache.hadoop.hbase.regionserver.RowCache;
import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager;
import org.apache.hadoop.hbase.regionserver.ServerNonceManager;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
@@ -386,4 +387,9 @@ public class MockRegionServerServices implements
RegionServerServices {
public RegionReplicationBufferManager getRegionReplicationBufferManager() {
return null;
}
+
+ @Override
+ public RowCache getRowCache() {
+ return null;
+ }
}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
index 7ada303d293..6ad962a4634 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
@@ -93,25 +93,29 @@ public class MemorySizeUtil {
}
float memStoreFraction = getGlobalMemStoreHeapPercent(conf, false);
float blockCacheFraction = getBlockCacheHeapPercent(conf);
+ float rowCacheFraction =
+ conf.getFloat(HConstants.ROW_CACHE_SIZE_KEY,
HConstants.ROW_CACHE_SIZE_DEFAULT);
float minFreeHeapFraction = getRegionServerMinFreeHeapFraction(conf);
int memStorePercent = (int) (memStoreFraction * 100);
int blockCachePercent = (int) (blockCacheFraction * 100);
+ int rowCachePercent = (int) (rowCacheFraction * 100);
int minFreeHeapPercent = (int) (minFreeHeapFraction * 100);
- int usedPercent = memStorePercent + blockCachePercent;
+ int usedPercent = memStorePercent + blockCachePercent + rowCachePercent;
int maxAllowedUsed = 100 - minFreeHeapPercent;
if (usedPercent > maxAllowedUsed) {
throw new RuntimeException(String.format(
"RegionServer heap memory allocation is invalid: total memory usage
exceeds 100%% "
- + "(memStore + blockCache + requiredFreeHeap). "
- + "Check the following configuration values:%n" + " - %s = %.2f%n"
+ " - %s = %s%n"
- + " - %s = %s%n" + " - %s = %s",
+ + "(memStore + blockCache + rowCache + requiredFreeHeap). "
+ + "Check the following configuration values:" + "%n - %s = %.2f" +
"%n - %s = %s"
+ + "%n - %s = %s" + "%n - %s = %s" + "%n - %s = %s",
MEMSTORE_SIZE_KEY, memStoreFraction,
HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY,
conf.get(HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY),
HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
conf.get(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY),
HBASE_REGION_SERVER_FREE_HEAP_MIN_MEMORY_SIZE_KEY,
- conf.get(HBASE_REGION_SERVER_FREE_HEAP_MIN_MEMORY_SIZE_KEY)));
+ conf.get(HBASE_REGION_SERVER_FREE_HEAP_MIN_MEMORY_SIZE_KEY),
HConstants.ROW_CACHE_SIZE_KEY,
+ conf.get(HConstants.ROW_CACHE_SIZE_KEY)));
}
}
@@ -313,4 +317,15 @@ public class MemorySizeUtil {
}
return (long) (bucketCacheSize * 1024 * 1024);
}
+
+ public static long getRowCacheSize(Configuration conf) {
+ long max = -1L;
+ final MemoryUsage usage = safeGetHeapMemoryUsage();
+ if (usage != null) {
+ max = usage.getMax();
+ }
+ float globalRowCachePercent =
+ conf.getFloat(HConstants.ROW_CACHE_SIZE_KEY,
HConstants.ROW_CACHE_SIZE_DEFAULT);
+ return ((long) (max * globalRowCachePercent));
+ }
}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9b7daee0f66..60bd4cee6b7 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -65,6 +65,7 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
@@ -375,6 +376,7 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
final LongAdder flushesQueued = new LongAdder();
private BlockCache blockCache;
+ private RowCache rowCache;
private MobFileCache mobFileCache;
private final WAL wal;
private final HRegionFileSystem fs;
@@ -433,6 +435,16 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
*/
private long openSeqNum = HConstants.NO_SEQNUM;
+ /**
+ * Basically the same as openSeqNum, but it is also updated when a bulk load completes.
+ */
+ private final AtomicLong rowCacheSeqNum = new
AtomicLong(HConstants.NO_SEQNUM);
+
+ /**
+ * The setting for whether to enable row cache for this region.
+ */
+ private final boolean isRowCacheEnabled;
+
/**
* The default setting for whether to enable on-demand CF loading for scan
requests to this
* region. Requests can override it.
@@ -847,6 +859,7 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
if (this.rsServices != null) {
this.blockCache = rsServices.getBlockCache().orElse(null);
this.mobFileCache = rsServices.getMobFileCache().orElse(null);
+ this.rowCache = rsServices.getRowCache();
}
this.regionServicesForStores = new RegionServicesForStores(this,
rsServices);
@@ -929,6 +942,16 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
minBlockSizeBytes =
Arrays.stream(this.htableDescriptor.getColumnFamilies())
.mapToInt(ColumnFamilyDescriptor::getBlocksize).min().orElse(HConstants.DEFAULT_BLOCKSIZE);
+
+ this.isRowCacheEnabled = checkRowCacheConfig();
+ }
+
+ private boolean checkRowCacheConfig() {
+ Boolean fromDescriptor = htableDescriptor.getRowCacheEnabled();
+ // The setting from TableDescriptor has higher priority than the global
configuration
+ return fromDescriptor != null
+ ? fromDescriptor
+ : conf.getBoolean(HConstants.ROW_CACHE_ENABLED_KEY,
HConstants.ROW_CACHE_ENABLED_DEFAULT);
}
private void setHTableSpecificConf() {
@@ -3236,6 +3259,31 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
return getScanner(scan, null);
}
+ RegionScannerImpl getScannerWithResults(Get get, Scan scan, List<Cell>
results)
+ throws IOException {
+ if (!rowCache.canCacheRow(get, this)) {
+ return getScannerWithResults(scan, results);
+ }
+
+ // Try get from row cache
+ RowCacheKey key = new RowCacheKey(this, get.getRow());
+ if (rowCache.tryGetFromCache(key, get, results)) {
+ // Cache is hit, and then no scanner is created
+ return null;
+ }
+
+ RegionScannerImpl scanner = getScannerWithResults(scan, results);
+ rowCache.populateCache(results, key);
+ return scanner;
+ }
+
+ private RegionScannerImpl getScannerWithResults(Scan scan, List<Cell>
results)
+ throws IOException {
+ RegionScannerImpl scanner = getScanner(scan);
+ scanner.next(results);
+ return scanner;
+ }
+
@Override
public RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner>
additionalScanners)
throws IOException {
@@ -4775,9 +4823,10 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
}
OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic) throws
IOException {
- return TraceUtil.trace(
- () -> batchMutate(mutations, atomic, HConstants.NO_NONCE,
HConstants.NO_NONCE),
- () -> createRegionSpan("Region.batchMutate"));
+ OperationStatus[] operationStatuses =
+ rowCache.mutateWithRowCacheBarrier(this, Arrays.asList(mutations),
+ () -> this.batchMutate(mutations, atomic, HConstants.NO_NONCE,
HConstants.NO_NONCE));
+ return TraceUtil.trace(() -> operationStatuses, () ->
createRegionSpan("Region.batchMutate"));
}
/**
@@ -5062,7 +5111,9 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
public CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate,
long nonceGroup,
long nonce) throws IOException {
- return TraceUtil.trace(() -> checkAndMutateInternal(checkAndMutate,
nonceGroup, nonce),
+ CheckAndMutateResult checkAndMutateResult =
rowCache.mutateWithRowCacheBarrier(this,
+ checkAndMutate.getRow(), () -> this.checkAndMutate(checkAndMutate,
nonceGroup, nonce));
+ return TraceUtil.trace(() -> checkAndMutateResult,
() -> createRegionSpan("Region.checkAndMutate"));
}
@@ -5261,6 +5312,12 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
private OperationStatus mutate(Mutation mutation, boolean atomic, long
nonceGroup, long nonce)
throws IOException {
+ return rowCache.mutateWithRowCacheBarrier(this, mutation.getRow(),
+ () -> this.mutateInternal(mutation, atomic, nonceGroup, nonce));
+ }
+
+ private OperationStatus mutateInternal(Mutation mutation, boolean atomic,
long nonceGroup,
+ long nonce) throws IOException {
OperationStatus[] status =
this.batchMutate(new Mutation[] { mutation }, atomic, nonceGroup, nonce);
if
(status[0].getOperationStatusCode().equals(OperationStatusCode.SANITY_CHECK_FAILURE))
{
@@ -7881,6 +7938,7 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
LOG.debug("checking classloading for " +
this.getRegionInfo().getEncodedName());
TableDescriptorChecker.checkClassLoading(cConfig, htableDescriptor);
this.openSeqNum = initialize(reporter);
+ this.rowCacheSeqNum.set(this.openSeqNum);
this.mvcc.advanceTo(openSeqNum);
// The openSeqNum must be increased every time when a region is
assigned, as we rely on it to
// determine whether a region has been successfully reopened. So here we
always write open
@@ -8709,6 +8767,22 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
return this.openSeqNum;
}
+ public long getRowCacheSeqNum() {
+ return this.rowCacheSeqNum.get();
+ }
+
+ @Override
+ public boolean isRowCacheEnabled() {
+ return isRowCacheEnabled;
+ }
+
+ /**
+ * This is used to invalidate the row cache of the bulk-loaded region.
+ */
+ public void increaseRowCacheSeqNum() {
+ this.rowCacheSeqNum.incrementAndGet();
+ }
+
@Override
public Map<byte[], Long> getMaxStoreSeqId() {
return this.maxSeqIdInStores;
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index cd49ceb753e..6bd865c50d8 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -498,6 +498,11 @@ public class HRegionServer extends
HBaseServerBase<RSRpcServices>
// A timer submit requests to the PrefetchExecutor
private PrefetchExecutorNotifier prefetchExecutorNotifier;
+ /**
+ * The row cache service
+ */
+ private final RowCache rowCache = new RowCache(getConfiguration());
+
/**
* Starts a HRegionServer at the default location.
* <p/>
@@ -3718,4 +3723,9 @@ public class HRegionServer extends
HBaseServerBase<RSRpcServices>
public RegionReplicationBufferManager getRegionReplicationBufferManager() {
return regionReplicationBufferManager;
}
+
+ @Override
+ public RowCache getRowCache() {
+ return this.rowCache;
+ }
}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index fdfea375e09..35371cb74ae 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -2336,6 +2336,23 @@ public class RSRpcServices extends
HBaseRpcServicesBase<HRegionServer>
@Override
public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,
final BulkLoadHFileRequest request) throws ServiceException {
+ HRegion region;
+ try {
+ region = getRegion(request.getRegion());
+ } catch (IOException ie) {
+ throw new ServiceException(ie);
+ }
+
+ if (!region.isRowCacheEnabled()) {
+ return bulkLoadHFileInternal(request);
+ }
+
+ // TODO: implement row cache logic for bulk load
+ return bulkLoadHFileInternal(request);
+ }
+
+ BulkLoadHFileResponse bulkLoadHFileInternal(final BulkLoadHFileRequest
request)
+ throws ServiceException {
long start = EnvironmentEdgeManager.currentTime();
List<String> clusterIds = new ArrayList<>(request.getClusterIdsList());
if (clusterIds.contains(this.server.getClusterId())) {
@@ -2592,8 +2609,7 @@ public class RSRpcServices extends
HBaseRpcServicesBase<HRegionServer>
RegionScannerImpl scanner = null;
long blockBytesScannedBefore = context.getBlockBytesScanned();
try {
- scanner = region.getScanner(scan);
- scanner.next(results);
+ scanner = region.getScannerWithResults(get, scan, results);
} finally {
if (scanner != null) {
if (closeCallBack == null) {
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 42069e58092..48d4add69ee 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -577,4 +577,10 @@ public interface Region extends ConfigurationObserver {
* estimating quota consumption.
*/
int getMinBlockSizeBytes();
+
+ /**
+ * Returns whether the row cache is enabled for this region.
+ * @return true if the row cache is enabled for this region
+ */
+ boolean isRowCacheEnabled();
}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index a46e2dae695..719d27a3a8a 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -304,4 +304,6 @@ public interface RegionServerServices extends Server,
MutableOnlineRegions, Favo
@Override
List<HRegion> getRegions();
+
+ RowCache getRowCache();
}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCache.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCache.java
new file mode 100644
index 00000000000..d5198c751fa
--- /dev/null
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCache.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Mutation;
+
+/**
+ * Facade for row-level caching in the RegionServer.
+ *
+ * <p>{@code RowCache} coordinates cache access for Get operations and
+ * enforces cache consistency during mutations. It delegates actual
+ * storage and eviction policy decisions (e.g., LRU, LFU) to a
+ * {@link RowCacheStrategy} implementation.</p>
+ *
+ * <p>This class is responsible for:
+ * <ul>
+ * <li>Determining whether row cache is enabled for a region</li>
+ * <li>Attempting cache lookups before falling back to the normal read
path</li>
+ * <li>Populating the cache after successful reads</li>
+ * <li>Evicting affected rows on mutations to maintain correctness</li>
+ * </ul>
+ *
+ * <p>{@code RowCache} does not implement caching policy or storage directly;
+ * those concerns are encapsulated by {@code RowCacheStrategy}.</p>
+ */
[email protected]
+public class RowCache {
+ private final boolean enabledByConf;
+ private final RowCacheStrategy rowCacheStrategy;
+
+ @FunctionalInterface
+ interface RowOperation<R> {
+ R execute() throws IOException;
+ }
+
+ <R> R execute(RowOperation<R> operation) throws IOException {
+ return operation.execute();
+ }
+
+ RowCache(Configuration conf) {
+ enabledByConf =
+ conf.getFloat(HConstants.ROW_CACHE_SIZE_KEY,
HConstants.ROW_CACHE_SIZE_DEFAULT) > 0;
+ // TODO: implement row cache
+ rowCacheStrategy = null;
+ }
+
+ <R> R mutateWithRowCacheBarrier(HRegion region, byte[] row, RowOperation<R>
operation)
+ throws IOException {
+ if (!region.isRowCacheEnabled()) {
+ return operation.execute();
+ }
+
+ RowCacheKey key = new RowCacheKey(region, row);
+ // TODO: implement mutate with row cache barrier logic
+ evictRow(key);
+ return execute(operation);
+ }
+
+ <R> R mutateWithRowCacheBarrier(HRegion region, List<Mutation> mutations,
+ RowOperation<R> operation) throws IOException {
+ if (!region.isRowCacheEnabled()) {
+ return operation.execute();
+ }
+
+ // TODO: implement mutate with row cache barrier logic
+ Set<RowCacheKey> rowCacheKeys = new HashSet<>(mutations.size());
+ mutations.forEach(mutation -> rowCacheKeys.add(new RowCacheKey(region,
mutation.getRow())));
+ rowCacheKeys.forEach(this::evictRow);
+
+ return execute(operation);
+ }
+
+ void evictRow(RowCacheKey key) {
+ rowCacheStrategy.evictRow(key);
+ }
+
+ boolean canCacheRow(Get get, Region region) {
+ // TODO: implement logic to determine if the row can be cached
+ return false;
+ }
+
+ boolean tryGetFromCache(RowCacheKey key, Get get, List<Cell> results) {
+ RowCells row = rowCacheStrategy.getRow(key, get.getCacheBlocks());
+
+ if (row == null) {
+ return false;
+ }
+
+ results.addAll(row.getCells());
+ // TODO: implement update of metrics
+ return true;
+ }
+
+ void populateCache(List<Cell> results, RowCacheKey key) {
+ // TODO: implement with barrier to avoid cache read during mutation
+ try {
+ rowCacheStrategy.cacheRow(key, new RowCells(results));
+ } catch (CloneNotSupportedException ignored) {
+ // Not able to cache row cells, ignore
+ }
+ }
+}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCacheKey.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCacheKey.java
new file mode 100644
index 00000000000..09ec68194ea
--- /dev/null
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCacheKey.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.Arrays;
+import java.util.Objects;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
+
[email protected]
+public class RowCacheKey implements HeapSize {
+ public static final long FIXED_OVERHEAD =
ClassSize.estimateBase(RowCacheKey.class, false);
+
+ private final String encodedRegionName;
+ private final byte[] rowKey;
+
+ // When a region is reopened or bulk-loaded, its rowCacheSeqNum is used to
generate new keys that
+ // bypass the existing cache. This mechanism is effective when
ROW_CACHE_EVICT_ON_CLOSE is set to
+ // false.
+ private final long rowCacheSeqNum;
+
+ public RowCacheKey(HRegion region, byte[] rowKey) {
+ this.encodedRegionName = region.getRegionInfo().getEncodedName();
+ this.rowKey = Objects.requireNonNull(rowKey, "rowKey cannot be null");
+ this.rowCacheSeqNum = region.getRowCacheSeqNum();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o == null || getClass() != o.getClass()) return false;
+ RowCacheKey that = (RowCacheKey) o;
+ return rowCacheSeqNum == that.rowCacheSeqNum
+ && Objects.equals(encodedRegionName, that.encodedRegionName)
+ && Objects.deepEquals(rowKey, that.rowKey);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(encodedRegionName, Arrays.hashCode(rowKey),
rowCacheSeqNum);
+ }
+
+ @Override
+ public String toString() {
+ return encodedRegionName + '_' + Bytes.toStringBinary(rowKey) + '_' +
rowCacheSeqNum;
+ }
+
+ @Override
+ public long heapSize() {
+ return FIXED_OVERHEAD + ClassSize.align(rowKey.length);
+ }
+
+ boolean isSameRegion(HRegion region) {
+ return
this.encodedRegionName.equals(region.getRegionInfo().getEncodedName());
+ }
+}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCacheStrategy.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCacheStrategy.java
new file mode 100644
index 00000000000..0b95f7eb957
--- /dev/null
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCacheStrategy.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
/**
 * Strategy interface for row-level caching used by {@link RowCache}.
 * <p>
 * This interface defines the contract for concrete row cache implementations (e.g., LRU, LFU).
 * Implementations are responsible for managing the in-memory storage of rows retrieved by Get
 * operations, applying eviction policies, tracking cache statistics, and enforcing size
 * constraints.
 * </p>
 * <p>
 * The {@code RowCacheStrategy} focuses solely on cache management concerns. It does not
 * participate in scanner creation or read-path control logic, which are handled by higher-level
 * components such as {@link RowCache}.
 * </p>
 */
@InterfaceAudience.Private
public interface RowCacheStrategy {
  /**
   * Cache the specified row.
   * @param key   the key of the row to cache
   * @param value the cells of the row to cache
   */
  void cacheRow(RowCacheKey key, RowCells value);

  /**
   * Evict the specified row.
   * @param key the key of the row to evict
   */
  void evictRow(RowCacheKey key);

  /**
   * Evict all rows belonging to the specified region. This is a heavy operation as it iterates
   * the entire row cache key set.
   * @param region the region whose rows should be evicted
   */
  void evictRowsByRegion(HRegion region);

  /**
   * Get the number of rows currently in the cache.
   * @return the number of rows in the cache
   */
  long getCount();

  /**
   * Get the number of rows evicted from the cache.
   * @return the number of rows evicted from the cache
   */
  long getEvictedRowCount();

  /**
   * Get the hit count.
   * @return the hit count
   */
  long getHitCount();

  /**
   * Get the maximum size of the cache in bytes.
   * @return the maximum size of the cache in bytes
   */
  long getMaxSize();

  /**
   * Get the miss count.
   * @return the miss count
   */
  long getMissCount();

  /**
   * Get the specified row from the cache.
   * @param key     the key of the row to get
   * @param caching whether caching is enabled for this request
   * @return the cells of the row, or null if not found or caching is disabled
   */
  RowCells getRow(RowCacheKey key, boolean caching);

  /**
   * Get the current size of the cache in bytes.
   * @return the current size of the cache in bytes
   */
  long getSize();
}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCells.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCells.java
new file mode 100644
index 00000000000..2f44058e0a2
--- /dev/null
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCells.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.util.ClassSize;
+
[email protected]
+public class RowCells implements HeapSize {
+ public static final long FIXED_OVERHEAD =
ClassSize.estimateBase(RowCells.class, false);
+
+ private final List<Cell> cells = new ArrayList<>();
+
+ public RowCells(List<Cell> cells) throws CloneNotSupportedException {
+ for (Cell cell : cells) {
+ if (!(cell instanceof ExtendedCell extCell)) {
+ throw new CloneNotSupportedException("Cell is not an ExtendedCell");
+ }
+ try {
+ // To garbage collect the objects referenced by the cells
+ this.cells.add(extCell.deepClone());
+ } catch (RuntimeException e) {
+ // throw new CloneNotSupportedException("Deep clone failed");
+ this.cells.add(extCell);
+ }
+ }
+ }
+
+ @Override
+ public long heapSize() {
+ long cellsSize = cells.stream().mapToLong(Cell::heapSize).sum();
+ return FIXED_OVERHEAD + cellsSize;
+ }
+
+ public List<Cell> getCells() {
+ return cells;
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/util/TestMemorySizeUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/util/TestMemorySizeUtil.java
index 5f00c34dbcb..3cf1fc33753 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/util/TestMemorySizeUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/util/TestMemorySizeUtil.java
@@ -52,6 +52,13 @@ public class TestMemorySizeUtil {
assertEquals(HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD, 0.2f,
0.0f);
MemorySizeUtil.validateRegionServerHeapMemoryAllocation(conf);
+ // when memstore size + block cache size + row cache size + default free
heap min size == 1.0
+ conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.4f);
+ conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.39f);
+ conf.setFloat(HConstants.ROW_CACHE_SIZE_KEY, 0.01f);
+ assertEquals(HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD, 0.2f,
0.0f);
+ MemorySizeUtil.validateRegionServerHeapMemoryAllocation(conf);
+
// when memstore size + block cache size + default free heap min size > 1.0
conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.5f);
assertThrows(RuntimeException.class,
@@ -60,6 +67,7 @@ public class TestMemorySizeUtil {
// when free heap min size is set to 0, it should not throw an exception
conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.5f);
conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.5f);
+ conf.setFloat(HConstants.ROW_CACHE_SIZE_KEY, 0.0f);
conf.setLong(MemorySizeUtil.HBASE_REGION_SERVER_FREE_HEAP_MIN_MEMORY_SIZE_KEY,
0L);
MemorySizeUtil.validateRegionServerHeapMemoryAllocation(conf);
@@ -86,4 +94,14 @@ public class TestMemorySizeUtil {
minFreeHeapFraction =
MemorySizeUtil.getRegionServerMinFreeHeapFraction(conf);
assertEquals(0.0f, minFreeHeapFraction, 0.0f);
}
+
+ @Test
+ public void testGetRowCacheSize() {
+ // The row cache size is configured as a fraction of the heap
+ float rowCacheSizeRatio = 0.01f;
+ conf.setFloat(HConstants.ROW_CACHE_SIZE_KEY, rowCacheSizeRatio);
+ long rowCacheSizeBytes = MemorySizeUtil.getRowCacheSize(conf);
+
+ // getRowCacheSize should resolve that fraction against the JVM max heap size
+ long maxMemory = MemorySizeUtil.safeGetHeapMemoryUsage().getMax();
+ assertEquals((long) (maxMemory * rowCacheSizeRatio), rowCacheSizeBytes);
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index a25bae6ec7b..8b223db6552 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -64,6 +64,7 @@ import
org.apache.hadoop.hbase.regionserver.MetricsRegionServer;
import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.ReplicationSourceService;
+import org.apache.hadoop.hbase.regionserver.RowCache;
import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager;
import org.apache.hadoop.hbase.regionserver.ServerNonceManager;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
@@ -757,4 +758,9 @@ class MockRegionServer implements
AdminProtos.AdminService.BlockingInterface,
ReplicateWALEntryRequest request) throws ServiceException {
return null;
}
+
+ @Override
+ public RowCache getRowCache() {
+ // This mock does not provide a row cache; callers must tolerate null.
+ return null;
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowCacheKey.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowCacheKey.java
new file mode 100644
index 00000000000..ee75fd25192
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowCacheKey.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotSame;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.testclassification.IOTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+@Category({ IOTests.class, SmallTests.class })
+public class TestRowCacheKey {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRowCacheKey.class);
+
+  private static HRegion region1;
+  private static HRegion region2;
+  private static RegionInfo regionInfo1;
+
+  /** Builds a mock RegionInfo with the given table and encoded name. */
+  private static RegionInfo mockRegionInfo(TableName table, String encodedName) {
+    RegionInfo info = Mockito.mock(RegionInfo.class);
+    Mockito.when(info.getEncodedName()).thenReturn(encodedName);
+    Mockito.when(info.getTable()).thenReturn(table);
+    return info;
+  }
+
+  @BeforeClass
+  public static void beforeClass() {
+    TableName tableName = TableName.valueOf("table1");
+
+    regionInfo1 = mockRegionInfo(tableName, "region1");
+    region1 = Mockito.mock(HRegion.class);
+    Mockito.when(region1.getRegionInfo()).thenReturn(regionInfo1);
+
+    RegionInfo regionInfo2 = mockRegionInfo(tableName, "region2");
+    region2 = Mockito.mock(HRegion.class);
+    Mockito.when(region2.getRegionInfo()).thenReturn(regionInfo2);
+  }
+
+  @Test
+  public void testEquality() {
+    RowCacheKey keyR1Row1 = new RowCacheKey(region1, "row1".getBytes());
+    RowCacheKey keyR1Row2 = new RowCacheKey(region1, "row2".getBytes());
+    RowCacheKey keyR2Row1 = new RowCacheKey(region2, "row1".getBytes());
+    RowCacheKey keyR2Row2 = new RowCacheKey(region2, "row2".getBytes());
+    RowCacheKey keyR1Row1Copy = new RowCacheKey(region1, "row1".getBytes());
+    assertNotSame(keyR1Row1, keyR1Row1Copy);
+
+    // hashCode must vary with row and region, and agree for equal keys
+    assertNotEquals(keyR1Row1.hashCode(), keyR1Row2.hashCode());
+    assertNotEquals(keyR1Row1.hashCode(), keyR2Row1.hashCode());
+    assertNotEquals(keyR1Row1.hashCode(), keyR2Row2.hashCode());
+    assertEquals(keyR1Row1.hashCode(), keyR1Row1Copy.hashCode());
+
+    // equals must distinguish row and region, but match distinct-yet-equal instances
+    assertNotEquals(keyR1Row1, keyR1Row2);
+    assertNotEquals(keyR1Row1, keyR2Row1);
+    assertNotEquals(keyR1Row1, keyR2Row2);
+    assertEquals(keyR1Row1, keyR1Row1Copy);
+  }
+
+  @Test
+  public void testDifferentRowCacheSeqNum() {
+    RowCacheKey baseKey = new RowCacheKey(region1, "row1".getBytes());
+
+    // Same region info, but a bumped row cache sequence number
+    HRegion bumpedRegion = Mockito.mock(HRegion.class);
+    Mockito.when(bumpedRegion.getRegionInfo()).thenReturn(regionInfo1);
+    Mockito.when(bumpedRegion.getRowCacheSeqNum()).thenReturn(1L);
+    RowCacheKey bumpedKey = new RowCacheKey(bumpedRegion, "row1".getBytes());
+
+    // A different sequence number must yield a different key
+    assertNotEquals(baseKey.hashCode(), bumpedKey.hashCode());
+    assertNotEquals(baseKey, bumpedKey);
+  }
+
+  @Test
+  public void testHeapSize() {
+    long base = RowCacheKey.FIXED_OVERHEAD;
+
+    // Row bytes of length 1 and 8 occupy a single 8-byte-aligned chunk
+    assertEquals(base + 8, new RowCacheKey(region1, "1".getBytes()).heapSize());
+    assertEquals(base + 8, new RowCacheKey(region1, "12345678".getBytes()).heapSize());
+
+    // A 9-byte row spills into a second 8-byte chunk
+    assertEquals(base + 8 * 2, new RowCacheKey(region1, "123456789".getBytes()).heapSize());
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowCells.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowCells.java
new file mode 100644
index 00000000000..6307bbce305
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowCells.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotSame;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueTestUtil;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ RegionServerTests.class, SmallTests.class })
+public class TestRowCells {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRowCells.class);
+
+  @Test
+  public void testDeepClone() throws CloneNotSupportedException {
+    KeyValue kv1 = KeyValueTestUtil.create("row", "CF", "q1", 1, "v1");
+    KeyValue kv2 = KeyValueTestUtil.create("row", "CF", "q12", 2, "v22");
+    List<Cell> cells = new ArrayList<>();
+    cells.add(kv1);
+    cells.add(kv2);
+    RowCells rowCells = new RowCells(cells);
+
+    // The cached cells must be copies (deep-cloned), yet equal to the originals
+    assertNotSame(kv1, rowCells.getCells().get(0));
+    assertEquals(kv1, rowCells.getCells().get(0));
+    assertNotSame(kv2, rowCells.getCells().get(1));
+    assertEquals(kv2, rowCells.getCells().get(1));
+  }
+
+  @Test
+  public void testHeapSize() throws CloneNotSupportedException {
+    // An empty row contributes only the fixed class overhead
+    RowCells empty = new RowCells(new ArrayList<>());
+    assertEquals(RowCells.FIXED_OVERHEAD, empty.heapSize());
+
+    // With cells, the heap size is the fixed overhead plus each cell's heap size
+    KeyValue kv1 = KeyValueTestUtil.create("row", "CF", "q1", 1, "v1");
+    KeyValue kv2 = KeyValueTestUtil.create("row", "CF", "q22", 2, "v22");
+    List<Cell> cells = new ArrayList<>();
+    cells.add(kv1);
+    cells.add(kv2);
+    RowCells rowCells = new RowCells(cells);
+    assertEquals(RowCells.FIXED_OVERHEAD + kv1.heapSize() + kv2.heapSize(), rowCells.heapSize());
+  }
+}
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 93cc312338c..f4d14eab7d1 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -1610,6 +1610,7 @@ module Hbase
tdb.setRegionMemStoreReplication(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::REGION_MEMSTORE_REPLICATION)))
if arg.include?(TableDescriptorBuilder::REGION_MEMSTORE_REPLICATION)
tdb.setRegionSplitPolicyClassName(arg.delete(TableDescriptorBuilder::SPLIT_POLICY))
if arg.include?(TableDescriptorBuilder::SPLIT_POLICY)
tdb.setRegionReplication(JInteger.valueOf(arg.delete(TableDescriptorBuilder::REGION_REPLICATION)))
if arg.include?(TableDescriptorBuilder::REGION_REPLICATION)
+
tdb.setRowCacheEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::ROW_CACHE_ENABLED)))
if arg.include?(TableDescriptorBuilder::ROW_CACHE_ENABLED)
set_user_metadata(tdb, arg.delete(METADATA)) if arg[METADATA]
set_descriptor_config(tdb, arg.delete(CONFIGURATION)) if
arg[CONFIGURATION]
end