This is an automated email from the ASF dual-hosted git repository.

bbeaudreault pushed a commit to branch hubspot-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 588ad6b6eb102f3f0e3c83d69d958b2b772cf5db
Author: Bryan Beaudreault <bbeaudrea...@hubspot.com>
AuthorDate: Fri Jul 1 12:33:56 2022 -0400

    HubSpot Backport: HBASE-27170 ByteBuffAllocator leak when decompressing blocks near minSizeForReservoirUse
---
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   | 43 ++++++++++------------
 1 file changed, 20 insertions(+), 23 deletions(-)

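Note on the patch below: previously unpack() shallow-cloned the block and then had allocateBuffer() swap the clone's buffer in place, with the clone's shared-memory flag copied from the source block (withShared(blk.isSharedMem())). When the allocator falls back to a heap buffer, as it does for requests under minSizeForReservoirUse, that flag no longer matched the buffer actually held, which per the JIRA title is what leaked ByteBuffAllocator buffers. The patch allocates the new buffer first, threads it through shallowClone()/createBuilder(), and derives the flag from the buffer itself: withShared(!newBuff.hasArray()). A minimal sketch of that derivation follows the diff.
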
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index eb2557cab5a..07f8f81e810 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -636,8 +636,8 @@ public class HFileBlock implements Cacheable {
       return this;
     }
 
-    HFileBlock unpacked = shallowClone(this);
-    unpacked.allocateBuffer(); // allocates space for the decompressed block
+    ByteBuff newBuf = allocateBuffer(); // allocates space for the decompressed block
+    HFileBlock unpacked = shallowClone(this, newBuf);
     boolean succ = false;
     try {
       HFileBlockDecodingContext ctx = blockType == BlockType.ENCODED_DATA
@@ -663,20 +663,21 @@ public class HFileBlock implements Cacheable {
    * from the existing buffer. Does not change header fields.
    * Reserve room to keep checksum bytes too.
    */
-  private void allocateBuffer() {
+  private ByteBuff allocateBuffer() {
     int cksumBytes = totalChecksumBytes();
     int headerSize = headerSize();
     int capacityNeeded = headerSize + uncompressedSizeWithoutHeader + cksumBytes;
 
+    ByteBuff source = buf.duplicate();
     ByteBuff newBuf = allocator.allocate(capacityNeeded);
 
     // Copy header bytes into newBuf.
-    buf.position(0);
-    newBuf.put(0, buf, 0, headerSize);
+    source.position(0);
+    newBuf.put(0, source, 0, headerSize);
 
-    buf = newBuf;
     // set limit to exclude next block's header
-    buf.limit(capacityNeeded);
+    newBuf.limit(capacityNeeded);
+    return newBuf;
   }
 
   /**
@@ -2060,27 +2061,23 @@ public class HFileBlock implements Cacheable {
                    " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
   }
 
-  private static HFileBlockBuilder createBuilder(HFileBlock blk){
-    return new HFileBlockBuilder()
-          .withBlockType(blk.blockType)
-          .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader)
-          .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader)
-          .withPrevBlockOffset(blk.prevBlockOffset)
-          .withByteBuff(blk.buf.duplicate()) // Duplicate the buffer.
-          .withOffset(blk.offset)
-          .withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader)
-          .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize)
-          .withHFileContext(blk.fileContext)
-          .withByteBuffAllocator(blk.allocator)
-          .withShared(blk.isSharedMem());
+  private static HFileBlockBuilder createBuilder(HFileBlock blk, ByteBuff newBuff) {
+    return new HFileBlockBuilder().withBlockType(blk.blockType)
+      .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader)
+      .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader)
+      .withPrevBlockOffset(blk.prevBlockOffset).withByteBuff(newBuff).withOffset(blk.offset)
+      .withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader)
+      .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize).withHFileContext(blk.fileContext)
+      .withByteBuffAllocator(blk.allocator).withShared(!newBuff.hasArray());
   }
 
-  static HFileBlock shallowClone(HFileBlock blk) {
-    return createBuilder(blk).build();
+  static HFileBlock shallowClone(HFileBlock blk, ByteBuff newBuf) {
+    return createBuilder(blk, newBuf).build();
   }
 
   static HFileBlock deepCloneOnHeap(HFileBlock blk) {
     ByteBuff deepCloned = ByteBuff.wrap(ByteBuffer.wrap(blk.buf.toBytes(0, blk.buf.limit())));
-    return createBuilder(blk).withByteBuff(deepCloned).withShared(false).build();
+    return createBuilder(blk, blk.buf.duplicate()).withByteBuff(deepCloned).withShared(false)
+      .build();
   }
 }

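A minimal standalone sketch of that flag derivation (not HBase code; the allocator and threshold below are hypothetical stand-ins for ByteBuffAllocator and minSizeForReservoirUse):

import java.nio.ByteBuffer;

public class SharedFlagSketch {
  // Hypothetical stand-in for the allocator's heap-fallback threshold.
  static final int MIN_SIZE_FOR_RESERVOIR_USE = 1024;

  // Stand-in allocator: direct (pool-like) buffers for large requests,
  // plain heap buffers for small ones.
  static ByteBuffer allocate(int capacity) {
    return capacity >= MIN_SIZE_FOR_RESERVOIR_USE
      ? ByteBuffer.allocateDirect(capacity)
      : ByteBuffer.allocate(capacity);
  }

  public static void main(String[] args) {
    for (int size : new int[] { 512, 4096 }) {
      ByteBuffer newBuf = allocate(size);
      // The essential move in the patch: derive "shared" from the buffer
      // actually allocated (!hasArray() == not heap-backed) instead of
      // copying the flag from the source block.
      boolean shared = !newBuf.hasArray();
      System.out.println(size + " bytes -> shared=" + shared);
    }
  }
}

Run as-is this prints shared=false for the 512-byte request and shared=true for the 4096-byte one; in the patched createBuilder() the same test keeps the clone's flag consistent with whatever allocator.allocate() actually returned.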