http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
----------------------------------------------------------------------
diff --git 
a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
 
b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
index d903d79..29f4811 100644
--- 
a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
+++ 
b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
@@ -43,6 +43,7 @@ import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.io.WritableUtils;
 
@@ -54,7 +55,8 @@ import org.apache.hadoop.io.WritableUtils;
  * PrefixTreeDataBlockEncoder implementation of DataBlockEncoder. This is the 
primary entry point
  * for PrefixTree encoding and decoding. Encoding is delegated to instances of
  * {@link PrefixTreeEncoder}, and decoding is delegated to instances of
- * {@link org.apache.hadoop.hbase.codec.prefixtree.scanner.CellSearcher}. 
Encoder and decoder instances are
+ * {@link org.apache.hadoop.hbase.codec.prefixtree.scanner.CellSearcher}.
+ * Encoder and decoder instances are
  * created and recycled by static PtEncoderFactory and PtDecoderFactory.
  */
 @InterfaceAudience.Private
@@ -114,12 +116,14 @@ public class PrefixTreeCodec implements DataBlockEncoder {
 
 
   @Override
-  public Cell getFirstKeyCellInBlock(ByteBuffer block) {
+  public Cell getFirstKeyCellInBlock(ByteBuff block) {
     block.rewind();
     PrefixTreeArraySearcher searcher = null;
     try {
       // should i includeMemstoreTS (second argument)?  i think 
PrefixKeyDeltaEncoder is, so i will
-      searcher = DecoderFactory.checkOut(block, true);
+      // TODO : Change to work with BBs
+      searcher = DecoderFactory.checkOut(block.asSubByteBuffer(block.limit() - 
block.position()),
+          true);
       if (!searcher.positionAtFirstCell()) {
         return null;
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
----------------------------------------------------------------------
diff --git 
a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
 
b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
index eefd953..93610c4 100644
--- 
a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
+++ 
b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
@@ -57,6 +57,7 @@ public class PrefixTreeSeeker implements EncodedSeeker {
   @Override
   public void setCurrentBuffer(ByteBuffer fullBlockBuffer) {
     block = fullBlockBuffer;
+    // TODO : change to Bytebuff
     ptSearcher = DecoderFactory.checkOut(block, includeMvccVersion);
     rewind();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
index f56a921..26555ef 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
@@ -18,9 +18,9 @@
 package org.apache.hadoop.hbase.io.hfile;
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 
 /**
  * Interface for a deserializer. Throws an IOException if the serialized data 
is
@@ -33,7 +33,7 @@ public interface CacheableDeserializer<T extends Cacheable> {
    *
    * @return T the deserialized object.
    */
-  T deserialize(ByteBuffer b) throws IOException;
+  T deserialize(ByteBuff b) throws IOException;
 
   /**
    * 
@@ -43,7 +43,7 @@ public interface CacheableDeserializer<T extends Cacheable> {
    * @return T the deserialized object.
    * @throws IOException
    */
-  T deserialize(ByteBuffer b, boolean reuse) throws IOException;
+  T deserialize(ByteBuff b, boolean reuse) throws IOException;
 
   /**
    * Get the identifier of this deserialiser. Identifier is unique for each

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java
index 11436ce..6890884 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java
@@ -21,11 +21,11 @@ package org.apache.hadoop.hbase.io.hfile;
 
 import java.io.DataInput;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.util.BloomFilter;
 import org.apache.hadoop.hbase.util.BloomFilterUtil;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -93,7 +93,7 @@ public class CompoundBloomFilter extends 
CompoundBloomFilterBase
   }
 
   @Override
-  public boolean contains(byte[] key, int keyOffset, int keyLength, ByteBuffer 
bloom) {
+  public boolean contains(byte[] key, int keyOffset, int keyLength, ByteBuff 
bloom) {
     // We try to store the result in this variable so we can update stats for
     // testing, but when an error happens, we log a message and return.
 
@@ -120,7 +120,7 @@ public class CompoundBloomFilter extends 
CompoundBloomFilterBase
                 + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
       }
 
-      ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
+      ByteBuff bloomBuf = bloomBlock.getBufferReadOnly();
       result = BloomFilterUtil.contains(key, keyOffset, keyLength,
           bloomBuf, bloomBlock.headerSize(),
           bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
@@ -137,7 +137,7 @@ public class CompoundBloomFilter extends 
CompoundBloomFilterBase
   }
 
   @Override
-  public boolean contains(Cell keyCell, ByteBuffer bloom) {
+  public boolean contains(Cell keyCell, ByteBuff bloom) {
     // We try to store the result in this variable so we can update stats for
     // testing, but when an error happens, we log a message and return.
     int block = index.rootBlockContainingKey(keyCell);

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 6653c23..71ac506 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -26,7 +26,6 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.SequenceInputStream;
 import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Comparator;
@@ -55,6 +54,7 @@ import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
@@ -389,7 +389,7 @@ public class HFile {
 
     HFileScanner getScanner(boolean cacheBlocks, final boolean pread, final 
boolean isCompaction);
 
-    ByteBuffer getMetaBlock(String metaBlockName, boolean cacheBlock) throws 
IOException;
+    ByteBuff getMetaBlock(String metaBlockName, boolean cacheBlock) throws 
IOException;
 
     Map<byte[], byte[]> loadFileInfo() throws IOException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index f3bf0b7..8672d62 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -34,14 +34,16 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.io.ByteBufferInputStream;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+import org.apache.hadoop.hbase.io.ByteBuffInputStream;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-import org.apache.hadoop.hbase.util.ByteBufferUtils;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -104,8 +106,9 @@ public class HFileBlock implements Cacheable {
   static final byte[] DUMMY_HEADER_NO_CHECKSUM =
      new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM];
 
-  public static final int BYTE_BUFFER_HEAP_SIZE = (int) ClassSize.estimateBase(
-      ByteBuffer.wrap(new byte[0], 0, 0).getClass(), false);
+  // How to get the estimate correctly? if it is a singleBB?
+  public static final int MULTI_BYTE_BUFFER_HEAP_SIZE = (int) 
ClassSize.estimateBase(
+      new MultiByteBuff(ByteBuffer.wrap(new byte[0], 0, 0)).getClass(), false);
 
   // meta.usesHBaseChecksum+offset+nextBlockOnDiskSizeWithHeader
   public static final int EXTRA_SERIALIZATION_SPACE = Bytes.SIZEOF_BYTE + 
Bytes.SIZEOF_INT
@@ -118,14 +121,16 @@ public class HFileBlock implements Cacheable {
 
   static final CacheableDeserializer<Cacheable> blockDeserializer =
       new CacheableDeserializer<Cacheable>() {
-        public HFileBlock deserialize(ByteBuffer buf, boolean reuse) throws 
IOException{
+        public HFileBlock deserialize(ByteBuff buf, boolean reuse) throws 
IOException{
           buf.limit(buf.limit() - 
HFileBlock.EXTRA_SERIALIZATION_SPACE).rewind();
-          ByteBuffer newByteBuffer;
+          ByteBuff newByteBuffer;
           if (reuse) {
             newByteBuffer = buf.slice();
           } else {
-           newByteBuffer = ByteBuffer.allocate(buf.limit());
-           newByteBuffer.put(buf);
+            // Used only in tests
+            int len = buf.limit();
+            newByteBuffer = new SingleByteBuff(ByteBuffer.allocate(len));
+            newByteBuffer.put(0, buf, buf.position(), len);
           }
           buf.position(buf.limit());
           buf.limit(buf.limit() + HFileBlock.EXTRA_SERIALIZATION_SPACE);
@@ -145,7 +150,8 @@ public class HFileBlock implements Cacheable {
         }
 
         @Override
-        public HFileBlock deserialize(ByteBuffer b) throws IOException {
+        public HFileBlock deserialize(ByteBuff b) throws IOException {
+          // Used only in tests
           return deserialize(b, false);
         }
       };
@@ -174,7 +180,7 @@ public class HFileBlock implements Cacheable {
   private final int onDiskDataSizeWithHeader;
 
   /** The in-memory representation of the hfile block */
-  private ByteBuffer buf;
+  private ByteBuff buf;
 
   /** Meta data that holds meta information on the hfileblock */
   private HFileContext fileContext;
@@ -209,7 +215,7 @@ public class HFileBlock implements Cacheable {
    * @param fileContext HFile meta data
    */
   HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int 
uncompressedSizeWithoutHeader,
-      long prevBlockOffset, ByteBuffer buf, boolean fillHeader, long offset,
+      long prevBlockOffset, ByteBuff buf, boolean fillHeader, long offset,
       int onDiskDataSizeWithHeader, HFileContext fileContext) {
     this.blockType = blockType;
     this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader;
@@ -224,6 +230,13 @@ public class HFileBlock implements Cacheable {
     this.buf.rewind();
   }
 
+  HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int 
uncompressedSizeWithoutHeader,
+      long prevBlockOffset, ByteBuffer buf, boolean fillHeader, long offset,
+      int onDiskDataSizeWithHeader, HFileContext fileContext) {
+    this(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, 
prevBlockOffset,
+        new SingleByteBuff(buf), fillHeader, offset, onDiskDataSizeWithHeader, 
fileContext);
+  }
+
   /**
    * Copy constructor. Creates a shallow copy of {@code that}'s buffer.
    */
@@ -239,6 +252,9 @@ public class HFileBlock implements Cacheable {
     this.nextBlockOnDiskSizeWithHeader = that.nextBlockOnDiskSizeWithHeader;
   }
 
+  HFileBlock(ByteBuffer b, boolean usesHBaseChecksum) throws IOException {
+    this(new SingleByteBuff(b), usesHBaseChecksum);
+  }
   /**
    * Creates a block from an existing buffer starting with a header. Rewinds
    * and takes ownership of the buffer. By definition of rewind, ignores the
@@ -247,7 +263,7 @@ public class HFileBlock implements Cacheable {
    * because majorNumbers indicate the format of a HFile whereas minorNumbers
    * indicate the format inside a HFileBlock.
    */
-  HFileBlock(ByteBuffer b, boolean usesHBaseChecksum) throws IOException {
+  HFileBlock(ByteBuff b, boolean usesHBaseChecksum) throws IOException {
     b.rewind();
     blockType = BlockType.read(b);
     onDiskSizeWithoutHeader = b.getInt();
@@ -334,8 +350,8 @@ public class HFileBlock implements Cacheable {
    *
    * @return the buffer with header skipped and checksum omitted.
    */
-  public ByteBuffer getBufferWithoutHeader() {
-    ByteBuffer dup = this.buf.duplicate();
+  public ByteBuff getBufferWithoutHeader() {
+    ByteBuff dup = this.buf.duplicate();
     dup.position(headerSize());
     dup.limit(buf.limit() - totalChecksumBytes());
     return dup.slice();
@@ -343,15 +359,15 @@ public class HFileBlock implements Cacheable {
 
   /**
    * Returns the buffer this block stores internally. The clients must not
-   * modify the buffer object. This method has to be public because it is
-   * used in {@link CompoundBloomFilter} to avoid object
-   *  creation on every Bloom filter lookup, but has to be used with caution.
-   *   Checksum data is not included in the returned buffer but header data is.
+   * modify the buffer object. This method has to be public because it is used
+   * in {@link CompoundBloomFilter} to avoid object creation on every Bloom
+   * filter lookup, but has to be used with caution. Checksum data is not
+   * included in the returned buffer but header data is.
    *
    * @return the buffer of this block for read-only operations
    */
-  public ByteBuffer getBufferReadOnly() {
-    ByteBuffer dup = this.buf.duplicate();
+  public ByteBuff getBufferReadOnly() {
+    ByteBuff dup = this.buf.duplicate();
     dup.limit(buf.limit() - totalChecksumBytes());
     return dup.slice();
   }
@@ -363,8 +379,8 @@ public class HFileBlock implements Cacheable {
    *
    * @return the buffer with header and checksum included for read-only 
operations
    */
-  public ByteBuffer getBufferReadOnlyWithHeader() {
-    ByteBuffer dup = this.buf.duplicate();
+  public ByteBuff getBufferReadOnlyWithHeader() {
+    ByteBuff dup = this.buf.duplicate();
     return dup.slice();
   }
 
@@ -374,8 +390,8 @@ public class HFileBlock implements Cacheable {
    *
    * @return the byte buffer with header and checksum included
    */
-  ByteBuffer getBufferWithHeader() {
-    ByteBuffer dupBuf = buf.duplicate();
+  ByteBuff getBufferWithHeader() {
+    ByteBuff dupBuf = buf.duplicate();
     dupBuf.rewind();
     return dupBuf;
   }
@@ -417,7 +433,8 @@ public class HFileBlock implements Cacheable {
     sanityCheckAssertion(buf.getLong(), prevBlockOffset, "prevBlocKOffset");
     if (this.fileContext.isUseHBaseChecksum()) {
       sanityCheckAssertion(buf.get(), 
this.fileContext.getChecksumType().getCode(), "checksumType");
-      sanityCheckAssertion(buf.getInt(), 
this.fileContext.getBytesPerChecksum(), "bytesPerChecksum");
+      sanityCheckAssertion(buf.getInt(), 
this.fileContext.getBytesPerChecksum(),
+          "bytesPerChecksum");
       sanityCheckAssertion(buf.getInt(), onDiskDataSizeWithHeader, 
"onDiskDataSizeWithHeader");
     }
 
@@ -463,7 +480,7 @@ public class HFileBlock implements Cacheable {
       dataBegin = Bytes.toStringBinary(buf.array(), buf.arrayOffset() + 
headerSize(),
           Math.min(32, buf.limit() - buf.arrayOffset() - headerSize()));
     } else {
-      ByteBuffer bufWithoutHeader = getBufferWithoutHeader();
+      ByteBuff bufWithoutHeader = getBufferWithoutHeader();
       byte[] dataBeginBytes = new byte[Math.min(32,
           bufWithoutHeader.limit() - bufWithoutHeader.position())];
       bufWithoutHeader.get(dataBeginBytes);
@@ -489,7 +506,7 @@ public class HFileBlock implements Cacheable {
       if (buf.hasArray()) {
         dataBegin = Bytes.toStringBinary(buf.array(), buf.arrayOffset(), 
Math.min(32, buf.limit()));
       } else {
-        ByteBuffer bufDup = getBufferReadOnly();
+        ByteBuff bufDup = getBufferReadOnly();
         byte[] dataBeginBytes = new byte[Math.min(32, bufDup.limit() - 
bufDup.position())];
         bufDup.get(dataBeginBytes);
         dataBegin = Bytes.toStringBinary(dataBeginBytes);
@@ -521,7 +538,7 @@ public class HFileBlock implements Cacheable {
     HFileBlockDecodingContext ctx = blockType == BlockType.ENCODED_DATA ?
       reader.getBlockDecodingContext() : 
reader.getDefaultBlockDecodingContext();
 
-    ByteBuffer dup = this.buf.duplicate();
+    ByteBuff dup = this.buf.duplicate();
     dup.position(this.headerSize());
     dup = dup.slice();
     ctx.prepareDecoding(unpacked.getOnDiskSizeWithoutHeader(),
@@ -534,16 +551,14 @@ public class HFileBlock implements Cacheable {
       // Below call to copyFromBufferToBuffer() will try positional read/write 
from/to buffers when
       // any of the buffer is DBB. So we change the limit on a dup buffer. No 
copying just create
       // new BB objects
-      ByteBuffer inDup = this.buf.duplicate();
+      ByteBuff inDup = this.buf.duplicate();
       inDup.limit(inDup.limit() + headerSize());
-      ByteBuffer outDup = unpacked.buf.duplicate();
+      ByteBuff outDup = unpacked.buf.duplicate();
       outDup.limit(outDup.limit() + unpacked.headerSize());
-      ByteBufferUtils.copyFromBufferToBuffer(
-          outDup,
-          inDup,
-          this.onDiskDataSizeWithHeader,
+      outDup.put(
           unpacked.headerSize() + unpacked.uncompressedSizeWithoutHeader
-              + unpacked.totalChecksumBytes(), unpacked.headerSize());
+              + unpacked.totalChecksumBytes(), inDup, 
this.onDiskDataSizeWithHeader,
+          unpacked.headerSize());
     }
     return unpacked;
   }
@@ -571,11 +586,10 @@ public class HFileBlock implements Cacheable {
 
     // Copy header bytes into newBuf.
     // newBuf is HBB so no issue in calling array()
-    ByteBuffer dup = buf.duplicate();
-    dup.position(0);
-    dup.get(newBuf.array(), newBuf.arrayOffset(), headerSize);
+    buf.position(0);
+    buf.get(newBuf.array(), newBuf.arrayOffset(), headerSize);
 
-    buf = newBuf;
+    buf = new SingleByteBuff(newBuf);
     // set limit to exclude next block's header
     buf.limit(headerSize + uncompressedSizeWithoutHeader + cksumBytes);
   }
@@ -627,16 +641,16 @@ public class HFileBlock implements Cacheable {
    * @return a byte stream reading the data + checksum of this block
    */
   public DataInputStream getByteStream() {
-    ByteBuffer dup = this.buf.duplicate();
+    ByteBuff dup = this.buf.duplicate();
     dup.position(this.headerSize());
-    return new DataInputStream(new ByteBufferInputStream(dup));
+    return new DataInputStream(new ByteBuffInputStream(dup));
   }
 
   @Override
   public long heapSize() {
     long size = ClassSize.align(
         ClassSize.OBJECT +
-        // Block type, byte buffer and meta references
+        // Block type, multi byte buffer and meta references
         3 * ClassSize.REFERENCE +
         // On-disk size, uncompressed size, and next block's on-disk size
         // bytePerChecksum and onDiskDataSize
@@ -649,7 +663,7 @@ public class HFileBlock implements Cacheable {
 
     if (buf != null) {
       // Deep overhead of the byte buffer. Needs to be aligned separately.
-      size += ClassSize.align(buf.capacity() + BYTE_BUFFER_HEAP_SIZE);
+      size += ClassSize.align(buf.capacity() + MULTI_BYTE_BUFFER_HEAP_SIZE);
     }
 
     return ClassSize.align(size);
@@ -1724,7 +1738,7 @@ public class HFileBlock implements Cacheable {
 
   @Override
   public void serialize(ByteBuffer destination) {
-    ByteBufferUtils.copyFromBufferToBuffer(destination, this.buf, 0, 
getSerializedLength()
+    this.buf.get(destination, 0, getSerializedLength()
         - EXTRA_SERIALIZATION_SPACE);
     serializeExtraInfo(destination);
   }
@@ -1786,7 +1800,7 @@ public class HFileBlock implements Cacheable {
     if (castedComparison.uncompressedSizeWithoutHeader != 
this.uncompressedSizeWithoutHeader) {
       return false;
     }
-    if (ByteBufferUtils.compareTo(this.buf, 0, this.buf.limit(), 
castedComparison.buf, 0,
+    if (ByteBuff.compareTo(this.buf, 0, this.buf.limit(), 
castedComparison.buf, 0,
         castedComparison.buf.limit()) != 0) {
       return false;
     }
@@ -1876,7 +1890,7 @@ public class HFileBlock implements Cacheable {
    * This is mostly helpful for debugging. This assumes that the block
    * has minor version > 0.
    */
-  static String toStringHeader(ByteBuffer buf) throws IOException {
+  static String toStringHeader(ByteBuff buf) throws IOException {
     byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), 
BlockType.MAGIC_LENGTH)];
     buf.get(magicBuf);
     BlockType bt = BlockType.parse(magicBuf, 0, BlockType.MAGIC_LENGTH);

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 86b5e15..85190d6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -45,9 +45,10 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader;
-import org.apache.hadoop.hbase.util.ByteBufferUtils;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.util.StringUtils;
 
@@ -342,7 +343,7 @@ public class HFileBlockIndex {
 
         // Locate the entry corresponding to the given key in the non-root
         // (leaf or intermediate-level) index block.
-        ByteBuffer buffer = block.getBufferWithoutHeader();
+        ByteBuff buffer = block.getBufferWithoutHeader();
         index = locateNonRootIndexEntry(buffer, key, comparator);
         if (index == -1) {
           // This has to be changed
@@ -396,14 +397,14 @@ public class HFileBlockIndex {
             midLeafBlockOffset, midLeafBlockOnDiskSize, true, true, false, 
true,
             BlockType.LEAF_INDEX, null);
 
-        ByteBuffer b = midLeafBlock.getBufferWithoutHeader();
+        ByteBuff b = midLeafBlock.getBufferWithoutHeader();
         int numDataBlocks = b.getInt();
-        int keyRelOffset = b.getInt(Bytes.SIZEOF_INT * (midKeyEntry + 1));
-        int keyLen = b.getInt(Bytes.SIZEOF_INT * (midKeyEntry + 2)) -
+        int keyRelOffset = b.getIntStrictlyForward(Bytes.SIZEOF_INT * 
(midKeyEntry + 1));
+        int keyLen = b.getIntStrictlyForward(Bytes.SIZEOF_INT * (midKeyEntry + 
2)) -
             keyRelOffset;
         int keyOffset = Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset
             + SECONDARY_INDEX_ENTRY_OVERHEAD;
-        byte[] bytes = ByteBufferUtils.toBytes(b, keyOffset, keyLen);
+        byte[] bytes = b.toBytes(keyOffset, keyLen);
         targetMidKey = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length);
       } else {
         // The middle of the root-level index.
@@ -653,7 +654,7 @@ public class HFileBlockIndex {
      * @param i the ith position
      * @return The indexed key at the ith position in the nonRootIndex.
      */
-    protected byte[] getNonRootIndexedKey(ByteBuffer nonRootIndex, int i) {
+    protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) {
       int numEntries = nonRootIndex.getInt(0);
       if (i < 0 || i >= numEntries) {
         return null;
@@ -678,7 +679,7 @@ public class HFileBlockIndex {
         targetKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD;
 
       // TODO check whether we can make BB backed Cell here? So can avoid 
bytes copy.
-      return ByteBufferUtils.toBytes(nonRootIndex, targetKeyOffset, 
targetKeyLength);
+      return nonRootIndex.toBytes(targetKeyOffset, targetKeyLength);
     }
 
     /**
@@ -697,10 +698,10 @@ public class HFileBlockIndex {
      *         -1 otherwise
      * @throws IOException
      */
-    static int binarySearchNonRootIndex(Cell key, ByteBuffer nonRootIndex,
+    static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex,
         CellComparator comparator) {
 
-      int numEntries = nonRootIndex.getInt(0);
+      int numEntries = nonRootIndex.getIntStrictlyForward(0);
       int low = 0;
       int high = numEntries - 1;
       int mid = 0;
@@ -713,12 +714,12 @@ public class HFileBlockIndex {
       // keys[numEntries] = Infinity, then we are maintaining an invariant that
       // keys[low - 1] < key < keys[high + 1] while narrowing down the range.
       KeyValue.KeyOnlyKeyValue nonRootIndexKV = new KeyValue.KeyOnlyKeyValue();
+      Pair<ByteBuffer, Integer> pair = new Pair<ByteBuffer, Integer>();
       while (low <= high) {
         mid = (low + high) >>> 1;
 
         // Midkey's offset relative to the end of secondary index
-        int midKeyRelOffset = nonRootIndex.getInt(
-            Bytes.SIZEOF_INT * (mid + 1));
+      int midKeyRelOffset = 
nonRootIndex.getIntStrictlyForward(Bytes.SIZEOF_INT * (mid + 1));
 
         // The offset of the middle key in the blockIndex buffer
         int midKeyOffset = entriesOffset       // Skip secondary index
@@ -728,16 +729,17 @@ public class HFileBlockIndex {
         // We subtract the two consecutive secondary index elements, which
         // gives us the size of the whole (offset, onDiskSize, key) tuple. We
         // then need to subtract the overhead of offset and onDiskSize.
-        int midLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (mid + 2)) -
+        int midLength = nonRootIndex.getIntStrictlyForward(Bytes.SIZEOF_INT * 
(mid + 2)) -
             midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD;
 
         // we have to compare in this order, because the comparator order
         // has special logic when the 'left side' is a special key.
         // TODO make KeyOnlyKeyValue to be Buffer backed and avoid array() 
call. This has to be
         // done after HBASE-12224 & HBASE-12282
-        // TODO avaoid array call.
-        nonRootIndexKV.setKey(nonRootIndex.array(),
-            nonRootIndex.arrayOffset() + midKeyOffset, midLength);
+        // TODO avoid array call.
+        nonRootIndex.asSubByteBuffer(midKeyOffset, midLength, pair);
+        nonRootIndexKV.setKey(pair.getFirst().array(),
+            pair.getFirst().arrayOffset() + pair.getSecond(), midLength);
         int cmp = comparator.compareKeyIgnoresMvcc(key, nonRootIndexKV);
 
         // key lives above the midpoint
@@ -787,19 +789,20 @@ public class HFileBlockIndex {
      *         return -1 in the case the given key is before the first key.
      *
      */
-    static int locateNonRootIndexEntry(ByteBuffer nonRootBlock, Cell key,
+    static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key,
         CellComparator comparator) {
       int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator);
 
       if (entryIndex != -1) {
-        int numEntries = nonRootBlock.getInt(0);
+        int numEntries = nonRootBlock.getIntStrictlyForward(0);
 
         // The end of secondary index and the beginning of entries themselves.
         int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);
 
         // The offset of the entry we are interested in relative to the end of
         // the secondary index.
-        int entryRelOffset = nonRootBlock.getInt(Bytes.SIZEOF_INT * (1 + 
entryIndex));
+        int entryRelOffset = nonRootBlock
+            .getIntStrictlyForward(Bytes.SIZEOF_INT * (1 + entryIndex));
 
         nonRootBlock.position(entriesOffset + entryRelOffset);
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 13836ae..ae2f6c1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1,5 +1,4 @@
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -48,11 +47,12 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.IdLock;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
@@ -436,7 +436,7 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
   }
 
   protected static class HFileScannerImpl implements HFileScanner {
-    private ByteBuffer blockBuffer;
+    private ByteBuff blockBuffer;
     protected final boolean cacheBlocks;
     protected final boolean pread;
     protected final boolean isCompaction;
@@ -450,6 +450,8 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
     private int currTagsLen;
     private KeyValue.KeyOnlyKeyValue keyOnlyKv = new 
KeyValue.KeyOnlyKeyValue();
     protected HFileBlock block;
+    // A pair reused in blockSeek() so that we don't generate a lot of garbage objects
+    final Pair<ByteBuffer, Integer> pair = new Pair<ByteBuffer, Integer>();
 
     /**
      * The next indexed key is to keep track of the indexed key of the next 
data block.
@@ -510,19 +512,21 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
       // inlined and is not too big to compile. We also manage position in 
ByteBuffer ourselves
       // because it is faster than going via range-checked ByteBuffer methods 
or going through a
       // byte buffer array a byte at a time.
-      int p = blockBuffer.position() + blockBuffer.arrayOffset();
       // Get a long at a time rather than read two individual ints. In 
micro-benchmarking, even
       // with the extra bit-fiddling, this is order-of-magnitude faster than 
getting two ints.
-      long ll = Bytes.toLong(blockBuffer.array(), p);
+      // Trying to imitate what was done before - need to profile whether this
+      // is better than the earlier mark-and-reset approach.
+      // But ensure that we read a long instead of two ints.
+      long ll = blockBuffer.getLongStrictlyForward(blockBuffer.position());
       // Read top half as an int of key length and bottom int as value length
       this.currKeyLen = (int)(ll >> Integer.SIZE);
       this.currValueLen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll);
       checkKeyValueLen();
       // Move position past the key and value lengths and then beyond the key 
and value
-      p += (Bytes.SIZEOF_LONG + currKeyLen + currValueLen);
+      int p = blockBuffer.position() +  (Bytes.SIZEOF_LONG + currKeyLen + 
currValueLen);
       if (reader.getFileContext().isIncludesTags()) {
         // Tags length is a short.
-        this.currTagsLen = Bytes.toShort(blockBuffer.array(), p);
+        this.currTagsLen = blockBuffer.getShortStrictlyForward(p);
         checkTagsLen();
         p += (Bytes.SIZEOF_SHORT + currTagsLen);
       }
@@ -560,14 +564,14 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
       // This is Bytes#bytesToVint inlined so can save a few instructions in 
this hot method; i.e.
       // previous if one-byte vint, we'd redo the vint call to find int size.
       // Also the method is kept small so can be inlined.
-      byte firstByte = blockBuffer.array()[position];
+      byte firstByte = blockBuffer.getByteStrictlyForward(position);
       int len = WritableUtils.decodeVIntSize(firstByte);
       if (len == 1) {
         this.currMemstoreTS = firstByte;
       } else {
         long i = 0;
         for (int idx = 0; idx < len - 1; idx++) {
-          byte b = blockBuffer.array()[position + 1 + idx];
+          byte b = blockBuffer.get(position + 1 + idx);
           i = i << 8;
           i = i | (b & 0xFF);
         }
@@ -598,13 +602,14 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
      */
     protected int blockSeek(Cell key, boolean seekBefore) {
       int klen, vlen, tlen = 0;
-      long memstoreTS = 0;
-      int memstoreTSLen = 0;
       int lastKeyValueSize = -1;
+      int pos = -1;
       do {
-        blockBuffer.mark();
-        klen = blockBuffer.getInt();
-        vlen = blockBuffer.getInt();
+        pos = blockBuffer.position();
+        // Better to ensure that we use the BB Utils here
+        long ll = blockBuffer.getLongStrictlyForward(pos);
+        klen = (int)(ll >> Integer.SIZE);
+        vlen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll);
         if (klen < 0 || vlen < 0 || klen > blockBuffer.limit()
             || vlen > blockBuffer.limit()) {
           throw new IllegalStateException("Invalid klen " + klen + " or vlen "
@@ -612,77 +617,68 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
               + block.getOffset() + ", block length: " + blockBuffer.limit() + 
", position: "
               + blockBuffer.position() + " (without header).");
         }
-        ByteBufferUtils.skip(blockBuffer, klen + vlen);
+        pos += Bytes.SIZEOF_LONG;
+        blockBuffer.asSubByteBuffer(pos, klen, pair);
+        // TODO: change here after buffer-backed Cells come
+        keyOnlyKv.setKey(pair.getFirst().array(), 
pair.getFirst().arrayOffset() + pair.getSecond(),
+            klen);
+        int comp = reader.getComparator().compareKeyIgnoresMvcc(key, 
keyOnlyKv);
+        pos += klen + vlen;
         if (this.reader.getFileContext().isIncludesTags()) {
           // Read short as unsigned, high byte first
-          tlen = ((blockBuffer.get() & 0xff) << 8) ^ (blockBuffer.get() & 
0xff);
+          tlen = ((blockBuffer.getByteStrictlyForward(pos) & 0xff) << 8)
+              ^ (blockBuffer.getByteStrictlyForward(pos + 1) & 0xff);
           if (tlen < 0 || tlen > blockBuffer.limit()) {
             throw new IllegalStateException("Invalid tlen " + tlen + ". Block 
offset: "
                 + block.getOffset() + ", block length: " + blockBuffer.limit() 
+ ", position: "
                 + blockBuffer.position() + " (without header).");
           }
-          ByteBufferUtils.skip(blockBuffer, tlen);
+          // add the two bytes read for the tags.
+          pos += tlen + (Bytes.SIZEOF_SHORT);
         }
         if (this.reader.shouldIncludeMemstoreTS()) {
-          if (this.reader.isDecodeMemstoreTS()) {
-            memstoreTS = Bytes.readAsVLong(blockBuffer.array(), 
blockBuffer.arrayOffset()
-                + blockBuffer.position());
-            memstoreTSLen = WritableUtils.getVIntSize(memstoreTS);
-          } else {
-            memstoreTS = 0;
-            memstoreTSLen = 1;
-          }
+          // Directly read the mvcc based on current position
+          readMvccVersion(pos);
         }
-        blockBuffer.reset();
-        int keyOffset =
-          blockBuffer.arrayOffset() + blockBuffer.position() + 
(Bytes.SIZEOF_INT * 2);
-        keyOnlyKv.setKey(blockBuffer.array(), keyOffset, klen);
-        int comp = reader.getComparator().compareKeyIgnoresMvcc(key, 
keyOnlyKv);
-
         if (comp == 0) {
           if (seekBefore) {
             if (lastKeyValueSize < 0) {
               throw new IllegalStateException("blockSeek with seekBefore "
-                  + "at the first key of the block: key="
-                  + CellUtil.getCellKeyAsString(key)
+                  + "at the first key of the block: key=" + 
CellUtil.getCellKeyAsString(key)
                   + ", blockOffset=" + block.getOffset() + ", onDiskSize="
                   + block.getOnDiskSizeWithHeader());
             }
-            blockBuffer.position(blockBuffer.position() - lastKeyValueSize);
+            blockBuffer.moveBack(lastKeyValueSize);
             readKeyValueLen();
             return 1; // non exact match.
           }
           currKeyLen = klen;
           currValueLen = vlen;
           currTagsLen = tlen;
-          if (this.reader.shouldIncludeMemstoreTS()) {
-            currMemstoreTS = memstoreTS;
-            currMemstoreTSLen = memstoreTSLen;
-          }
           return 0; // indicate exact match
         } else if (comp < 0) {
-          if (lastKeyValueSize > 0)
-            blockBuffer.position(blockBuffer.position() - lastKeyValueSize);
+          if (lastKeyValueSize > 0) {
+            blockBuffer.moveBack(lastKeyValueSize);
+          }
           readKeyValueLen();
           if (lastKeyValueSize == -1 && blockBuffer.position() == 0) {
             return HConstants.INDEX_KEY_MAGIC;
           }
           return 1;
         }
-
         // The size of this key/value tuple, including key/value length fields.
-        lastKeyValueSize = klen + vlen + memstoreTSLen + KEY_VALUE_LEN_SIZE;
+        lastKeyValueSize = klen + vlen + currMemstoreTSLen + 
KEY_VALUE_LEN_SIZE;
         // include tag length also if tags included with KV
-        if (this.reader.getFileContext().isIncludesTags()) {
+        if (reader.getFileContext().isIncludesTags()) {
           lastKeyValueSize += tlen + Bytes.SIZEOF_SHORT;
         }
-        blockBuffer.position(blockBuffer.position() + lastKeyValueSize);
-      } while (blockBuffer.remaining() > 0);
+        blockBuffer.skip(lastKeyValueSize);
+      } while (blockBuffer.hasRemaining());
 
       // Seek to the last key we successfully read. This will happen if this is
       // the last key/value pair in the file, in which case the following call
       // to next() has to return false.
-      blockBuffer.position(blockBuffer.position() - lastKeyValueSize);
+      blockBuffer.moveBack(lastKeyValueSize);
       readKeyValueLen();
       return 1; // didn't exactly find it.
     }
@@ -849,6 +845,7 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
     @Override
     public ByteBuffer getValue() {
       assertSeeked();
+      // TODO : change here after BufferBacked cells come
       return ByteBuffer.wrap(
           blockBuffer.array(),
           blockBuffer.arrayOffset() + blockBuffer.position()
@@ -1030,15 +1027,14 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
     }
 
     protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) {
-      ByteBuffer buffer = curBlock.getBufferWithoutHeader();
+      ByteBuff buffer = curBlock.getBufferWithoutHeader();
       // It is safe to manipulate this buffer because we own the buffer object.
       buffer.rewind();
       int klen = buffer.getInt();
-      buffer.getInt();
-      ByteBuffer keyBuff = buffer.slice();
-      keyBuff.limit(klen);
-      keyBuff.rewind();
-      // Create a KeyOnlyKv now. 
+      buffer.skip(Bytes.SIZEOF_INT);// Skip value len part
+      ByteBuffer keyBuff = buffer.asSubByteBuffer(klen);
+      keyBuff.limit(keyBuff.position() + klen);
+      // Create a KeyOnlyKv now.
       // TODO : Will change when Buffer backed cells come
       return new KeyValue.KeyOnlyKeyValue(keyBuff.array(), 
keyBuff.arrayOffset()
           + keyBuff.position(), klen);
@@ -1188,7 +1184,7 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
    * @throws IOException
    */
   @Override
-  public ByteBuffer getMetaBlock(String metaBlockName, boolean cacheBlock)
+  public ByteBuff getMetaBlock(String metaBlockName, boolean cacheBlock)
       throws IOException {
     if (trailer.getMetaIndexCount() == 0) {
       return null; // there are no meta blocks
@@ -1457,22 +1453,22 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
           + " doesn't support data block encoding "
           + DataBlockEncoding.getNameFromId(dataBlockEncoderId));
       }
-
-      seeker.setCurrentBuffer(getEncodedBuffer(newBlock));
+      ByteBuff encodedBuffer = getEncodedBuffer(newBlock);
+      // TODO : Change the DBEs to work with ByteBuffs
+      
seeker.setCurrentBuffer(encodedBuffer.asSubByteBuffer(encodedBuffer.limit()));
       blockFetches++;
 
       // Reset the next indexed key
       this.nextIndexedKey = null;
     }
 
-    private ByteBuffer getEncodedBuffer(HFileBlock newBlock) {
-      ByteBuffer origBlock = newBlock.getBufferReadOnly();
-      ByteBuffer encodedBlock = ByteBuffer.wrap(origBlock.array(),
-          origBlock.arrayOffset() + newBlock.headerSize() +
-          DataBlockEncoding.ID_SIZE,
-          newBlock.getUncompressedSizeWithoutHeader() -
-          DataBlockEncoding.ID_SIZE).slice();
-      return encodedBlock;
+    private ByteBuff getEncodedBuffer(HFileBlock newBlock) {
+      ByteBuff origBlock = newBlock.getBufferReadOnly();
+      int pos = newBlock.headerSize() + DataBlockEncoding.ID_SIZE;
+      origBlock.position(pos);
+      origBlock
+          .limit(pos + newBlock.getUncompressedSizeWithoutHeader() - 
DataBlockEncoding.ID_SIZE);
+      return origBlock.slice();
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
index 57e7f28..f12f3b4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
@@ -29,10 +29,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
 
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
@@ -255,7 +258,7 @@ public class MemcachedBlockCache implements BlockCache {
     @Override
     public HFileBlock decode(CachedData d) {
       try {
-        ByteBuffer buf = ByteBuffer.wrap(d.getData());
+        ByteBuff buf = new SingleByteBuff(ByteBuffer.wrap(d.getData()));
         return (HFileBlock) HFileBlock.blockDeserializer.deserialize(buf, 
true);
       } catch (IOException e) {
         LOG.warn("Error deserializing data from memcached",e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index dfada87..f05a255 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -62,6 +62,8 @@ import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.util.ConcurrentIndex;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -416,9 +418,12 @@ public class BucketCache implements BlockCache, HeapSize {
         // maybe changed. If we lock BlockCacheKey instead of offset, then we 
can only check
         // existence here.
         if (bucketEntry.equals(backingMap.get(key))) {
+          // TODO : change this area - should be removed after server cells and
+          // 12295 are available
           int len = bucketEntry.getLength();
-          ByteBuffer bb = ByteBuffer.allocate(len);
-          int lenRead = ioEngine.read(bb, bucketEntry.offset());
+          ByteBuffer buf = ByteBuffer.allocate(len);
+          int lenRead = ioEngine.read(buf, bucketEntry.offset());
+          ByteBuff bb = new SingleByteBuff(buf);
           if (lenRead != len) {
             throw new RuntimeException("Only " + lenRead + " bytes read, " + 
len + " expected");
           }
@@ -1269,7 +1274,7 @@ public class BucketCache implements BlockCache, HeapSize {
       try {
         if (data instanceof HFileBlock) {
           HFileBlock block = (HFileBlock) data;
-          ByteBuffer sliceBuf = block.getBufferReadOnlyWithHeader();
+          ByteBuff sliceBuf = block.getBufferReadOnlyWithHeader();
           sliceBuf.rewind();
           assert len == sliceBuf.limit() + 
HFileBlock.EXTRA_SERIALIZATION_SPACE ||
             len == sliceBuf.limit() + block.headerSize() + 
HFileBlock.EXTRA_SERIALIZATION_SPACE;

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
index de10667..03c65de 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.util.ByteBufferArray;
 
 /**
@@ -78,6 +79,11 @@ public class ByteBufferIOEngine implements IOEngine {
         dstBuffer.arrayOffset());
   }
 
+  @Override
+  public ByteBuff read(long offset, int len) throws IOException {
+    return bufferArray.asSubByteBuff(offset, len);
+  }
+
   /**
    * Transfers data from the given byte buffer to the buffer array
    * @param srcBuffer the given byte buffer from which bytes are to be read
@@ -92,6 +98,14 @@ public class ByteBufferIOEngine implements IOEngine {
         srcBuffer.arrayOffset());
   }
 
+  @Override
+  public void write(ByteBuff srcBuffer, long offset) throws IOException {
+    // When caching a block into BucketCache there will be a single buffer
+    // backing this HFileBlock. This will work for now, but if we get a DBB
+    // from the DFS itself then this may not hold true.
+    assert srcBuffer.hasArray();
+    bufferArray.putMultiple(offset, srcBuffer.remaining(), srcBuffer.array(),
+        srcBuffer.arrayOffset());
+  }
   /**
    * No operation for the sync in the memory IO engine
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index 7b6b25f..b1960c4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -26,6 +26,8 @@ import java.nio.channels.FileChannel;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -125,4 +127,20 @@ public class FileIOEngine implements IOEngine {
       LOG.error("Can't shutdown cleanly", ex);
     }
   }
+
+  @Override
+  public ByteBuff read(long offset, int len) throws IOException {
+    ByteBuffer dstBuffer = ByteBuffer.allocate(len);
+    int read = read(dstBuffer, offset);
+    dstBuffer.limit(read);
+    return new SingleByteBuff(dstBuffer);
+  }
+
+  @Override
+  public void write(ByteBuff srcBuffer, long offset) throws IOException {
+    // When caching a block into BucketCache there will be a single buffer
+    // backing this HFileBlock.
+    assert srcBuffer.hasArray();
+    fileChannel.write(
+        ByteBuffer.wrap(srcBuffer.array(), srcBuffer.arrayOffset(), 
srcBuffer.remaining()), offset);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
index 430c5af..862042f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 
 /**
  * A class implementing IOEngine interface supports data services for
@@ -40,10 +41,22 @@ public interface IOEngine {
    * @param offset The offset in the IO engine where the first byte to be read
    * @return number of bytes read
    * @throws IOException
+   * @throws RuntimeException when the length of the ByteBuff read is less 
than 'len'
    */
   int read(ByteBuffer dstBuffer, long offset) throws IOException;
 
   /**
+   * Transfers data from IOEngine at the given offset to a ByteBuff
+   * @param offset the offset from which the underlying buckets should be read
+   * @param len the length up to which the buckets should be read
+   * @return the ByteBuff formed from the underlying ByteBuffers forming the
+   * buckets
+   * @throws IOException
+   * @throws RuntimeException when the length of the ByteBuff read is less 
than 'len'
+   */
+  ByteBuff read(long offset, int len) throws IOException;
+
+  /**
    * Transfers data from the given byte buffer to IOEngine
    * @param srcBuffer the given byte buffer from which bytes are to be read
    * @param offset The offset in the IO engine where the first byte to be
@@ -53,6 +66,14 @@ public interface IOEngine {
   void write(ByteBuffer srcBuffer, long offset) throws IOException;
 
   /**
+   * Transfers the data from the given ByteBuff to IOEngine
+   * @param srcBuffer the given ByteBuff from which bytes are to be read
+   * @param offset the offset in the IO engine where the first byte is to be written
+   * @throws IOException
+   */
+  void write(ByteBuff srcBuffer, long offset) throws IOException;
+
+  /**
    * Sync the data to IOEngine after writing
    * @throws IOException
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 50da338..9b5e222 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -38,8 +38,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 891a59d..eb76440 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.DataInput;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -51,6 +50,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.regionserver.compactions.Compactor;
 import org.apache.hadoop.hbase.util.BloomFilter;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
@@ -1285,7 +1285,7 @@ public class StoreFile {
 
       try {
         boolean shouldCheckBloom;
-        ByteBuffer bloom;
+        ByteBuff bloom;
         if (bloomFilter.supportsAutoLoading()) {
           bloom = null;
           shouldCheckBloom = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 95e0d8e..37573c2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.IsolationLevel;
 import org.apache.hadoop.hbase.client.Scan;

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java
index 315ed97..197ff12 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java
@@ -18,10 +18,10 @@
  */
 package org.apache.hadoop.hbase.util;
 
-import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 
 /**
  *
@@ -83,7 +83,7 @@ public interface BloomFilter extends BloomFilterBase {
    *        is supported.
    * @return true if matched by bloom, false if not
    */
-  boolean contains(Cell keyCell, ByteBuffer bloom);
+  boolean contains(Cell keyCell, ByteBuff bloom);
 
   /**
    * Check if the specified key is contained in the bloom filter.
@@ -95,7 +95,7 @@ public interface BloomFilter extends BloomFilterBase {
    *        is supported.
    * @return true if matched by bloom, false if not
    */
-  boolean contains(byte[] buf, int offset, int length, ByteBuffer bloom);
+  boolean contains(byte[] buf, int offset, int length, ByteBuff bloom);
 
   /**
    * @return true if this Bloom filter can automatically load its data

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
index 9fff872..1e77984 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
@@ -26,8 +26,6 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * The basic building block for the {@link 
org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter}
  */
@@ -183,35 +181,6 @@ public class BloomFilterChunk implements BloomFilterBase {
     ++this.keyCount;
   }
 
-  @VisibleForTesting
-  boolean contains(byte [] buf) {
-    return contains(buf, 0, buf.length, this.bloom);
-  }
-
-  @VisibleForTesting
-  boolean contains(byte [] buf, int offset, int length) {
-    return contains(buf, offset, length, bloom);
-  }
-
-  @VisibleForTesting
-  boolean contains(byte[] buf, ByteBuffer bloom) {
-    return contains(buf, 0, buf.length, bloom);
-  }
-
-  public boolean contains(byte[] buf, int offset, int length, ByteBuffer 
theBloom) {
-    if (theBloom == null) {
-      theBloom = bloom;
-    }
-
-    if (theBloom.limit() != byteSize) {
-      throw new IllegalArgumentException("Bloom does not match expected size:"
-          + " theBloom.limit()=" + theBloom.limit() + ", byteSize=" + 
byteSize);
-    }
-
-    return BloomFilterUtil.contains(buf, offset, length, theBloom, 0, (int) 
byteSize, hash,
-        hashCount);
-  }
-
   //---------------------------------------------------------------------------
   /** Private helpers */
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java
index dd90b2b..fd30710 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java
@@ -22,9 +22,10 @@ import java.text.NumberFormat;
 import java.util.Random;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.nio.ByteBuff;
 
 /**
- * Utility methods related to BloomFilters 
+ * Utility methods related to BloomFilters
  */
 @InterfaceAudience.Private
 public final class BloomFilterUtil {
@@ -193,7 +194,7 @@ public final class BloomFilterUtil {
   }
 
   public static boolean contains(byte[] buf, int offset, int length,
-      ByteBuffer bloomBuf, int bloomOffset, int bloomSize, Hash hash,
+      ByteBuff bloomBuf, int bloomOffset, int bloomSize, Hash hash,
       int hashCount) {
 
     int hash1 = hash.hash(buf, offset, length, 0);
@@ -206,7 +207,7 @@ public final class BloomFilterUtil {
       for (int i = 0; i < hashCount; i++) {
         int hashLoc = Math.abs(compositeHash % bloomBitSize);
         compositeHash += hash2;
-        if (!get(hashLoc, bloomBuf, bloomOffset)) {
+        if (!checkBit(hashLoc, bloomBuf, bloomOffset)) {
           return false;
         }
       }
@@ -214,29 +215,28 @@ public final class BloomFilterUtil {
       // Test mode with "fake lookups" to estimate "ideal false positive rate".
       for (int i = 0; i < hashCount; i++) {
         int hashLoc = randomGeneratorForTest.nextInt(bloomBitSize);
-        if (!get(hashLoc, bloomBuf, bloomOffset)){
+        if (!checkBit(hashLoc, bloomBuf, bloomOffset)){
           return false;
         }
       }
     }
     return true;
   }
-  
+
   /**
    * Check if bit at specified index is 1.
    *
    * @param pos index of bit
    * @return true if bit at specified index is 1, false if 0.
    */
-  public static boolean get(int pos, ByteBuffer bloomBuf, int bloomOffset) {
+   static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) {
     int bytePos = pos >> 3; //pos / 8
     int bitPos = pos & 0x7; //pos % 8
-    // TODO access this via Util API which can do Unsafe access if possible(?)
     byte curByte = bloomBuf.get(bloomOffset + bytePos);
     curByte &= bitvals[bitPos];
     return (curByte != 0);
   }
-  
+
   /**
    * A human-readable string with statistics for the given Bloom filter.
    *

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
index 49594bc..92e432c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
@@ -24,7 +24,6 @@ import 
org.apache.hadoop.hbase.InterProcessLock.MetadataHandler;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HBaseFsck;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 0dd8bea..5ab7424 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.filter.RegexStringComparator;
 import org.apache.hadoop.hbase.filter.RowFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.filter.WhileMatchFilter;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -182,6 +183,7 @@ public class TestFromClientSide {
      final byte[] T3 = Bytes.toBytes("T3");
      HColumnDescriptor hcd = new HColumnDescriptor(FAMILY)
          .setKeepDeletedCells(KeepDeletedCells.TRUE)
+         .setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE)
          .setMaxVersions(3);
 
      HTableDescriptor desc = new HTableDescriptor(TABLENAME);

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
index 48cc5b9..f4160db 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.io.compress.Compression;
 import 
org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.BufferGrabbingByteArrayOutputStream;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -303,7 +304,8 @@ public class TestDataBlockEncoders {
       DataBlockEncoder encoder = encoding.getEncoder();
       ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
           getEncodingContext(Compression.Algorithm.NONE, encoding));
-      Cell key = encoder.getFirstKeyCellInBlock(encodedBuffer);
+      Cell key = encoder.getFirstKeyCellInBlock(new MultiByteBuff(
+          encodedBuffer));
       KeyValue keyBuffer = null;
       if(encoding == DataBlockEncoding.PREFIX_TREE) {
         // This is not an actual case. So the Prefix tree block is not loaded 
in case of Prefix_tree

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
index b0a2ba2..122b7fc 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
@@ -39,6 +39,9 @@ import 
org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.util.ChecksumType;
 
 public class CacheTestUtils {
@@ -123,7 +126,7 @@ public class CacheTestUtils {
   public static void testCacheSimple(BlockCache toBeTested, int blockSize,
       int numBlocks) throws Exception {
 
-    HFileBlockPair[] blocks = generateHFileBlocks(numBlocks, blockSize);
+    HFileBlockPair[] blocks = generateHFileBlocks(blockSize, numBlocks);
     // Confirm empty
     for (HFileBlockPair block : blocks) {
       assertNull(toBeTested.getBlock(block.blockName, true, false, true));
@@ -253,7 +256,7 @@ public class CacheTestUtils {
       new CacheableDeserializer<Cacheable>() {
 
       @Override
-      public Cacheable deserialize(ByteBuffer b) throws IOException {
+      public Cacheable deserialize(ByteBuff b) throws IOException {
         int len = b.getInt();
         Thread.yield();
         byte buf[] = new byte[len];
@@ -267,7 +270,7 @@ public class CacheTestUtils {
       }
 
       @Override
-      public Cacheable deserialize(ByteBuffer b, boolean reuse)
+      public Cacheable deserialize(ByteBuff b, boolean reuse)
           throws IOException {
         return deserialize(b);
       }
@@ -326,8 +329,8 @@ public class CacheTestUtils {
       // declare our data size to be smaller than it by the serialization space
       // required.
 
-      ByteBuffer cachedBuffer = ByteBuffer.allocate(blockSize
-          - HFileBlock.EXTRA_SERIALIZATION_SPACE);
+      SingleByteBuff cachedBuffer = new 
SingleByteBuff(ByteBuffer.allocate(blockSize
+          - HFileBlock.EXTRA_SERIALIZATION_SPACE));
       rand.nextBytes(cachedBuffer.array());
       cachedBuffer.rewind();
       int onDiskSizeWithoutHeader = blockSize

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index ce78a37..110d92b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -38,6 +38,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
 import org.junit.Before;
@@ -71,13 +73,13 @@ public class TestCacheConfig {
     }
 
     @Override
-    public Cacheable deserialize(ByteBuffer b, boolean reuse) throws 
IOException {
+    public Cacheable deserialize(ByteBuff b, boolean reuse) throws IOException 
{
       LOG.info("Deserialized " + b + ", reuse=" + reuse);
       return cacheable;
     }
 
     @Override
-    public Cacheable deserialize(ByteBuffer b) throws IOException {
+    public Cacheable deserialize(ByteBuff b) throws IOException {
       LOG.info("Deserialized " + b);
       return cacheable;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
index de8d3b9..ca62bf5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.junit.Before;
 import org.junit.Test;
@@ -126,7 +128,7 @@ public class TestChecksum {
       HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(
           is, totalSize, (HFileSystem) fs, path, meta);
       HFileBlock b = hbr.readBlockData(0, -1, -1, false);
-      ByteBuffer data = b.getBufferWithoutHeader();
+      ByteBuff data = b.getBufferWithoutHeader();
       for (int i = 0; i < 1000; i++) {
         assertEquals(i, data.getInt());
       }
@@ -194,7 +196,7 @@ public class TestChecksum {
         assertEquals(algo == GZ ? 2173 : 4936, 
                      b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
         // read data back from the hfile, exclude header and checksum
-        ByteBuffer bb = b.unpack(meta, hbr).getBufferWithoutHeader(); // read 
back data
+        ByteBuff bb = b.unpack(meta, hbr).getBufferWithoutHeader(); // read 
back data
         DataInputStream in = new DataInputStream(
                                new ByteArrayInputStream(
                                  bb.array(), bb.arrayOffset(), bb.limit()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index af8a6cc..3258991 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -45,6 +45,8 @@ import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -326,11 +328,14 @@ public class TestHFile extends HBaseTestCase {
 
   private void readNumMetablocks(Reader reader, int n) throws IOException {
     for (int i = 0; i < n; i++) {
-      ByteBuffer actual = reader.getMetaBlock("HFileMeta" + i, false);
+      ByteBuff actual = reader.getMetaBlock("HFileMeta" + i, false);
       ByteBuffer expected =
         ByteBuffer.wrap(("something to test" + i).getBytes());
-      assertEquals("failed to match metadata",
-        Bytes.toStringBinary(expected), Bytes.toStringBinary(actual));
+      assertEquals(
+          "failed to match metadata",
+          Bytes.toStringBinary(expected),
+          Bytes.toStringBinary(actual.array(), actual.arrayOffset() + 
actual.position(),
+              actual.capacity()));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index dfc5569..c6aba43 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -1,5 +1,4 @@
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -57,6 +56,9 @@ import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -437,7 +439,7 @@ public class TestHFileBlock {
               assertTrue("Packed heapSize should be < unpacked heapSize",
                 packedHeapsize < blockUnpacked.heapSize());
             }
-            ByteBuffer actualBuffer = blockUnpacked.getBufferWithoutHeader();
+            ByteBuff actualBuffer = blockUnpacked.getBufferWithoutHeader();
             if (encoding != DataBlockEncoding.NONE) {
               // We expect a two-byte big-endian encoding id.
               assertEquals(
@@ -454,14 +456,15 @@ public class TestHFileBlock {
             expectedBuffer.rewind();
 
             // test if content matches, produce nice message
-            assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding, 
pread);
+            assertBuffersEqual(new SingleByteBuff(expectedBuffer), 
actualBuffer, algo, encoding,
+                pread);
 
             // test serialized blocks
             for (boolean reuseBuffer : new boolean[] { false, true }) {
               ByteBuffer serialized = 
ByteBuffer.allocate(blockFromHFile.getSerializedLength());
               blockFromHFile.serialize(serialized);
-              HFileBlock deserialized =
-                (HFileBlock) 
blockFromHFile.getDeserializer().deserialize(serialized, reuseBuffer);
+              HFileBlock deserialized = (HFileBlock) 
blockFromHFile.getDeserializer().deserialize(
+                  new SingleByteBuff(serialized), reuseBuffer);
               assertEquals(
                 "Serialization did not preserve block state. reuseBuffer=" + 
reuseBuffer,
                 blockFromHFile, deserialized);
@@ -483,8 +486,8 @@ public class TestHFileBlock {
     return String.format("compression %s, encoding %s, pread %s", compression, 
encoding, pread);
   }
 
-  static void assertBuffersEqual(ByteBuffer expectedBuffer,
-      ByteBuffer actualBuffer, Compression.Algorithm compression,
+  static void assertBuffersEqual(ByteBuff expectedBuffer,
+      ByteBuff actualBuffer, Compression.Algorithm compression,
       DataBlockEncoding encoding, boolean pread) {
     if (!actualBuffer.equals(expectedBuffer)) {
       int prefix = 0;
@@ -506,7 +509,7 @@ public class TestHFileBlock {
    * Convert a few next bytes in the given buffer at the given position to
    * string. Used for error messages.
    */
-  private static String nextBytesToStr(ByteBuffer buf, int pos) {
+  private static String nextBytesToStr(ByteBuff buf, int pos) {
     int maxBytes = buf.limit() - pos;
     int numBytes = Math.min(16, maxBytes);
     return Bytes.toStringBinary(buf.array(), buf.arrayOffset() + pos,
@@ -595,7 +598,7 @@ public class TestHFileBlock {
               b = b.unpack(meta, hbr);
               // b's buffer has header + data + checksum while
               // expectedContents have header + data only
-              ByteBuffer bufRead = b.getBufferWithHeader();
+              ByteBuff bufRead = b.getBufferWithHeader();
               ByteBuffer bufExpected = expectedContents.get(i);
               boolean bytesAreCorrect = Bytes.compareTo(bufRead.array(),
                   bufRead.arrayOffset(),
@@ -617,7 +620,7 @@ public class TestHFileBlock {
                   bufRead.arrayOffset(), Math.min(32 + 10, bufRead.limit()));
                 if (detailedLogging) {
                   LOG.warn("expected header" +
-                           HFileBlock.toStringHeader(bufExpected) +
+                           HFileBlock.toStringHeader(new 
SingleByteBuff(bufExpected)) +
                            "\nfound    header" +
                            HFileBlock.toStringHeader(bufRead));
                   LOG.warn("bufread offset " + bufRead.arrayOffset() +
@@ -821,9 +824,9 @@ public class TestHFileBlock {
 
   protected void testBlockHeapSizeInternals() {
     if (ClassSize.is32BitJVM()) {
-      assertTrue(HFileBlock.BYTE_BUFFER_HEAP_SIZE == 64);
+      assertTrue(HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE == 64);
     } else {
-      assertTrue(HFileBlock.BYTE_BUFFER_HEAP_SIZE == 80);
+      assertTrue(HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE == 104);
     }
 
     for (int size : new int[] { 100, 256, 12345 }) {
@@ -839,9 +842,9 @@ public class TestHFileBlock {
       HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
           HFileBlock.FILL_HEADER, -1,
           0, meta);
-      long byteBufferExpectedSize =
-          ClassSize.align(ClassSize.estimateBase(buf.getClass(), true)
-              + HConstants.HFILEBLOCK_HEADER_SIZE + size);
+      long byteBufferExpectedSize = ClassSize.align(ClassSize.estimateBase(
+          new MultiByteBuff(buf).getClass(), true)
+          + HConstants.HFILEBLOCK_HEADER_SIZE + size);
       long hfileMetaSize =  
ClassSize.align(ClassSize.estimateBase(HFileContext.class, true));
       long hfileBlockExpectedSize =
           ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
index ebe35b3..9efb5fc 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
@@ -48,6 +48,9 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -310,7 +313,7 @@ public class TestHFileBlockCompatibility {
 
             assertEquals((int) encodedSizes.get(blockId),
                 b.getUncompressedSizeWithoutHeader());
-            ByteBuffer actualBuffer = b.getBufferWithoutHeader();
+            ByteBuff actualBuffer = b.getBufferWithoutHeader();
             if (encoding != DataBlockEncoding.NONE) {
               // We expect a two-byte big-endian encoding id.
               assertEquals(0, actualBuffer.get(0));
@@ -323,7 +326,7 @@ public class TestHFileBlockCompatibility {
             expectedBuffer.rewind();
 
             // test if content matches, produce nice message
-            TestHFileBlock.assertBuffersEqual(expectedBuffer, actualBuffer,
+            TestHFileBlock.assertBuffersEqual(new 
SingleByteBuff(expectedBuffer), actualBuffer,
               algo, encoding, pread);
           }
           is.close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/834f87b2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index 279c4ea..bc82aee 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -52,6 +52,8 @@ import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexChunk;
 import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -407,7 +409,7 @@ public class TestHFileBlockIndex {
       KeyValue.KeyOnlyKeyValue cell = new KeyValue.KeyOnlyKeyValue(
           arrayHoldingKey, searchKey.length / 2, searchKey.length);
       int searchResult = BlockIndexReader.binarySearchNonRootIndex(cell,
-          nonRootIndex, CellComparator.COMPARATOR);
+          new MultiByteBuff(nonRootIndex), CellComparator.COMPARATOR);
       String lookupFailureMsg = "Failed to look up key #" + i + " ("
           + Bytes.toStringBinary(searchKey) + ")";
 
@@ -432,7 +434,7 @@ public class TestHFileBlockIndex {
       // Now test we can get the offset and the on-disk-size using a
       // higher-level API function.s
       boolean locateBlockResult =
-          (BlockIndexReader.locateNonRootIndexEntry(nonRootIndex, cell,
+          (BlockIndexReader.locateNonRootIndexEntry(new 
MultiByteBuff(nonRootIndex), cell,
           CellComparator.COMPARATOR) != -1);
 
       if (i == 0) {
@@ -605,15 +607,15 @@ public class TestHFileBlockIndex {
       while ((block = iter.nextBlock()) != null) {
         if (block.getBlockType() != BlockType.LEAF_INDEX)
           return;
-        ByteBuffer b = block.getBufferReadOnly();
+        ByteBuff b = block.getBufferReadOnly();
         int n = b.getInt();
         // One int for the number of items, and n + 1 for the secondary index.
         int entriesOffset = Bytes.SIZEOF_INT * (n + 2);
 
         // Get all the keys from the leaf index block. S
         for (int i = 0; i < n; ++i) {
-          int keyRelOffset = b.getInt(Bytes.SIZEOF_INT * (i + 1));
-          int nextKeyRelOffset = b.getInt(Bytes.SIZEOF_INT * (i + 2));
+          int keyRelOffset = b.getIntStrictlyForward(Bytes.SIZEOF_INT * (i + 
1));
+          int nextKeyRelOffset = b.getIntStrictlyForward(Bytes.SIZEOF_INT * (i 
+ 2));
           int keyLen = nextKeyRelOffset - keyRelOffset;
           int keyOffset = b.arrayOffset() + entriesOffset + keyRelOffset +
               HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD;

Reply via email to