(hbase-thirdparty) branch master updated: HBASE-28493 [hbase-thirdparty] Bump protobuf version (#117)
This is an automated email from the ASF dual-hosted git repository. zhangduo pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase-thirdparty.git The following commit(s) were added to refs/heads/master by this push: new d7883b5 HBASE-28493 [hbase-thirdparty] Bump protobuf version (#117) d7883b5 is described below commit d7883b5dde5a06014318d3140cd17ce1b1994c41 Author: Duo Zhang AuthorDate: Wed Apr 10 11:44:26 2024 +0800 HBASE-28493 [hbase-thirdparty] Bump protobuf version (#117) Signed-off-by: Nick Dimiduk Signed-off-by: Andrew Purtell --- .../src/main/patches/HBASE-15789_V2.patch | 29 +++--- .../src/main/patches/HBASE-17239.patch | 8 +++--- pom.xml| 2 +- 3 files changed, 19 insertions(+), 20 deletions(-) diff --git a/hbase-shaded-protobuf/src/main/patches/HBASE-15789_V2.patch b/hbase-shaded-protobuf/src/main/patches/HBASE-15789_V2.patch index 8e09530..5bca44c 100644 --- a/hbase-shaded-protobuf/src/main/patches/HBASE-15789_V2.patch +++ b/hbase-shaded-protobuf/src/main/patches/HBASE-15789_V2.patch @@ -1,8 +1,8 @@ diff --git a/src/main/java/com/google/protobuf/ByteBufferWriter.java b/src/main/java/com/google/protobuf/ByteBufferWriter.java -index 3970b0ea5..571b0a975 100644 +index e7d9eec8e..e17fefd39 100644 --- a/src/main/java/com/google/protobuf/ByteBufferWriter.java +++ b/src/main/java/com/google/protobuf/ByteBufferWriter.java -@@ -111,7 +111,7 @@ final class ByteBufferWriter { +@@ -88,7 +88,7 @@ final class ByteBufferWriter { } } @@ -359,13 +359,13 @@ index 0..320977290 + } +} diff --git a/src/main/java/com/google/protobuf/ByteString.java b/src/main/java/com/google/protobuf/ByteString.java -index 2569d5dad..bebee3373 100644 +index 8ba729c8e..62aae86ee 100644 --- a/src/main/java/com/google/protobuf/ByteString.java +++ b/src/main/java/com/google/protobuf/ByteString.java -@@ -445,6 +445,13 @@ public abstract class ByteString implements Iterable, Serializable { +@@ -422,6 +422,13 @@ public abstract class ByteString implements Iterable, Serializable { } } - + + /** + * Wraps the given bytes into a {@code ByteString}. Intended for internal only usage. + */ @@ -377,14 +377,13 @@ index 2569d5dad..bebee3373 100644 * Wraps the given bytes into a {@code ByteString}. Intended for internal usage within the library * to force a classload of ByteString before LiteralByteString. diff --git a/src/main/java/com/google/protobuf/CodedInputStream.java b/src/main/java/com/google/protobuf/CodedInputStream.java -index ae94ee887..ce1d05875 100644 +index 224ced529..64c3efa37 100644 --- a/src/main/java/com/google/protobuf/CodedInputStream.java +++ b/src/main/java/com/google/protobuf/CodedInputStream.java -@@ -200,6 +200,16 @@ public abstract class CodedInputStream { - throw InvalidProtocolBufferException.recursionLimitExceeded(); +@@ -178,6 +178,15 @@ public abstract class CodedInputStream { } } -+ + + /** Create a new CodedInputStream wrapping the given {@link ByteInput}. */ + public static CodedInputStream newInstance(ByteInput buf, boolean bufferIsImmutable) { +return new ByteInputDecoder(buf, bufferIsImmutable); @@ -397,7 +396,7 @@ index ae94ee887..ce1d05875 100644 /** Disable construction/inheritance outside of this class. 
*/ private CodedInputStream() {} -@@ -3965,4 +3975,652 @@ public abstract class CodedInputStream { +@@ -3957,4 +3966,652 @@ public abstract class CodedInputStream { } } } @@ -1051,10 +1050,10 @@ index ae94ee887..ce1d05875 100644 + } } diff --git a/src/main/java/com/google/protobuf/Utf8.java b/src/main/java/com/google/protobuf/Utf8.java -index 7c6823d91..20afe8255 100644 +index d52006754..92ed1f1f7 100644 --- a/src/main/java/com/google/protobuf/Utf8.java +++ b/src/main/java/com/google/protobuf/Utf8.java -@@ -219,6 +219,16 @@ final class Utf8 { +@@ -196,6 +196,16 @@ final class Utf8 { } } @@ -1071,7 +1070,7 @@ index 7c6823d91..20afe8255 100644 // These UTF-8 handling methods are copied from Guava's Utf8 class with a modification to throw // a protocol buffer local exception. This exception is then caught in CodedOutputStream so it can // fallback to more lenient behavior. -@@ -341,6 +351,24 @@ final class Utf8 { +@@ -318,6 +328,24 @@ final class Utf8 { return processor.decodeUtf8(bytes, index, size); } @@ -1096,10 +1095,10 @@ index 7c6823d91..20afe8255 100644 /** * Encodes the given characters to the target {@link ByteBuffer} using UTF-8 encoding. * -@@ -717,6 +745,169 @@ final class Utf8 { +@@ -694,6 +722,169 @@ final class Utf8 { return new String(resultArr, 0, resultPos); } - + +public boolean isValidUtf8(ByteInput buffer, int index, int limit) { + return partialIsValidUtf8(COMPLETE, buffer, index, limit) == COMPLETE; +} diff --git
(hbase) branch master updated: HBASE-28448 CompressionTest hangs when run over a Ozone ofs path (#5771)
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
     new adc79a0a9c2  HBASE-28448 CompressionTest hangs when run over a Ozone ofs path (#5771)
adc79a0a9c2 is described below

commit adc79a0a9c2b579915a902f611a66edfddf3149c
Author: Wei-Chiu Chuang
AuthorDate: Tue Apr 9 10:55:39 2024 -0700

    HBASE-28448 CompressionTest hangs when run over a Ozone ofs path (#5771)

    This bug was found via HDDS-10564.
---
 .../apache/hadoop/hbase/util/CompressionTest.java | 21 +++--
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
index 0870dbe6f9b..9065ebf116b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
@@ -152,17 +152,18 @@ public class CompressionTest {
 
     Configuration conf = new Configuration();
     Path path = new Path(args[0]);
-    FileSystem fs = path.getFileSystem(conf);
-    if (fs.exists(path)) {
-      System.err.println("The specified path exists, aborting!");
-      System.exit(1);
-    }
+    try (FileSystem fs = path.getFileSystem(conf)) {
+      if (fs.exists(path)) {
+        System.err.println("The specified path exists, aborting!");
+        System.exit(1);
+      }
 
-    try {
-      doSmokeTest(fs, path, args[1]);
-    } finally {
-      fs.delete(path, false);
+      try {
+        doSmokeTest(fs, path, args[1]);
+      } finally {
+        fs.delete(path, false);
+      }
+      System.out.println("SUCCESS");
     }
-    System.out.println("SUCCESS");
   }
 }
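The fix above works because Hadoop's FileSystem is Closeable: wrapping it in try-with-resources guarantees the filesystem client is shut down before main() returns, rather than being left open, which is presumably what kept the tool alive against an Ozone ofs path. A minimal standalone sketch of the same pattern, using only the Hadoop APIs visible in the diff (the demo path and the "do the actual work" placeholder are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical path; an Ozone URI such as ofs://... would resolve to the Ozone
    // FileSystem implementation instead of the local one used here.
    Path path = new Path(args.length > 0 ? args[0] : "file:///tmp/compressiontest-demo");
    // try-with-resources: the FileSystem is closed even if the body throws,
    // so no client resources linger after main() returns.
    try (FileSystem fs = path.getFileSystem(conf)) {
      if (fs.exists(path)) {
        System.err.println("The specified path exists, aborting!");
        return;
      }
      try {
        // ... do the actual work against fs here (the real tool runs its smoke test) ...
      } finally {
        fs.delete(path, false); // best-effort cleanup of the temporary file
      }
      System.out.println("SUCCESS");
    }
  }
}

Worth noting: FileSystem instances are cached by scheme and authority by default, so closing one in a long-lived process can affect other callers of the same instance; in a short-lived CLI like CompressionTest that is not a concern.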
(hbase-site) branch asf-site updated: INFRA-10751 Empty commit
This is an automated email from the ASF dual-hosted git repository.

git-site-role pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/hbase-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new 2d734e0df60  INFRA-10751 Empty commit
2d734e0df60 is described below

commit 2d734e0df60bfd961ce47de4a5e15f5f07f7d519
Author: jenkins
AuthorDate: Tue Apr 9 14:44:51 2024 +

    INFRA-10751 Empty commit
(hbase) branch branch-2.5 updated: HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797)
This is an automated email from the ASF dual-hosted git repository.

ndimiduk pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
     new 82ad5f224e3  HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797)
82ad5f224e3 is described below

commit 82ad5f224e398f3522ddc63f8fa3c5a02365ca63
Author: Charles Connell
AuthorDate: Tue Apr 9 04:20:47 2024 -0400

    HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797)

    Co-authored-by: Charles Connell
    Signed-off-by: Andrew Purtell
    Signed-off-by: Nick Dimiduk
---
 .../hbase/io/compress/zstd/ZstdCompressor.java   | 20 ++--
 .../hbase/io/compress/zstd/ZstdDecompressor.java | 19 ---
 2 files changed, 26 insertions(+), 13 deletions(-)

diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java
index 4d34d4825d3..b48db9106fb 100644
--- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java
+++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.io.compress.zstd;
 
 import com.github.luben.zstd.Zstd;
+import com.github.luben.zstd.ZstdCompressCtx;
 import com.github.luben.zstd.ZstdDictCompress;
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -39,6 +40,7 @@ public class ZstdCompressor implements CanReinit, Compressor {
   protected long bytesRead, bytesWritten;
   protected int dictId;
   protected ZstdDictCompress dict;
+  protected ZstdCompressCtx ctx;
 
   ZstdCompressor(final int level, final int bufferSize, final byte[] dictionary) {
     this.level = level;
@@ -46,9 +48,12 @@ public class ZstdCompressor implements CanReinit, Compressor {
     this.inBuf = ByteBuffer.allocateDirect(bufferSize);
     this.outBuf = ByteBuffer.allocateDirect(bufferSize);
     this.outBuf.position(bufferSize);
+    this.ctx = new ZstdCompressCtx();
+    this.ctx.setLevel(level);
     if (dictionary != null) {
       this.dictId = ZstdCodec.getDictionaryId(dictionary);
       this.dict = new ZstdDictCompress(dictionary, level);
+      this.ctx.loadDict(this.dict);
     }
   }
 
@@ -79,12 +84,7 @@ public class ZstdCompressor implements CanReinit, Compressor {
       } else {
         outBuf.clear();
       }
-      int written;
-      if (dict != null) {
-        written = Zstd.compress(outBuf, inBuf, dict);
-      } else {
-        written = Zstd.compress(outBuf, inBuf, level);
-      }
+      int written = ctx.compress(outBuf, inBuf);
       bytesWritten += written;
       inBuf.clear();
       finished = true;
@@ -170,6 +170,14 @@ public class ZstdCompressor implements CanReinit, Compressor {
     bytesWritten = 0;
     finish = false;
     finished = false;
+    ctx.reset();
+    ctx.setLevel(level);
+    if (dict != null) {
+      ctx.loadDict(dict);
+    } else {
+      // loadDict((byte[]) accepts null to clear the dictionary
+      ctx.loadDict((byte[]) null);
+    }
   }
 
   @Override
diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java
index ef0a0f87651..79826c96d5e 100644
--- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java
+++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hbase.io.compress.zstd;
 
-import com.github.luben.zstd.Zstd;
+import com.github.luben.zstd.ZstdDecompressCtx;
 import com.github.luben.zstd.ZstdDictDecompress;
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -39,15 +39,18 @@ public class ZstdDecompressor implements CanReinit, Decompressor {
   protected boolean finished;
   protected int dictId;
   protected ZstdDictDecompress dict;
+  protected ZstdDecompressCtx ctx;
 
   ZstdDecompressor(final int bufferSize, final byte[] dictionary) {
     this.bufferSize = bufferSize;
     this.inBuf = ByteBuffer.allocateDirect(bufferSize);
     this.outBuf = ByteBuffer.allocateDirect(bufferSize);
     this.outBuf.position(bufferSize);
+    this.ctx = new ZstdDecompressCtx();
     if (dictionary != null) {
       this.dictId = ZstdCodec.getDictionaryId(dictionary);
       this.dict = new ZstdDictDecompress(dictionary);
+      this.ctx.loadDict(this.dict);
    }
  }

@@ -67,12 +70,7 @@ public class ZstdDecompressor implements CanReinit,
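The point of the patch is to stop going through the static Zstd.compress(...) helpers, which set up fresh native state on every call, and instead keep one ZstdCompressCtx per compressor instance, configured once and reused for every block; the same change is applied to branch-2.6, branch-2, branch-3 and master in the emails below. A minimal sketch of that reuse pattern, using only the zstd-jni calls that appear in the diff (setLevel, loadDict, reset, compress(ByteBuffer, ByteBuffer)); the buffer sizes, level, and sample payload are made up:

import com.github.luben.zstd.ZstdCompressCtx;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ZstdCtxReuseSketch {
  public static void main(String[] args) {
    // One long-lived context, configured once and reused for every block.
    ZstdCompressCtx ctx = new ZstdCompressCtx();
    ctx.setLevel(3); // hypothetical level; HBase takes this from the codec configuration

    byte[] payload = "hello hello hello hello zstd".getBytes(StandardCharsets.UTF_8);
    // The ByteBuffer overload works on direct buffers, as in the HBase compressor.
    ByteBuffer in = ByteBuffer.allocateDirect(64 * 1024);
    ByteBuffer out = ByteBuffer.allocateDirect(64 * 1024);

    for (int block = 0; block < 3; block++) {
      in.clear();
      in.put(payload);
      in.flip();
      out.clear();
      // Same call the patch switches to: returns the number of compressed bytes written.
      int written = ctx.compress(out, in);
      System.out.println("block " + block + " compressed to " + written + " bytes");
    }
    // In real code the context should be released once the compressor is done with it.
  }
}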
(hbase) branch branch-2.6 updated: HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797)
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2.6 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.6 by this push: new 65e2f2318b7 HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797) 65e2f2318b7 is described below commit 65e2f2318b74b55206b48725d6eec83374a52a98 Author: Charles Connell AuthorDate: Tue Apr 9 04:20:47 2024 -0400 HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797) Co-authored-by: Charles Connell Signed-off-by: Andrew Purtell Signed-off-by: Nick Dimiduk --- .../hbase/io/compress/zstd/ZstdCompressor.java | 20 ++-- .../hbase/io/compress/zstd/ZstdDecompressor.java | 19 --- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java index 4d34d4825d3..b48db9106fb 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.compress.zstd; import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdCompressCtx; import com.github.luben.zstd.ZstdDictCompress; import java.io.IOException; import java.nio.ByteBuffer; @@ -39,6 +40,7 @@ public class ZstdCompressor implements CanReinit, Compressor { protected long bytesRead, bytesWritten; protected int dictId; protected ZstdDictCompress dict; + protected ZstdCompressCtx ctx; ZstdCompressor(final int level, final int bufferSize, final byte[] dictionary) { this.level = level; @@ -46,9 +48,12 @@ public class ZstdCompressor implements CanReinit, Compressor { this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); +this.ctx = new ZstdCompressCtx(); +this.ctx.setLevel(level); if (dictionary != null) { this.dictId = ZstdCodec.getDictionaryId(dictionary); this.dict = new ZstdDictCompress(dictionary, level); + this.ctx.loadDict(this.dict); } } @@ -79,12 +84,7 @@ public class ZstdCompressor implements CanReinit, Compressor { } else { outBuf.clear(); } -int written; -if (dict != null) { - written = Zstd.compress(outBuf, inBuf, dict); -} else { - written = Zstd.compress(outBuf, inBuf, level); -} +int written = ctx.compress(outBuf, inBuf); bytesWritten += written; inBuf.clear(); finished = true; @@ -170,6 +170,14 @@ public class ZstdCompressor implements CanReinit, Compressor { bytesWritten = 0; finish = false; finished = false; +ctx.reset(); +ctx.setLevel(level); +if (dict != null) { + ctx.loadDict(dict); +} else { + // loadDict((byte[]) accepts null to clear the dictionary + ctx.loadDict((byte[]) null); +} } @Override diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java index ef0a0f87651..79826c96d5e 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java +++ 
b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.io.compress.zstd; -import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDecompressCtx; import com.github.luben.zstd.ZstdDictDecompress; import java.io.IOException; import java.nio.ByteBuffer; @@ -39,15 +39,18 @@ public class ZstdDecompressor implements CanReinit, Decompressor { protected boolean finished; protected int dictId; protected ZstdDictDecompress dict; + protected ZstdDecompressCtx ctx; ZstdDecompressor(final int bufferSize, final byte[] dictionary) { this.bufferSize = bufferSize; this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); +this.ctx = new ZstdDecompressCtx(); if (dictionary != null) { this.dictId = ZstdCodec.getDictionaryId(dictionary); this.dict = new ZstdDictDecompress(dictionary); + this.ctx.loadDict(this.dict); } } @@ -67,12 +70,7 @@ public class ZstdDecompressor implements CanReinit,
(hbase-thirdparty) branch master updated: HBASE-28492 [hbase-thirdparty] Bump dependency versions before releasing (#115)
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-thirdparty.git


The following commit(s) were added to refs/heads/master by this push:
     new 83ac4a7  HBASE-28492 [hbase-thirdparty] Bump dependency versions before releasing (#115)
83ac4a7 is described below

commit 83ac4a7141a9eb76af6a69ab0ccaeb28bb4d125c
Author: Duo Zhang
AuthorDate: Tue Apr 9 18:04:23 2024 +0800

    HBASE-28492 [hbase-thirdparty] Bump dependency versions before releasing (#115)

    Signed-off-by: Nick Dimiduk
---
 pom.xml | 8
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pom.xml b/pom.xml
index b9da4fe..f450bbe 100644
--- a/pom.xml
+++ b/pom.xml
@@ -137,20 +137,20 @@
 3.25.2
 4.1.108.Final
 2.0.61.Final
-33.0.0-jre
+33.1.0-jre
 1.6.0
 4.4
-2.21.1
+2.26.1
 2.10.1
 9.4.54.v20240208
 3.1.0
-2.41
+2.42
 2.6.1
 2.1.6
 1.3.5
 2.0.2
 3.30.2-GA
- 2.16.1
+ 2.17.0
(hbase) branch branch-2 updated: HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797)
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new 9f60f1821f8 HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797) 9f60f1821f8 is described below commit 9f60f1821f88ac26b29c111d9d0ec1e0aad2c52e Author: Charles Connell AuthorDate: Tue Apr 9 04:20:47 2024 -0400 HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797) Co-authored-by: Charles Connell Signed-off-by: Andrew Purtell Signed-off-by: Nick Dimiduk --- .../hbase/io/compress/zstd/ZstdCompressor.java | 20 ++-- .../hbase/io/compress/zstd/ZstdDecompressor.java | 19 --- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java index 4d34d4825d3..b48db9106fb 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.compress.zstd; import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdCompressCtx; import com.github.luben.zstd.ZstdDictCompress; import java.io.IOException; import java.nio.ByteBuffer; @@ -39,6 +40,7 @@ public class ZstdCompressor implements CanReinit, Compressor { protected long bytesRead, bytesWritten; protected int dictId; protected ZstdDictCompress dict; + protected ZstdCompressCtx ctx; ZstdCompressor(final int level, final int bufferSize, final byte[] dictionary) { this.level = level; @@ -46,9 +48,12 @@ public class ZstdCompressor implements CanReinit, Compressor { this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); +this.ctx = new ZstdCompressCtx(); +this.ctx.setLevel(level); if (dictionary != null) { this.dictId = ZstdCodec.getDictionaryId(dictionary); this.dict = new ZstdDictCompress(dictionary, level); + this.ctx.loadDict(this.dict); } } @@ -79,12 +84,7 @@ public class ZstdCompressor implements CanReinit, Compressor { } else { outBuf.clear(); } -int written; -if (dict != null) { - written = Zstd.compress(outBuf, inBuf, dict); -} else { - written = Zstd.compress(outBuf, inBuf, level); -} +int written = ctx.compress(outBuf, inBuf); bytesWritten += written; inBuf.clear(); finished = true; @@ -170,6 +170,14 @@ public class ZstdCompressor implements CanReinit, Compressor { bytesWritten = 0; finish = false; finished = false; +ctx.reset(); +ctx.setLevel(level); +if (dict != null) { + ctx.loadDict(dict); +} else { + // loadDict((byte[]) accepts null to clear the dictionary + ctx.loadDict((byte[]) null); +} } @Override diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java index ef0a0f87651..79826c96d5e 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java +++ 
b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.io.compress.zstd; -import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDecompressCtx; import com.github.luben.zstd.ZstdDictDecompress; import java.io.IOException; import java.nio.ByteBuffer; @@ -39,15 +39,18 @@ public class ZstdDecompressor implements CanReinit, Decompressor { protected boolean finished; protected int dictId; protected ZstdDictDecompress dict; + protected ZstdDecompressCtx ctx; ZstdDecompressor(final int bufferSize, final byte[] dictionary) { this.bufferSize = bufferSize; this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); +this.ctx = new ZstdDecompressCtx(); if (dictionary != null) { this.dictId = ZstdCodec.getDictionaryId(dictionary); this.dict = new ZstdDictDecompress(dictionary); + this.ctx.loadDict(this.dict); } } @@ -67,12 +70,7 @@ public class ZstdDecompressor implements CanReinit,
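One subtlety in the new reinit() path shown in the diff above: ZstdCompressor implements CanReinit, which suggests these objects are pooled and reconfigured for new streams, so a long-lived context must either load the (possibly different) dictionary for its next user or explicitly clear the previous one; per the inline comment, passing a null byte[] to loadDict does the clearing. A small sketch of that lifecycle, with a made-up reinit helper and a made-up dictionary payload (zstd also accepts raw-content dictionaries, so no trained dictionary is needed for the demo):

import com.github.luben.zstd.ZstdCompressCtx;
import java.nio.charset.StandardCharsets;

public class ZstdReinitSketch {
  // Reconfigure a pooled, long-lived context for its next stream.
  static void reinit(ZstdCompressCtx ctx, int level, byte[] dictionary) {
    ctx.reset();                   // drop any state from the previous stream
    ctx.setLevel(level);
    if (dictionary != null) {
      ctx.loadDict(dictionary);    // dictionary for the new stream
    } else {
      ctx.loadDict((byte[]) null); // clear whatever dictionary was loaded before
    }
  }

  public static void main(String[] args) {
    ZstdCompressCtx ctx = new ZstdCompressCtx();
    byte[] fakeDict = "not a real trained dictionary".getBytes(StandardCharsets.UTF_8);
    reinit(ctx, 3, fakeDict); // first user: level 3 with a dictionary
    reinit(ctx, 6, null);     // next user: level 6, dictionary cleared
  }
}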
(hbase) branch branch-3 updated: HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797)
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-3 by this push: new 75b1516df22 HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797) 75b1516df22 is described below commit 75b1516df2277366073256acb523b674523722cf Author: Charles Connell AuthorDate: Tue Apr 9 04:20:47 2024 -0400 HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797) Co-authored-by: Charles Connell Signed-off-by: Andrew Purtell Signed-off-by: Nick Dimiduk --- .../hbase/io/compress/zstd/ZstdCompressor.java | 20 ++-- .../hbase/io/compress/zstd/ZstdDecompressor.java | 19 --- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java index 4d34d4825d3..b48db9106fb 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.compress.zstd; import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdCompressCtx; import com.github.luben.zstd.ZstdDictCompress; import java.io.IOException; import java.nio.ByteBuffer; @@ -39,6 +40,7 @@ public class ZstdCompressor implements CanReinit, Compressor { protected long bytesRead, bytesWritten; protected int dictId; protected ZstdDictCompress dict; + protected ZstdCompressCtx ctx; ZstdCompressor(final int level, final int bufferSize, final byte[] dictionary) { this.level = level; @@ -46,9 +48,12 @@ public class ZstdCompressor implements CanReinit, Compressor { this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); +this.ctx = new ZstdCompressCtx(); +this.ctx.setLevel(level); if (dictionary != null) { this.dictId = ZstdCodec.getDictionaryId(dictionary); this.dict = new ZstdDictCompress(dictionary, level); + this.ctx.loadDict(this.dict); } } @@ -79,12 +84,7 @@ public class ZstdCompressor implements CanReinit, Compressor { } else { outBuf.clear(); } -int written; -if (dict != null) { - written = Zstd.compress(outBuf, inBuf, dict); -} else { - written = Zstd.compress(outBuf, inBuf, level); -} +int written = ctx.compress(outBuf, inBuf); bytesWritten += written; inBuf.clear(); finished = true; @@ -170,6 +170,14 @@ public class ZstdCompressor implements CanReinit, Compressor { bytesWritten = 0; finish = false; finished = false; +ctx.reset(); +ctx.setLevel(level); +if (dict != null) { + ctx.loadDict(dict); +} else { + // loadDict((byte[]) accepts null to clear the dictionary + ctx.loadDict((byte[]) null); +} } @Override diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java index ef0a0f87651..79826c96d5e 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java +++ 
b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.io.compress.zstd; -import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDecompressCtx; import com.github.luben.zstd.ZstdDictDecompress; import java.io.IOException; import java.nio.ByteBuffer; @@ -39,15 +39,18 @@ public class ZstdDecompressor implements CanReinit, Decompressor { protected boolean finished; protected int dictId; protected ZstdDictDecompress dict; + protected ZstdDecompressCtx ctx; ZstdDecompressor(final int bufferSize, final byte[] dictionary) { this.bufferSize = bufferSize; this.inBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); +this.ctx = new ZstdDecompressCtx(); if (dictionary != null) { this.dictId = ZstdCodec.getDictionaryId(dictionary); this.dict = new ZstdDictDecompress(dictionary); + this.ctx.loadDict(this.dict); } } @@ -67,12 +70,7 @@ public class ZstdDecompressor implements CanReinit,
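The decompression side mirrors this: each ZstdDecompressor now owns a ZstdDecompressCtx, loads its dictionary into it once, and reuses it for every block. A round-trip sketch under the assumption that zstd-jni's ZstdDecompressCtx exposes a decompress(ByteBuffer dst, ByteBuffer src) overload matching the compressor's compress(dst, src) shown in the diffs (the decompressor hunk above is truncated before that call, so treat the exact signature as an assumption); buffer sizes and the sample text are made up:

import com.github.luben.zstd.ZstdCompressCtx;
import com.github.luben.zstd.ZstdDecompressCtx;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ZstdRoundTripSketch {
  public static void main(String[] args) {
    ZstdCompressCtx cctx = new ZstdCompressCtx();
    cctx.setLevel(3); // hypothetical level
    ZstdDecompressCtx dctx = new ZstdDecompressCtx();

    byte[] original = "reuse one native context per codec instance".getBytes(StandardCharsets.UTF_8);
    ByteBuffer raw = ByteBuffer.allocateDirect(4096);
    ByteBuffer compressed = ByteBuffer.allocateDirect(4096);
    ByteBuffer restored = ByteBuffer.allocateDirect(4096);

    raw.put(original);
    raw.flip();
    cctx.compress(compressed, raw); // advances the positions of both buffers
    compressed.flip();

    // Assumed overload; returns the number of decompressed bytes.
    int n = dctx.decompress(restored, compressed);
    restored.flip();
    byte[] back = new byte[n];
    restored.get(back);
    System.out.println(new String(back, StandardCharsets.UTF_8));
  }
}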
(hbase) branch master updated (bf836a98073 -> 1a089cd3935)
This is an automated email from the ASF dual-hosted git repository.

ndimiduk pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


    from bf836a98073 HBASE-26192 Master UI hbck should provide a JSON formatted output option (#5772)
     add 1a089cd3935 HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for performance (#5797)

No new revisions were added by this update.

Summary of changes:
 .../hbase/io/compress/zstd/ZstdCompressor.java   | 20 ++--
 .../hbase/io/compress/zstd/ZstdDecompressor.java | 19 ---
 2 files changed, 26 insertions(+), 13 deletions(-)