This is an automated email from the ASF dual-hosted git repository.
ndimiduk pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.6 by this push:
new 24ca14bdc09 HBASE-29222 Avoid expensive tracing calls if tracing is disabled (#6863)
24ca14bdc09 is described below
commit 24ca14bdc0904a051c55f6b8d72a1159d0d6cbbb
Author: jbewing <[email protected]>
AuthorDate: Fri May 23 10:06:58 2025 -0400
HBASE-29222 Avoid expensive tracing calls if tracing is disabled (#6863)
Signed-off-by: Nick Dimiduk <[email protected]>
---
.../apache/hadoop/hbase/io/util/BlockIOUtils.java | 70 +++++++++++++++-------
.../apache/hadoop/hbase/io/hfile/HFileBlock.java | 33 +++++++---
2 files changed, 72 insertions(+), 31 deletions(-)
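The crux of the change is a guard on io.opentelemetry.api.trace.Span#isRecording():
when tracing is disabled (or the span was sampled out), the current span is a no-op
and building the Attributes payload for an event is wasted allocation. A minimal
standalone sketch of the pattern the diff applies follows; the class name and
attribute key are illustrative, not the HBase helpers:

    import io.opentelemetry.api.common.AttributeKey;
    import io.opentelemetry.api.common.Attributes;
    import io.opentelemetry.api.trace.Span;

    final class TracingGuardSketch {
      private static final AttributeKey<Long> BYTES_READ = AttributeKey.longKey("bytes.read");

      static void recordRead(long bytesRead) {
        final Span span = Span.current();
        // Building Attributes allocates a builder and boxes values; skip all of
        // that when the span is non-recording (tracing disabled or unsampled).
        if (!span.isRecording()) {
          return;
        }
        span.addEvent("example.read", Attributes.builder().put(BYTES_READ, bytesRead).build());
      }
    }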
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/BlockIOUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/BlockIOUtils.java
index 9641c72dfbc..bf737b2e124 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/BlockIOUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/BlockIOUtils.java
@@ -86,14 +86,12 @@ public final class BlockIOUtils {
*/
public static void readFully(ByteBuff buf, FSDataInputStream dis, int length) throws IOException {
final Span span = Span.current();
- final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
if (!isByteBufferReadable(dis)) {
// If InputStream does not support the ByteBuffer read, just read to heap and copy bytes to
// the destination ByteBuff.
byte[] heapBuf = new byte[length];
IOUtils.readFully(dis, heapBuf, 0, length);
- annotateHeapBytesRead(attributesBuilder, length);
- span.addEvent("BlockIOUtils.readFully", attributesBuilder.build());
+ span.addEvent("BlockIOUtils.readFully", getHeapBytesReadAttributes(span,
length));
copyToByteBuff(heapBuf, 0, length, buf);
return;
}
@@ -125,8 +123,8 @@ public final class BlockIOUtils {
}
}
} finally {
- annotateBytesRead(attributesBuilder, directBytesRead, heapBytesRead);
- span.addEvent("BlockIOUtils.readFully", attributesBuilder.build());
+ span.addEvent("BlockIOUtils.readFully",
+ getDirectAndHeapBytesReadAttributes(span, directBytesRead, heapBytesRead));
}
}
@@ -159,9 +157,8 @@ public final class BlockIOUtils {
}
} finally {
final Span span = Span.current();
- final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
- annotateHeapBytesRead(attributesBuilder, heapBytesRead);
- span.addEvent("BlockIOUtils.readFullyWithHeapBuffer",
attributesBuilder.build());
+ span.addEvent("BlockIOUtils.readFullyWithHeapBuffer",
+ getHeapBytesReadAttributes(span, heapBytesRead));
}
}
@@ -200,9 +197,7 @@ public final class BlockIOUtils {
}
} finally {
final Span span = Span.current();
- final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
- annotateHeapBytesRead(attributesBuilder, heapBytesRead);
- span.addEvent("BlockIOUtils.readWithExtra", attributesBuilder.build());
+ span.addEvent("BlockIOUtils.readWithExtra",
getHeapBytesReadAttributes(span, heapBytesRead));
}
return bytesRemaining <= 0;
}
@@ -260,9 +255,8 @@ public final class BlockIOUtils {
}
} finally {
final Span span = Span.current();
- final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
- annotateBytesRead(attributesBuilder, directBytesRead, heapBytesRead);
- span.addEvent("BlockIOUtils.readWithExtra", attributesBuilder.build());
+ span.addEvent("BlockIOUtils.readWithExtra",
+ getDirectAndHeapBytesReadAttributes(span, directBytesRead, heapBytesRead));
}
return (extraLen > 0) && (bytesRead == necessaryLen + extraLen);
}
@@ -333,9 +327,7 @@ public final class BlockIOUtils {
}
} finally {
final Span span = Span.current();
- final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
- annotateHeapBytesRead(attributesBuilder, bytesRead);
- span.addEvent("BlockIOUtils.preadWithExtra", attributesBuilder.build());
+ span.addEvent("BlockIOUtils.preadWithExtra",
getHeapBytesReadAttributes(span, bytesRead));
}
copyToByteBuff(buf, 0, bytesRead, buff);
return (extraLen > 0) && (bytesRead == necessaryLen + extraLen);
@@ -383,9 +375,8 @@ public final class BlockIOUtils {
}
} finally {
final Span span = Span.current();
- final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
- annotateBytesRead(attributesBuilder, directBytesRead, heapBytesRead);
- span.addEvent("BlockIOUtils.preadWithExtra", attributesBuilder.build());
+ span.addEvent("BlockIOUtils.preadWithExtra",
+ getDirectAndHeapBytesReadAttributes(span, directBytesRead, heapBytesRead));
}
return (extraLen > 0) && (bytesRead == necessaryLen + extraLen);
@@ -414,6 +405,41 @@ public final class BlockIOUtils {
return len;
}
+ /**
+ * Builds OpenTelemetry attributes to be recorded on a span for an event which reads direct and
+ * heap bytes. This will short-circuit and record nothing if OpenTelemetry isn't enabled.
+ */
+ private static Attributes getDirectAndHeapBytesReadAttributes(Span span, int directBytesRead,
+ int heapBytesRead) {
+ // It's expensive to record these attributes, so we avoid the cost of doing this if the span
+ // isn't going to be persisted
+ if (!span.isRecording()) {
+ return Attributes.empty();
+ }
+
+ final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
+ annotateBytesRead(attributesBuilder, directBytesRead, heapBytesRead);
+
+ return attributesBuilder.build();
+ }
+
+ /**
+ * Builds OpenTelemetry attributes to be recorded on a span for an event which reads just heap
+ * bytes. This will short-circuit and record nothing if OpenTelemetry isn't enabled.
+ */
+ private static Attributes getHeapBytesReadAttributes(Span span, int heapBytesRead) {
+ // It's expensive to record these attributes, so we avoid the cost of doing this if the span
+ // isn't going to be persisted
+ if (!span.isRecording()) {
+ return Attributes.empty();
+ }
+
+ final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
+ annotateHeapBytesRead(attributesBuilder, heapBytesRead);
+
+ return attributesBuilder.build();
+ }
+
/**
* Construct a fresh {@link AttributesBuilder} from the provided {@link Context}, populated with
* relevant attributes populated by {@link HFileContextAttributesBuilderConsumer#CONTEXT_KEY}.
@@ -438,8 +464,8 @@ public final class BlockIOUtils {
* Conditionally annotate {@code attributesBuilder} with appropriate attributes when values are
* non-zero.
*/
- private static void annotateBytesRead(AttributesBuilder attributesBuilder, long directBytesRead,
- long heapBytesRead) {
+ private static void annotateBytesRead(AttributesBuilder attributesBuilder, int directBytesRead,
+ int heapBytesRead) {
if (directBytesRead > 0) {
attributesBuilder.put(DIRECT_BYTES_READ_KEY, directBytesRead);
}
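With only the OpenTelemetry API on the classpath and no SDK configured, Span.current()
returns a no-op span whose isRecording() is false, so each helper above returns
Attributes.empty() without ever touching a builder. A small standalone check of that
assumption (class name is illustrative):

    import io.opentelemetry.api.common.Attributes;
    import io.opentelemetry.api.trace.Span;

    public class NoopSpanCheck {
      public static void main(String[] args) {
        Span span = Span.current(); // a no-op span when no SDK is installed
        System.out.println("recording? " + span.isRecording()); // prints: recording? false
        // addEvent on a no-op span is itself nearly free; the cost this patch
        // avoids is eagerly building the Attributes argument.
        span.addEvent("demo", Attributes.empty());
      }
    }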
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index c68b0389f63..066cb84de0f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1723,9 +1723,7 @@ public class HFileBlock implements Cacheable {
long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics,
boolean intoHeap) throws IOException {
final Span span = Span.current();
- final AttributesBuilder attributesBuilder = Attributes.builder();
- Optional.of(Context.current()).map(val -> val.get(CONTEXT_KEY))
- .ifPresent(c -> c.accept(attributesBuilder));
+ final Attributes attributes = getReadDataBlockInternalAttributes(span);
if (offset < 0) {
throw new IOException("Invalid offset=" + offset + " trying to read "
+ "block (onDiskSize="
+ onDiskSizeWithHeaderL + ")");
@@ -1761,7 +1759,7 @@ public class HFileBlock implements Cacheable {
if (LOG.isTraceEnabled()) {
LOG.trace("Extra seek to get block size!", new RuntimeException());
}
- span.addEvent("Extra seek to get block size!",
attributesBuilder.build());
+ span.addEvent("Extra seek to get block size!", attributes);
headerBuf = HEAP.allocate(hdrSize);
readAtOffset(is, headerBuf, hdrSize, false, offset, pread);
headerBuf.rewind();
@@ -1775,7 +1773,7 @@ public class HFileBlock implements Cacheable {
if (!checkCheckSumTypeOnHeaderBuf(headerBuf)) {
if (verifyChecksum) {
invalidateNextBlockHeader();
- span.addEvent("Falling back to HDFS checksumming.",
attributesBuilder.build());
+ span.addEvent("Falling back to HDFS checksumming.", attributes);
return null;
} else {
throw new IOException(
@@ -1789,7 +1787,7 @@ public class HFileBlock implements Cacheable {
if (!checkOnDiskSizeWithHeader(onDiskSizeWithHeader)) {
if (verifyChecksum) {
invalidateNextBlockHeader();
- span.addEvent("Falling back to HDFS checksumming.",
attributesBuilder.build());
+ span.addEvent("Falling back to HDFS checksumming.", attributes);
return null;
} else {
throw new IOException("Invalid onDiskSizeWithHeader=" +
onDiskSizeWithHeader);
@@ -1830,7 +1828,7 @@ public class HFileBlock implements Cacheable {
// Verify checksum of the data before using it for building HFileBlock.
if (verifyChecksum && !validateChecksum(offset, curBlock, hdrSize)) {
invalidateNextBlockHeader();
- span.addEvent("Falling back to HDFS checksumming.",
attributesBuilder.build());
+ span.addEvent("Falling back to HDFS checksumming.", attributes);
return null;
}
@@ -1847,7 +1845,7 @@ public class HFileBlock implements Cacheable {
// requested. The block size provided by the caller (presumably from the block index)
// does not match the block size written to the block header. treat this as
// HBase-checksum failure.
- span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build());
+ span.addEvent("Falling back to HDFS checksumming.", attributes);
invalidateNextBlockHeader();
return null;
}
@@ -1877,7 +1875,7 @@ public class HFileBlock implements Cacheable {
LOG.warn("Read Block Slow: read {} cost {} ms, threshold = {} ms",
hFileBlock, duration,
this.readWarnTime);
}
- span.addEvent("Read block", attributesBuilder.build());
+ span.addEvent("Read block", attributes);
// Cache next block header if we read it for the next time through here.
if (nextBlockOnDiskSize != -1) {
cacheNextBlockHeader(offset + hFileBlock.getOnDiskSizeWithHeader(), onDiskBlock,
@@ -2211,4 +2209,21 @@ public class HFileBlock implements Cacheable {
.wrap(ByteBuffer.wrap(blk.bufWithoutChecksum.toBytes(0, blk.bufWithoutChecksum.limit())));
return createBuilder(blk, deepCloned).build();
}
+
+ /**
+ * Returns OpenTelemetry Attributes for a Span that is reading a data block with relevant
+ * metadata. Will short-circuit if the span isn't going to be captured/OTEL isn't enabled.
+ */
+ private static Attributes getReadDataBlockInternalAttributes(Span span) {
+ // It's expensive to record these attributes, so we avoid the cost of doing this if the span
+ // isn't going to be persisted
+ if (!span.isRecording()) {
+ return Attributes.empty();
+ }
+
+ final AttributesBuilder attributesBuilder = Attributes.builder();
+ Optional.of(Context.current()).map(val -> val.get(CONTEXT_KEY))
+ .ifPresent(c -> c.accept(attributesBuilder));
+ return attributesBuilder.build();
+ }
}
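The HFileBlock helper follows the same recipe as the BlockIOUtils ones, except its
attributes come from a consumer stashed on the OpenTelemetry Context rather than from
byte counts. Roughly, under the same guard; CONTEXT_KEY below is a hypothetical
stand-in for HFileContextAttributesBuilderConsumer#CONTEXT_KEY:

    import io.opentelemetry.api.common.Attributes;
    import io.opentelemetry.api.common.AttributesBuilder;
    import io.opentelemetry.api.trace.Span;
    import io.opentelemetry.context.Context;
    import io.opentelemetry.context.ContextKey;
    import java.util.Optional;
    import java.util.function.Consumer;

    final class ContextAttributesSketch {
      // Hypothetical stand-in for the HBase context key: a consumer stored on the
      // Context that knows how to annotate a builder with HFile metadata.
      static final ContextKey<Consumer<AttributesBuilder>> CONTEXT_KEY =
        ContextKey.named("attributes-consumer");

      static Attributes readDataBlockAttributes(Span span) {
        if (!span.isRecording()) {
          return Attributes.empty(); // shared empty instance; nothing allocated
        }
        final AttributesBuilder attributesBuilder = Attributes.builder();
        Optional.of(Context.current()).map(ctx -> ctx.get(CONTEXT_KEY))
          .ifPresent(c -> c.accept(attributesBuilder));
        return attributesBuilder.build();
      }
    }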