This is an automated email from the ASF dual-hosted git repository.
dajac pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git
The following commit(s) were added to refs/heads/trunk by this push:
new f8acfa5257f KAFKA-17621; Reduce logging verbosity on
ConsumerGroupHeartbeat path (#17288)
f8acfa5257f is described below
commit f8acfa5257f6ba6c229884b638053d52b1b3e68a
Author: David Jacot <[email protected]>
AuthorDate: Thu Sep 26 20:00:44 2024 +0200
KAFKA-17621; Reduce logging verbosity on ConsumerGroupHeartbeat path
(#17288)
While running large-scale performance tests, we noticed that the logging on
the ConsumerGroupHeartbeat path consumed a significant amount of CPU. This is
mainly due to the very large data structures that we print out. I made a pass
over those logs and switched some of them to debug level.
Reviewers: Lianet Magrans <[email protected]>
---
.../coordinator/group/GroupMetadataManager.java | 41 ++++++++++++++--------
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java
index b367009a621..384c5295c97 100644
--- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java
+++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java
@@ -1793,8 +1793,10 @@ public class GroupMetadataManager {
);
         if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
-            log.info("[GroupId {}] Computed new subscription metadata: {}.",
-                groupId, subscriptionMetadata);
+            if (log.isDebugEnabled()) {
+                log.debug("[GroupId {}] Computed new subscription metadata: {}.",
+                    groupId, subscriptionMetadata);
+            }
             bumpGroupEpoch = true;
             records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
         }
@@ -1984,8 +1986,10 @@ public class GroupMetadataManager {
);
         if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
-            log.info("[GroupId {}] Computed new subscription metadata: {}.",
-                groupId, subscriptionMetadata);
+            if (log.isDebugEnabled()) {
+                log.debug("[GroupId {}] Computed new subscription metadata: {}.",
+                    groupId, subscriptionMetadata);
+            }
             bumpGroupEpoch = true;
             records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
         }
@@ -2350,13 +2354,13 @@ public class GroupMetadataManager {
         records.add(newConsumerGroupMemberSubscriptionRecord(groupId, updatedMember));
         if (!updatedMember.subscribedTopicNames().equals(member.subscribedTopicNames())) {
-            log.info("[GroupId {}] Member {} updated its subscribed topics to: {}.",
+            log.debug("[GroupId {}] Member {} updated its subscribed topics to: {}.",
                 groupId, memberId, updatedMember.subscribedTopicNames());
             return true;
         }
         if (!updatedMember.subscribedTopicRegex().equals(member.subscribedTopicRegex())) {
-            log.info("[GroupId {}] Member {} updated its subscribed regex to: {}.",
+            log.debug("[GroupId {}] Member {} updated its subscribed regex to: {}.",
                 groupId, memberId, updatedMember.subscribedTopicRegex());
             return true;
         }
@@ -2432,10 +2436,12 @@ public class GroupMetadataManager {
         if (!updatedMember.equals(member)) {
             records.add(newConsumerGroupCurrentAssignmentRecord(groupId, updatedMember));
-            log.info("[GroupId {}] Member {} new assignment state: epoch={}, previousEpoch={}, state={}, "
-                + "assignedPartitions={} and revokedPartitions={}.",
-                groupId, updatedMember.memberId(), updatedMember.memberEpoch(), updatedMember.previousMemberEpoch(), updatedMember.state(),
-                assignmentToString(updatedMember.assignedPartitions()), assignmentToString(updatedMember.partitionsPendingRevocation()));
+            if (log.isDebugEnabled()) {
+                log.debug("[GroupId {}] Member {} new assignment state: epoch={}, previousEpoch={}, state={}, "
+                    + "assignedPartitions={} and revokedPartitions={}.",
+                    groupId, updatedMember.memberId(), updatedMember.memberEpoch(), updatedMember.previousMemberEpoch(), updatedMember.state(),
+                    assignmentToString(updatedMember.assignedPartitions()), assignmentToString(updatedMember.partitionsPendingRevocation()));
+            }
             // Schedule/cancel the rebalance timeout if the member uses the consumer protocol.
             // The members using classic protocol only have join timer and sync timer.
@@ -2543,8 +2549,13 @@ public class GroupMetadataManager {
             assignmentResultBuilder.build();
         long assignorTimeMs = time.milliseconds() - startTimeMs;
-        log.info("[GroupId {}] Computed a new target assignment for epoch {} with '{}' assignor in {}ms: {}.",
-            group.groupId(), groupEpoch, preferredServerAssignor, assignorTimeMs, assignmentResult.targetAssignment());
+        if (log.isDebugEnabled()) {
+            log.debug("[GroupId {}] Computed a new target assignment for epoch {} with '{}' assignor in {}ms: {}.",
+                group.groupId(), groupEpoch, preferredServerAssignor, assignorTimeMs, assignmentResult.targetAssignment());
+        } else {
+            log.info("[GroupId {}] Computed a new target assignment for epoch {} with '{}' assignor in {}ms.",
+                group.groupId(), groupEpoch, preferredServerAssignor, assignorTimeMs);
+        }
         records.addAll(assignmentResult.records());
@@ -2741,8 +2752,10 @@ public class GroupMetadataManager {
);
         if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
-            log.info("[GroupId {}] Computed new subscription metadata: {}.",
-                group.groupId(), subscriptionMetadata);
+            if (log.isDebugEnabled()) {
+                log.debug("[GroupId {}] Computed new subscription metadata: {}.",
+                    group.groupId(), subscriptionMetadata);
+            }
             records.add(newConsumerGroupSubscriptionMetadataRecord(group.groupId(), subscriptionMetadata));
         }
}