This is an automated email from the ASF dual-hosted git repository.
chia7712 pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git
The following commit(s) were added to refs/heads/trunk by this push:
new 1f23bb132b6 MINOR: Include exception objects when logging group coordinator errors (#20806)
1f23bb132b6 is described below
commit 1f23bb132b6bbcecf9aa1d8e8feeafafd372a744
Author: Sean Quah <[email protected]>
AuthorDate: Thu Nov 6 16:05:49 2025 +0000
MINOR: Include exception objects when logging group coordinator errors (#20806)
...so that we capture stack traces for these errors.
Reviewers: Chia-Ping Tsai <[email protected]>
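
Side note on why appending the exception works: SLF4J treats a Throwable passed as the last argument of a parameterized log call, when it has no matching {} placeholder, as an attached exception and prints its stack trace after the formatted message. The sketch below illustrates that convention only; the class name, logger, and task key are hypothetical and not code from this patch, and it assumes slf4j-api plus a binding such as logback on the classpath.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class StackTraceLoggingSketch {
        // Hypothetical logger used only for this illustration.
        private static final Logger log = LoggerFactory.getLogger(StackTraceLoggingSketch.class);

        public static void main(String[] args) {
            Exception exception = new IllegalStateException("simulated coordinator failure");

            // Before the patch: only the exception message is interpolated,
            // so the stack trace is lost.
            log.error("The write event for the task {} failed due to {}. Ignoring it. ",
                "task-0", exception.getMessage());

            // After the patch: the trailing Throwable has no {} placeholder,
            // so SLF4J appends its full stack trace to the log entry.
            log.error("The write event for the task {} failed due to {}. Ignoring it. ",
                "task-0", exception.getMessage(), exception);
        }
    }

The same convention covers the two-argument overload error(String, Throwable), which is why log.error("Failed to read summary of the share partition", error) in GroupCoordinatorService also captures the stack trace.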
---
.../coordinator/common/runtime/CoordinatorExecutorImpl.java | 2 +-
.../kafka/coordinator/common/runtime/CoordinatorRuntime.java | 10 +++++-----
.../kafka/coordinator/group/GroupCoordinatorService.java | 2 +-
.../apache/kafka/coordinator/group/GroupMetadataManager.java | 8 ++++----
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java
index bcd8fc795fb..986bcd1c5b0 100644
--- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java
+++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java
@@ -107,7 +107,7 @@ public class CoordinatorExecutorImpl<S extends CoordinatorShard<U>, U> implement
                     "the coordinator is not active.", key, exception.getMessage());
             } else {
                 log.error("The write event for the task {} failed due to {}. Ignoring it. ",
-                    key, exception.getMessage());
+                    key, exception.getMessage(), exception);
             }
             return null;
diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java
index 552e7e72eff..06017417233 100644
--- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java
+++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java
@@ -399,7 +399,7 @@ public class CoordinatorRuntime<S extends CoordinatorShard<U>, U> implements Aut
                         schedule(key, retryBackoff, TimeUnit.MILLISECONDS, true, retryBackoff, operation);
                     } else {
                         log.error("The write event {} for the timer {} failed due to {}. Ignoring it. ",
-                            event.name, key, ex.getMessage());
+                            event.name, key, ex.getMessage(), ex);
                     }
                     return null;
@@ -717,7 +717,7 @@ public class CoordinatorRuntime<S extends CoordinatorShard<U>, U> implements Aut
                     }
                 } catch (Throwable ex) {
                     log.error("Failed to load metadata from {} with epoch {} due to {}.",
-                        tp, epoch, ex.toString());
+                        tp, epoch, ex.getMessage(), ex);
                     context.transitionTo(CoordinatorState.FAILED);
                 }
             } else {
@@ -820,7 +820,7 @@ public class CoordinatorRuntime<S extends CoordinatorShard<U>, U> implements Aut
                 // Free up the current batch.
                 freeCurrentBatch();
             } catch (Throwable t) {
-                log.error("Writing records to {} failed due to: {}.", tp, t.getMessage());
+                log.error("Writing records to {} failed due to: {}.", tp, t.getMessage(), t);
                 failCurrentBatch(t);
                 // We rethrow the exception for the caller to handle it too.
                 throw t;
@@ -1057,7 +1057,7 @@ public class CoordinatorRuntime<S extends CoordinatorShard<U>, U> implements Aut
                     currentBatch.builder.append(recordToAppend);
                     currentBatch.nextOffset++;
                 } catch (Throwable t) {
-                    log.error("Replaying record {} to {} failed due to: {}.", recordToReplay, tp, t.getMessage());
+                    log.error("Replaying record {} to {} failed due to: {}.", recordToReplay, tp, t.getMessage(), t);
                     // Add the event to the list of pending events associated with the last
                     // batch in order to fail it too.
@@ -2446,7 +2446,7 @@ public class CoordinatorRuntime<S extends CoordinatorShard<U>, U> implements Aut
                         // It's very unlikely that we will ever see an exception here, since we
                         // already make an effort to catch exceptions in the unload method.
                         log.error("Failed to unload metadata for {} with epoch {} due to {}.",
-                            tp, partitionEpoch, ex.toString());
+                            tp, partitionEpoch, ex.getMessage(), ex);
                     } finally {
                         // Always remove the coordinator context, otherwise the coordinator
                         // shard could be permanently stuck.
diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorService.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorService.java
index 3cb59ca3f81..9c300c529b5 100644
--- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorService.java
+++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorService.java
@@ -1825,7 +1825,7 @@ public class GroupCoordinatorService implements GroupCoordinator {
         persister.readSummary(ReadShareGroupStateSummaryParameters.from(readSummaryRequestData))
             .whenComplete((result, error) -> {
                 if (error != null) {
-                    log.error("Failed to read summary of the share partition");
+                    log.error("Failed to read summary of the share partition", error);
                     future.completeExceptionally(error);
                     return;
                 }
diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java
index a4d37677e3f..9f551a5d02c 100644
--- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java
+++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java
@@ -3457,7 +3457,7 @@ public class GroupMetadataManager {
     ) {
         if (exception != null) {
             log.error("[GroupId {}] Couldn't update regular expression due to: {}",
-                groupId, exception.getMessage());
+                groupId, exception.getMessage(), exception);
             return new CoordinatorResult<>(List.of());
         }
@@ -3933,7 +3933,7 @@ public class GroupMetadataManager {
             } catch (PartitionAssignorException ex) {
                 String msg = String.format("Failed to compute a new target assignment for epoch %d: %s",
                     groupEpoch, ex.getMessage());
-                log.error("[GroupId {}] {}.", group.groupId(), msg);
+                log.error("[GroupId {}] {}.", group.groupId(), msg, ex);
                 throw new UnknownServerException(msg, ex);
             }
         }
@@ -3994,7 +3994,7 @@ public class GroupMetadataManager {
             } catch (PartitionAssignorException ex) {
                 String msg = String.format("Failed to compute a new target assignment for epoch %d: %s",
                     groupEpoch, ex.getMessage());
-                log.error("[GroupId {}] {}.", group.groupId(), msg);
+                log.error("[GroupId {}] {}.", group.groupId(), msg, ex);
                 throw new UnknownServerException(msg, ex);
             }
         }
@@ -4053,7 +4053,7 @@ public class GroupMetadataManager {
             } catch (TaskAssignorException ex) {
                 String msg = String.format("Failed to compute a new target assignment for epoch %d: %s",
                     groupEpoch, ex.getMessage());
-                log.error("[GroupId {}] {}.", group.groupId(), msg);
+                log.error("[GroupId {}] {}.", group.groupId(), msg, ex);
                 throw new UnknownServerException(msg, ex);
             }
         }