Copilot commented on code in PR #3145:
URL: https://github.com/apache/brpc/pull/3145#discussion_r2626838260
##########
src/brpc/rdma/rdma_endpoint.cpp:
##########
@@ -898,18 +914,22 @@ ssize_t RdmaEndpoint::CutFromIOBufList(butil::IOBuf**
from, size_t ndata) {
_sq_current = 0;
}
- // Update _window_size. Note that _window_size will never be negative.
+ // Update `_remote_rq_window_size' and `_sq_window_size'. Note that
+ // `_remote_rq_window_size' and `_sq_window_size' will never be
negative.
// Because there is at most one thread can enter this function for each
- // Socket, and the other thread of HandleCompletion can only add this
- // counter.
- _window_size.fetch_sub(1, butil::memory_order_relaxed);
+ // Socket, and the other thread of HandleCompletion can only add these
+ // counters.
+ remote_rq_window_size =
+ _remote_rq_window_size.fetch_sub(1, butil::memory_order_relaxed) -
1;
+ sq_window_size = _sq_window_size.fetch_sub(1,
butil::memory_order_relaxed) - 1;
}
return total_len;
}
int RdmaEndpoint::SendAck(int num) {
- if (_new_rq_wrs.fetch_add(num, butil::memory_order_relaxed) >
_remote_window_capacity / 2) {
+ if (_new_rq_wrs.fetch_add(num, butil::memory_order_relaxed) >
_remote_window_capacity / 2 &&
+ _sq_imm_window_size > 0) {
return SendImm(_new_rq_wrs.exchange(0, butil::memory_order_relaxed));
Review Comment:
There is a race condition in the logic that checks and decrements
`_sq_imm_window_size`. The function checks if `_sq_imm_window_size > 0` at line
932, but `_sq_imm_window_size` is not an atomic variable and is decremented at
line 964 without synchronization. If multiple threads call SendAck
concurrently, both could see `_sq_imm_window_size > 0`, both could call
SendImm, and `_sq_imm_window_size` could go negative, which violates the
invariant stated in the comment at lines 961-963.
##########
src/brpc/rdma/rdma_endpoint.cpp:
##########
@@ -1280,88 +1329,112 @@ void RdmaEndpoint::DeallocateResources() {
move_to_rdma_resource_list = true;
}
}
- int fd = -1;
- if (_resource->comp_channel) {
- fd = _resource->comp_channel->fd;
+
+ if (NULL != _resource->send_cq) {
+ IbvAckCqEvents(_resource->send_cq, _send_cq_events);
+ }
+ if (NULL != _resource->recv_cq) {
+ IbvAckCqEvents(_resource->recv_cq, _recv_cq_events);
}
- int err;
+
+ bool remove_consumer = true;
if (!move_to_rdma_resource_list) {
- if (_resource->qp) {
- err = IbvDestroyQp(_resource->qp);
- if (err != 0) {
- LOG(WARNING) << "Fail to destroy QP: " << berror(err);
- }
+ if (NULL != _resource->qp) {
+ int err = IbvDestroyQp(_resource->qp);
+ LOG_IF(WARNING, 0 != err) << "Fail to destroy QP: " << berror(err);
_resource->qp = NULL;
}
- if (_resource->cq) {
- IbvAckCqEvents(_resource->cq, _cq_events);
- err = IbvDestroyCq(_resource->cq);
- if (err != 0) {
- PLOG(WARNING) << "Fail to destroy CQ: " << berror(err);
- }
- _resource->cq = NULL;
- }
- if (_resource->comp_channel) {
- // destroy comp_channel will destroy this fd
+
+ DeallocateCq(_resource->polling_cq);
+ DeallocateCq(_resource->send_cq);
+ DeallocateCq(_resource->recv_cq);
+
+ if (NULL != _resource->comp_channel) {
+ // Destroy send_comp_channel will destroy this fd,
// so that we should remove it from epoll fd first
- _socket->_io_event.RemoveConsumer(fd);
- fd = -1;
- err = IbvDestroyCompChannel(_resource->comp_channel);
- if (err != 0) {
- LOG(WARNING) << "Fail to destroy CQ channel: " << berror(err);
- }
- _resource->comp_channel = NULL;
+ int fd = _resource->comp_channel->fd;
+ GetGlobalEventDispatcher(fd,
_socket->_io_event.bthread_tag()).RemoveConsumer(fd);
+ remove_consumer = false;
+ int err = IbvDestroyCompChannel(_resource->comp_channel);
+ LOG_IF(WARNING, 0 != err) << "Fail to destroy CQ channel: " <<
berror(err);
+
}
+
+ _resource->polling_cq = NULL;
+ _resource->send_cq = NULL;
+ _resource->recv_cq = NULL;
+ _resource->comp_channel = NULL;
delete _resource;
- _resource = NULL;
}
- SocketUniquePtr s;
- if (_cq_sid != INVALID_SOCKET_ID) {
+ if (INVALID_SOCKET_ID != _cq_sid) {
+ SocketUniquePtr s;
if (Socket::Address(_cq_sid, &s) == 0) {
- s->_user = NULL; // do not release user (this RdmaEndpoint)
- if (fd >= 0) {
- _socket->_io_event.RemoveConsumer(fd);
+ if (remove_consumer) {
+ s->_io_event.RemoveConsumer(s->_fd);
}
- s->_fd = -1; // already remove fd from epoll fd
+ s->_user = NULL; // Do not release user (this RdmaEndpoint).
+ s->_fd = -1; // Already remove fd from epoll fd.
s->SetFailed();
}
- _cq_sid = INVALID_SOCKET_ID;
}
if (move_to_rdma_resource_list) {
- if (_resource->cq) {
- IbvAckCqEvents(_resource->cq, _cq_events);
- }
BAIDU_SCOPED_LOCK(*g_rdma_resource_mutex);
_resource->next = g_rdma_resource_list;
g_rdma_resource_list = _resource;
}
-
- _resource = NULL;
}
static const int MAX_CQ_EVENTS = 128;
-int RdmaEndpoint::GetAndAckEvents() {
- int events = 0; void* context = NULL;
- while (1) {
- if (IbvGetCqEvent(_resource->comp_channel, &_resource->cq, &context)
!= 0) {
+int RdmaEndpoint::GetAndAckEvents(SocketUniquePtr& s) {
+ void* context = NULL;
+ ibv_cq* cq = NULL;
+ while (true) {
+ if (IbvGetCqEvent(_resource->comp_channel, &cq, &context) != 0) {
if (errno != EAGAIN) {
+ const int saved_errno = errno;
+ PLOG(ERROR) << "Fail to get cq event from " <<
s->description();
+ s->SetFailed(saved_errno, "Fail to get cq event from %s: %s",
+ s->description().c_str(), berror(saved_errno));
return -1;
}
break;
}
- ++events;
+ if (cq == _resource->send_cq) {
+ ++_send_cq_events;
+ } else if (cq == _resource->recv_cq) {
+ ++_recv_cq_events;
+ }
}
- if (events == 0) {
- return 0;
+ if (_send_cq_events >= MAX_CQ_EVENTS) {
+ IbvAckCqEvents(_resource->send_cq, _send_cq_events);
+ _send_cq_events = 0;
+ }
+ if (_recv_cq_events >= MAX_CQ_EVENTS) {
+ IbvAckCqEvents(_resource->recv_cq, _recv_cq_events);
+ _recv_cq_events = 0;
}
- _cq_events += events;
- if (_cq_events >= MAX_CQ_EVENTS) {
- IbvAckCqEvents(_resource->cq, _cq_events);
- _cq_events = 0;
+ return 0;
+}
+
+
+
+int RdmaEndpoint::ReqNotifyCq(bool send_cq) {
+ errno = ibv_req_notify_cq(
+ send_cq ? _resource->send_cq : _resource->recv_cq,
+ send_cq ? 0 : 1);
+ if (0 != errno) {
+ const int saved_errno = errno;
+ PLOG(WARNING) << "Fail to arm" << (send_cq ? "send" : "recv")
Review Comment:
Missing space in the error message string concatenation. The output will be
"Fail to armsend CQ comp channel" or "Fail to armrecv CQ comp channel" without
spaces.
```suggestion
PLOG(WARNING) << "Fail to arm " << (send_cq ? "send" : "recv")
```
##########
src/brpc/rdma/rdma_endpoint.cpp:
##########
@@ -1491,20 +1580,23 @@ std::string RdmaEndpoint::GetStateStr() const {
}
}
-void RdmaEndpoint::DebugInfo(std::ostream& os) const {
- os << "\nrdma_state=ON"
- << "\nhandshake_state=" << GetStateStr()
- << "\nrdma_window_size=" <<
_window_size.load(butil::memory_order_relaxed)
- << "\nrdma_local_window_capacity=" << _local_window_capacity
- << "\nrdma_remote_window_capacity=" << _remote_window_capacity
- << "\nrdma_sbuf_head=" << _sq_current
- << "\nrdma_sbuf_tail=" << _sq_sent
- << "\nrdma_rbuf_head=" << _rq_received
- << "\nrdma_unacked_rq_wr=" << _new_rq_wrs
- << "\nrdma_received_ack=" << _accumulated_ack
- << "\nrdma_unsolicited_sent=" << _unsolicited
- << "\nrdma_unsignaled_sq_wr=" << _sq_unsignaled
- << "\n";
+void RdmaEndpoint::DebugInfo(std::ostream& os, butil::StringPiece connector)
const {
+ os << "rdma_state=ON"
+ << connector << "handshake_state=" << GetStateStr()
+ << connector << "rdma__sq_imm_window_size=" << _sq_imm_window_size
+ << connector << "rdma_remote_rq_window_size=" <<
_remote_rq_window_size.load(butil::memory_order_relaxed)
+ << connector << "rdma_sq_window_size=" <<
_sq_window_size.load(butil::memory_order_relaxed)
+ << connector << "rdma_local_window_capacity=" << _local_window_capacity
+ << connector << "rdma_remote_window_capacity=" <<
_remote_window_capacity
+ << connector << "rdma_sbuf_head=" << _sq_current
+ << connector << "rdma_sbuf_tail=" << _sq_sent
+ << connector << "rdma_rbuf_head=" << _rq_received
+ << connector << "rdma_unacked_rq_wr=" << _new_rq_wrs
+ << connector << "rdma_received_ack=" << _accumulated_ack
+ << connector << "rdma_unsolicited_sent=" << _unsolicited
+ << connector << "rdma_unsignaled_sq_wr=" << _sq_unsignaled
+ << connector << "rdma_new_rq_wrs=" <<
_new_rq_wrs.load(butil::memory_order_relaxed)
+ << connector << "";
Review Comment:
The debug output ends with an empty string concatenation, which serves no
purpose. Either remove this line entirely or drop the empty string from the
concatenation.
```suggestion
<< connector;
```
##########
src/brpc/rdma/rdma_endpoint.cpp:
##########
@@ -1491,20 +1580,23 @@ std::string RdmaEndpoint::GetStateStr() const {
}
}
-void RdmaEndpoint::DebugInfo(std::ostream& os) const {
- os << "\nrdma_state=ON"
- << "\nhandshake_state=" << GetStateStr()
- << "\nrdma_window_size=" <<
_window_size.load(butil::memory_order_relaxed)
- << "\nrdma_local_window_capacity=" << _local_window_capacity
- << "\nrdma_remote_window_capacity=" << _remote_window_capacity
- << "\nrdma_sbuf_head=" << _sq_current
- << "\nrdma_sbuf_tail=" << _sq_sent
- << "\nrdma_rbuf_head=" << _rq_received
- << "\nrdma_unacked_rq_wr=" << _new_rq_wrs
- << "\nrdma_received_ack=" << _accumulated_ack
- << "\nrdma_unsolicited_sent=" << _unsolicited
- << "\nrdma_unsignaled_sq_wr=" << _sq_unsignaled
- << "\n";
+void RdmaEndpoint::DebugInfo(std::ostream& os, butil::StringPiece connector)
const {
+ os << "rdma_state=ON"
+ << connector << "handshake_state=" << GetStateStr()
+ << connector << "rdma__sq_imm_window_size=" << _sq_imm_window_size
Review Comment:
There is an inconsistent double underscore in the variable name:
'rdma__sq_imm_window_size' should be 'rdma_sq_imm_window_size' to match the
naming pattern of all other variables in this debug output.
```suggestion
<< connector << "rdma_sq_imm_window_size=" << _sq_imm_window_size
```
##########
src/brpc/rdma/rdma_endpoint.cpp:
##########
@@ -790,13 +803,16 @@ ssize_t RdmaEndpoint::CutFromIOBufList(butil::IOBuf**
from, size_t ndata) {
size_t total_len = 0;
size_t current = 0;
- uint32_t window = 0;
+ uint32_t remote_rq_window_size =
+ _remote_rq_window_size.load(butil::memory_order_relaxed);
+ uint32_t sq_window_size =
+ _sq_window_size.load(butil::memory_order_relaxed);
ibv_send_wr wr;
int max_sge = GetRdmaMaxSge();
ibv_sge sglist[max_sge];
while (current < ndata) {
Review Comment:
The window size values are loaded once at the start of the function, but
these atomic variables can be modified by other threads (HandleCompletion).
After the first iteration of the while loop, the local variables
`remote_rq_window_size` and `sq_window_size` become stale and don't reflect the
actual window sizes that were decremented at lines 923-924. This could lead to
incorrect behavior where the loop continues even though window space has been
exhausted, or stops prematurely when space becomes available.
```suggestion
ibv_send_wr wr;
int max_sge = GetRdmaMaxSge();
ibv_sge sglist[max_sge];
while (current < ndata) {
uint32_t remote_rq_window_size =
_remote_rq_window_size.load(butil::memory_order_relaxed);
uint32_t sq_window_size =
_sq_window_size.load(butil::memory_order_relaxed);
```
##########
src/brpc/rdma/rdma_endpoint.cpp:
##########
@@ -1280,88 +1329,112 @@ void RdmaEndpoint::DeallocateResources() {
move_to_rdma_resource_list = true;
}
}
- int fd = -1;
- if (_resource->comp_channel) {
- fd = _resource->comp_channel->fd;
+
+ if (NULL != _resource->send_cq) {
+ IbvAckCqEvents(_resource->send_cq, _send_cq_events);
+ }
+ if (NULL != _resource->recv_cq) {
+ IbvAckCqEvents(_resource->recv_cq, _recv_cq_events);
}
- int err;
+
+ bool remove_consumer = true;
if (!move_to_rdma_resource_list) {
- if (_resource->qp) {
- err = IbvDestroyQp(_resource->qp);
- if (err != 0) {
- LOG(WARNING) << "Fail to destroy QP: " << berror(err);
- }
+ if (NULL != _resource->qp) {
+ int err = IbvDestroyQp(_resource->qp);
+ LOG_IF(WARNING, 0 != err) << "Fail to destroy QP: " << berror(err);
_resource->qp = NULL;
}
- if (_resource->cq) {
- IbvAckCqEvents(_resource->cq, _cq_events);
- err = IbvDestroyCq(_resource->cq);
- if (err != 0) {
- PLOG(WARNING) << "Fail to destroy CQ: " << berror(err);
- }
- _resource->cq = NULL;
- }
- if (_resource->comp_channel) {
- // destroy comp_channel will destroy this fd
+
+ DeallocateCq(_resource->polling_cq);
+ DeallocateCq(_resource->send_cq);
+ DeallocateCq(_resource->recv_cq);
+
+ if (NULL != _resource->comp_channel) {
+ // Destroy send_comp_channel will destroy this fd,
// so that we should remove it from epoll fd first
- _socket->_io_event.RemoveConsumer(fd);
- fd = -1;
- err = IbvDestroyCompChannel(_resource->comp_channel);
- if (err != 0) {
- LOG(WARNING) << "Fail to destroy CQ channel: " << berror(err);
- }
- _resource->comp_channel = NULL;
+ int fd = _resource->comp_channel->fd;
+ GetGlobalEventDispatcher(fd,
_socket->_io_event.bthread_tag()).RemoveConsumer(fd);
+ remove_consumer = false;
+ int err = IbvDestroyCompChannel(_resource->comp_channel);
+ LOG_IF(WARNING, 0 != err) << "Fail to destroy CQ channel: " <<
berror(err);
+
}
+
+ _resource->polling_cq = NULL;
+ _resource->send_cq = NULL;
+ _resource->recv_cq = NULL;
+ _resource->comp_channel = NULL;
delete _resource;
Review Comment:
The `_resource` pointer is deleted but not set to NULL when
`move_to_rdma_resource_list` is false. However, at line 216 in the Reset()
function, `_resource` is unconditionally set to NULL. This creates an
inconsistency: if DeallocateResources deletes the resource and Reset later sets
it to NULL, there is a window in which `_resource` could be accessed after
deletion but before Reset is called. The function should set `_resource =
NULL` immediately after the delete at line 1367.
```suggestion
delete _resource;
_resource = NULL;
```
##########
src/brpc/rdma/rdma_endpoint.cpp:
##########
@@ -1280,88 +1329,112 @@ void RdmaEndpoint::DeallocateResources() {
move_to_rdma_resource_list = true;
}
}
- int fd = -1;
- if (_resource->comp_channel) {
- fd = _resource->comp_channel->fd;
+
+ if (NULL != _resource->send_cq) {
+ IbvAckCqEvents(_resource->send_cq, _send_cq_events);
+ }
+ if (NULL != _resource->recv_cq) {
+ IbvAckCqEvents(_resource->recv_cq, _recv_cq_events);
}
- int err;
+
+ bool remove_consumer = true;
if (!move_to_rdma_resource_list) {
- if (_resource->qp) {
- err = IbvDestroyQp(_resource->qp);
- if (err != 0) {
- LOG(WARNING) << "Fail to destroy QP: " << berror(err);
- }
+ if (NULL != _resource->qp) {
+ int err = IbvDestroyQp(_resource->qp);
+ LOG_IF(WARNING, 0 != err) << "Fail to destroy QP: " << berror(err);
_resource->qp = NULL;
}
- if (_resource->cq) {
- IbvAckCqEvents(_resource->cq, _cq_events);
- err = IbvDestroyCq(_resource->cq);
- if (err != 0) {
- PLOG(WARNING) << "Fail to destroy CQ: " << berror(err);
- }
- _resource->cq = NULL;
- }
- if (_resource->comp_channel) {
- // destroy comp_channel will destroy this fd
+
+ DeallocateCq(_resource->polling_cq);
+ DeallocateCq(_resource->send_cq);
+ DeallocateCq(_resource->recv_cq);
+
+ if (NULL != _resource->comp_channel) {
+ // Destroy send_comp_channel will destroy this fd,
// so that we should remove it from epoll fd first
- _socket->_io_event.RemoveConsumer(fd);
- fd = -1;
- err = IbvDestroyCompChannel(_resource->comp_channel);
- if (err != 0) {
- LOG(WARNING) << "Fail to destroy CQ channel: " << berror(err);
- }
- _resource->comp_channel = NULL;
+ int fd = _resource->comp_channel->fd;
+ GetGlobalEventDispatcher(fd,
_socket->_io_event.bthread_tag()).RemoveConsumer(fd);
+ remove_consumer = false;
+ int err = IbvDestroyCompChannel(_resource->comp_channel);
+ LOG_IF(WARNING, 0 != err) << "Fail to destroy CQ channel: " <<
berror(err);
+
}
+
+ _resource->polling_cq = NULL;
+ _resource->send_cq = NULL;
+ _resource->recv_cq = NULL;
+ _resource->comp_channel = NULL;
delete _resource;
- _resource = NULL;
}
- SocketUniquePtr s;
- if (_cq_sid != INVALID_SOCKET_ID) {
+ if (INVALID_SOCKET_ID != _cq_sid) {
+ SocketUniquePtr s;
if (Socket::Address(_cq_sid, &s) == 0) {
- s->_user = NULL; // do not release user (this RdmaEndpoint)
- if (fd >= 0) {
- _socket->_io_event.RemoveConsumer(fd);
+ if (remove_consumer) {
+ s->_io_event.RemoveConsumer(s->_fd);
}
- s->_fd = -1; // already remove fd from epoll fd
+ s->_user = NULL; // Do not release user (this RdmaEndpoint).
+ s->_fd = -1; // Already remove fd from epoll fd.
s->SetFailed();
}
- _cq_sid = INVALID_SOCKET_ID;
}
if (move_to_rdma_resource_list) {
- if (_resource->cq) {
- IbvAckCqEvents(_resource->cq, _cq_events);
- }
BAIDU_SCOPED_LOCK(*g_rdma_resource_mutex);
_resource->next = g_rdma_resource_list;
g_rdma_resource_list = _resource;
}
-
- _resource = NULL;
}
static const int MAX_CQ_EVENTS = 128;
-int RdmaEndpoint::GetAndAckEvents() {
- int events = 0; void* context = NULL;
- while (1) {
- if (IbvGetCqEvent(_resource->comp_channel, &_resource->cq, &context)
!= 0) {
+int RdmaEndpoint::GetAndAckEvents(SocketUniquePtr& s) {
+ void* context = NULL;
+ ibv_cq* cq = NULL;
+ while (true) {
+ if (IbvGetCqEvent(_resource->comp_channel, &cq, &context) != 0) {
if (errno != EAGAIN) {
+ const int saved_errno = errno;
+ PLOG(ERROR) << "Fail to get cq event from " <<
s->description();
+ s->SetFailed(saved_errno, "Fail to get cq event from %s: %s",
+ s->description().c_str(), berror(saved_errno));
return -1;
}
break;
}
- ++events;
+ if (cq == _resource->send_cq) {
+ ++_send_cq_events;
+ } else if (cq == _resource->recv_cq) {
+ ++_recv_cq_events;
Review Comment:
If an unexpected CQ event is received (neither send_cq nor recv_cq), it will
be silently ignored without incrementing any event counter. However, this CQ
event still needs to be acknowledged later. This could lead to a mismatch
between the number of events received and acknowledged, potentially causing
resource leaks or blocking issues. Consider logging a warning or handling this
case explicitly.
```suggestion
++_recv_cq_events;
} else {
// Unexpected CQ event that does not belong to this endpoint's
send/recv CQs.
LOG(WARNING) << "Unexpected CQ event from cq=" << cq
<< " for endpoint " << s->description();
// Acknowledge this single event immediately to avoid leaking
unacknowledged events.
if (cq != NULL) {
IbvAckCqEvents(cq, 1);
}
```
##########
src/brpc/rdma/rdma_endpoint.cpp:
##########
@@ -1491,20 +1580,23 @@ std::string RdmaEndpoint::GetStateStr() const {
}
}
-void RdmaEndpoint::DebugInfo(std::ostream& os) const {
- os << "\nrdma_state=ON"
- << "\nhandshake_state=" << GetStateStr()
- << "\nrdma_window_size=" <<
_window_size.load(butil::memory_order_relaxed)
- << "\nrdma_local_window_capacity=" << _local_window_capacity
- << "\nrdma_remote_window_capacity=" << _remote_window_capacity
- << "\nrdma_sbuf_head=" << _sq_current
- << "\nrdma_sbuf_tail=" << _sq_sent
- << "\nrdma_rbuf_head=" << _rq_received
- << "\nrdma_unacked_rq_wr=" << _new_rq_wrs
- << "\nrdma_received_ack=" << _accumulated_ack
- << "\nrdma_unsolicited_sent=" << _unsolicited
- << "\nrdma_unsignaled_sq_wr=" << _sq_unsignaled
- << "\n";
+void RdmaEndpoint::DebugInfo(std::ostream& os, butil::StringPiece connector)
const {
+ os << "rdma_state=ON"
+ << connector << "handshake_state=" << GetStateStr()
+ << connector << "rdma__sq_imm_window_size=" << _sq_imm_window_size
+ << connector << "rdma_remote_rq_window_size=" <<
_remote_rq_window_size.load(butil::memory_order_relaxed)
+ << connector << "rdma_sq_window_size=" <<
_sq_window_size.load(butil::memory_order_relaxed)
+ << connector << "rdma_local_window_capacity=" << _local_window_capacity
+ << connector << "rdma_remote_window_capacity=" <<
_remote_window_capacity
+ << connector << "rdma_sbuf_head=" << _sq_current
+ << connector << "rdma_sbuf_tail=" << _sq_sent
+ << connector << "rdma_rbuf_head=" << _rq_received
+ << connector << "rdma_unacked_rq_wr=" << _new_rq_wrs
+ << connector << "rdma_received_ack=" << _accumulated_ack
+ << connector << "rdma_unsolicited_sent=" << _unsolicited
+ << connector << "rdma_unsignaled_sq_wr=" << _sq_unsignaled
+ << connector << "rdma_new_rq_wrs=" <<
_new_rq_wrs.load(butil::memory_order_relaxed)
Review Comment:
The variable '_new_rq_wrs' is output twice in the debug info: once at line
1594 as 'rdma_unacked_rq_wr' and again at line 1598 as 'rdma_new_rq_wrs'. This
appears to be duplicated information that should be consolidated into a single
output.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]