rbulter commented on a change in pull request #279:
URL: https://github.com/apache/celix/pull/279#discussion_r542433619
##########
File path: bundles/pubsub/pubsub_admin_tcp/src/pubsub_tcp_handler.c
##########
@@ -964,58 +998,30 @@ int
pubsub_tcpHandler_addAcceptConnectionCallback(pubsub_tcpHandler_t *handle, v
return result;
}
-static inline
-int pubsub_tcpHandler_writeSocket(pubsub_tcpHandler_t *handle,
psa_tcp_connection_entry_t *entry, struct msghdr* msg, unsigned int size, int
flag ) {
- int nbytes = 0;
- int msgSize = 0;
- if (entry->fd >= 0 && size && msg->msg_iovlen) {
- int expectedReadSize = size;
- unsigned int offset = 0;
- nbytes = size;
- while (nbytes > 0 && expectedReadSize > 0) {
- // Read the message header
- nbytes = sendmsg(entry->fd, msg, flag | MSG_NOSIGNAL);
- // Update admin
- expectedReadSize -= nbytes;
- msgSize += nbytes;
- // Not all written
- if (expectedReadSize && nbytes > 0) {
- unsigned int readSize = 0;
- unsigned int readIndex = 0;
- unsigned int i = 0;
- for (i = 0; i < msg->msg_iovlen; i++) {
- if (nbytes < msg->msg_iov[i].iov_len) {
- readIndex = i;
- break;
- }
- readSize+= msg->msg_iov[i].iov_len;
- }
- msg->msg_iov = &msg->msg_iov[readIndex];
- msg->msg_iovlen -= readIndex;
- char* buffer = (char*)msg->msg_iov->iov_base;
- offset = nbytes - readSize;
- msg->msg_iov->iov_base = &buffer[offset];
- msg->msg_iov->iov_len = msg->msg_iov->iov_len - offset;
- }
- }
- }
- if (nbytes <=0) msgSize = nbytes;
- return msgSize;
-}
+
//
// Write large data to TCP.
//
int pubsub_tcpHandler_write(pubsub_tcpHandler_t *handle,
pubsub_protocol_message_t *message, struct iovec *msgIoVec,
size_t msg_iov_len, int flags) {
- celixThreadRwlock_readLock(&handle->dbLock);
int result = 0;
int connFdCloseQueue[hashMap_size(handle->connection_fd_map)];
Review comment:
Solved
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]