This is an automated email from the ASF dual-hosted git repository.

liuhan pushed a commit to branch program-too-long
in repository https://gitbox.apache.org/repos/asf/skywalking-rover.git

commit 1ffe0bcb95d6d4492bbc434da18e56ec8980d084
Author: mrproliu <[email protected]>
AuthorDate: Fri Dec 27 07:43:53 2024 +0800

    Reduce program size
---
 bpf/include/socket_data.h | 83 ++++++++++++++++++++---------------------------
 1 file changed, 35 insertions(+), 48 deletions(-)

diff --git a/bpf/include/socket_data.h b/bpf/include/socket_data.h
index 248b34c..59e02cb 100644
--- a/bpf/include/socket_data.h
+++ b/bpf/include/socket_data.h
@@ -114,8 +114,10 @@ static __always_inline void 
__upload_socket_data_with_buffer(void *ctx, __u8 ind
         return;
     }
 
-    if (size > sizeof(socket_data_event->buffer)) {
+    bool is_reach_buffer_size = false;
+    if (size >= sizeof(socket_data_event->buffer)) {
         size = sizeof(socket_data_event->buffer);
+        is_reach_buffer_size = true;
     }
     if (size <= 0) {
         rover_discard_buf(socket_data_event);
@@ -137,8 +139,9 @@ static __always_inline void 
__upload_socket_data_with_buffer(void *ctx, __u8 ind
     socket_data_event->data_len = size;
     socket_data_event->finished = is_finished;
     socket_data_event->have_reduce_after_chunk = have_reduce_after_chunk;
-    asm volatile("%[size] &= 0x7ff;\n" ::[size] "+r"(size) :);
-    bpf_probe_read(&socket_data_event->buffer, size, buf);
+    asm volatile("%[size] &= 0x7ff;\n" ::[size] "+r"(size) :);  // mask for the BPF verifier; caps size at 2047
+    // if size reached the max buffer size, add 1 back to restore it to 2048 (sizeof(socket_data_event->buffer) == 2048)
+    bpf_probe_read(&socket_data_event->buffer, is_reach_buffer_size ? size + 1 
: size, buf);
     rover_submit_buf(ctx, &socket_data_upload_queue, socket_data_event, 
sizeof(*socket_data_event));
 }
 
@@ -148,14 +151,8 @@ static __always_inline void upload_socket_data_buf(void 
*ctx, char* buf, ssize_t
     for (__u8 index = 0; index < SOCKET_UPLOAD_CHUNK_LIMIT; index++) {
         // calculate bytes need to send
         ssize_t remaining = size - already_send;
-        size_t need_send_in_chunk = 0;
-        __u8 have_reduce_after_chunk = 0;
-        if (remaining > MAX_TRANSMIT_SOCKET_READ_LENGTH) {
-            need_send_in_chunk = MAX_TRANSMIT_SOCKET_READ_LENGTH;
-            have_reduce_after_chunk = 1;
-        } else {
-            need_send_in_chunk = remaining;
-        }
+        size_t need_send_in_chunk = remaining > 
MAX_TRANSMIT_SOCKET_READ_LENGTH ? MAX_TRANSMIT_SOCKET_READ_LENGTH : remaining;
+        __u8 have_reduce_after_chunk = remaining > 
MAX_TRANSMIT_SOCKET_READ_LENGTH ? 1 : 0;
 
         __u32 is_finished = (need_send_in_chunk + already_send) >= size || 
index == (SOCKET_UPLOAD_CHUNK_LIMIT - 1) ? true : false;
         __u8 sequence = index;
@@ -169,47 +166,37 @@ static __always_inline void upload_socket_data_buf(void 
*ctx, char* buf, ssize_t
     }
 }
 
-#define UPLOAD_PER_SOCKET_DATA_IOV() \
-if (iov_index < iovlen) {                                                   \
-    struct iovec cur_iov;                                                   \
-    bpf_probe_read(&cur_iov, sizeof(cur_iov), &iov[iov_index]);             \
-    ssize_t remaining = size - already_send;                                \
-    size_t need_send_in_chunk = remaining - cur_iov_sended;                 \
-    __u8 have_reduce_after_chunk = 0;                                       \
-    if (cur_iov_sended + need_send_in_chunk > cur_iov.iov_len) {            \
-        need_send_in_chunk = cur_iov.iov_len - cur_iov_sended;              \
-        if (need_send_in_chunk > MAX_TRANSMIT_SOCKET_READ_LENGTH) {         \
-            need_send_in_chunk = MAX_TRANSMIT_SOCKET_READ_LENGTH;           \
-            have_reduce_after_chunk = 1;                                    \
-        } else {                                                            \
-            iov_index++;                                                    \
-            cur_iov_sended = 0;                                             \
-        }                                                                   \
-    } else if (need_send_in_chunk > MAX_TRANSMIT_SOCKET_READ_LENGTH) {      \
-        need_send_in_chunk = MAX_TRANSMIT_SOCKET_READ_LENGTH;               \
-        have_reduce_after_chunk = 1;                                        \
-    }                                                                       \
-    __u32 is_finished = (need_send_in_chunk + already_send) >= size || 
loop_count == (SOCKET_UPLOAD_CHUNK_LIMIT - 1) ? true : false;                   
         \
-    __upload_socket_data_with_buffer(ctx, loop_count, cur_iov.iov_base + 
cur_iov_sended, need_send_in_chunk, is_finished, have_reduce_after_chunk, 
args);      \
-    already_send += need_send_in_chunk;                                        
                                                      \
-    loop_count++;                                                              
                                                      \
-}
-
 static __always_inline void upload_socket_data_iov(void *ctx, struct iovec* 
iov, const size_t iovlen, ssize_t size, struct upload_data_args *args) {
     ssize_t already_send = 0;
     ssize_t cur_iov_sended = 0;
     __u8 iov_index = 0;
-    __u8 loop_count = 0;
-
-    // each count is same with SOCKET_UPLOAD_CHUNK_LIMIT
-    UPLOAD_PER_SOCKET_DATA_IOV();
-    UPLOAD_PER_SOCKET_DATA_IOV();
-    UPLOAD_PER_SOCKET_DATA_IOV();
-    UPLOAD_PER_SOCKET_DATA_IOV();
-    UPLOAD_PER_SOCKET_DATA_IOV();
-    UPLOAD_PER_SOCKET_DATA_IOV();
-    UPLOAD_PER_SOCKET_DATA_IOV();
-    UPLOAD_PER_SOCKET_DATA_IOV();
+
+#pragma unroll
+    for (__u8 index = 0; index < SOCKET_UPLOAD_CHUNK_LIMIT && iov_index < 
iovlen; index++) {
+        struct iovec cur_iov;
+        bpf_probe_read(&cur_iov, sizeof(cur_iov), &iov[iov_index]);
+
+        ssize_t remaining = size - already_send;
+        size_t need_send_in_chunk = remaining - cur_iov_sended;
+        __u8 have_reduce_after_chunk = 0;
+        if (cur_iov_sended + need_send_in_chunk > cur_iov.iov_len) {
+            need_send_in_chunk = cur_iov.iov_len - cur_iov_sended;
+            if (need_send_in_chunk > MAX_TRANSMIT_SOCKET_READ_LENGTH) {
+                need_send_in_chunk = MAX_TRANSMIT_SOCKET_READ_LENGTH;
+                have_reduce_after_chunk = 1;
+            } else {
+                iov_index++;
+                cur_iov_sended = 0;
+            }
+        } else if (need_send_in_chunk > MAX_TRANSMIT_SOCKET_READ_LENGTH) {
+            need_send_in_chunk = MAX_TRANSMIT_SOCKET_READ_LENGTH;
+            have_reduce_after_chunk = 1;
+        }
+
+        __u32 is_finished = (need_send_in_chunk + already_send) >= size || 
index == (SOCKET_UPLOAD_CHUNK_LIMIT - 1) ? true : false;
+        __upload_socket_data_with_buffer(ctx, index, cur_iov.iov_base + 
cur_iov_sended, need_send_in_chunk, is_finished, have_reduce_after_chunk, args);
+        already_send += need_send_in_chunk;
+    }
 }
 
 struct socket_data_last_id_t {

Reply via email to