This is an automated email from the ASF dual-hosted git repository.

liuhan pushed a commit to branch program-too-long
in repository https://gitbox.apache.org/repos/asf/skywalking-rover.git


The following commit(s) were added to refs/heads/program-too-long by this push:
     new 4392f9b  Reduce program size
4392f9b is described below

commit 4392f9b673c383e38b1c6e852ef5f760ab1e44ca
Author: mrproliu <[email protected]>
AuthorDate: Fri Dec 27 08:12:57 2024 +0800

    Reduce program size
---
 bpf/include/socket_data.h | 133 +++++++++++++++++++++++++---------------------
 1 file changed, 71 insertions(+), 62 deletions(-)

diff --git a/bpf/include/socket_data.h b/bpf/include/socket_data.h
index e5842b3..b4076a7 100644
--- a/bpf/include/socket_data.h
+++ b/bpf/include/socket_data.h
@@ -107,45 +107,29 @@ static __always_inline struct upload_data_args* generate_socket_upload_args() {
     return bpf_map_lookup_elem(&socket_data_upload_args_per_cpu_map, &kZero);
 }
 
-static __always_inline void __upload_socket_data_with_buffer(void *ctx, __u8 
index, char* buf, size_t size, __u32 is_finished, __u8 have_reduce_after_chunk, 
struct upload_data_args *args) {
-    struct socket_data_upload_event *socket_data_event;
-    socket_data_event = rover_reserve_buf(&socket_data_upload_queue, 
sizeof(*socket_data_event));
-    if (socket_data_event == NULL) {
+static __always_inline void __upload_socket_data_with_buffer(void *ctx, __u8 
index, char* buf, size_t size, __u32 is_finished, __u8 have_reduce_after_chunk, 
struct socket_data_upload_event *event) {
+    if (size <= 0) {
         return;
     }
 
     bool is_reach_buffer_size = false;
-    if (size >= sizeof(socket_data_event->buffer)) {
-        size = sizeof(socket_data_event->buffer);
+    if (size > sizeof(event->buffer)) {
+        size = sizeof(event->buffer);
         is_reach_buffer_size = true;
     }
-    if (size <= 0) {
-        rover_discard_buf(socket_data_event);
-        return;
-    }
 
     // basic data
-    socket_data_event->start_time = args->start_time;
-    socket_data_event->end_time = args->end_time;
-    socket_data_event->protocol = args->connection_protocol;
-    socket_data_event->direction = args->data_direction;
-    socket_data_event->conid = args->con_id;
-    socket_data_event->randomid = args->random_id;
-    socket_data_event->total_size = args->bytes_count;
-    socket_data_event->data_id = args->socket_data_id;
-    socket_data_event->prev_data_id = args->prev_socket_data_id;
-
-    socket_data_event->sequence = index;
-    socket_data_event->data_len = size;
-    socket_data_event->finished = is_finished;
-    socket_data_event->have_reduce_after_chunk = have_reduce_after_chunk;
+    event->sequence = index;
+    event->data_len = size;
+    event->finished = is_finished;
+    event->have_reduce_after_chunk = have_reduce_after_chunk;
     asm volatile("%[size] &= 0x7ff;\n" ::[size] "+r"(size) :);  // for the BPF 
verifier, and the max size is: 2047
     // is the size reach the max buffer size, then it must add 1 back to 
2048(sizeof(socket_data_event->buffer)==2048)
-    bpf_probe_read(&socket_data_event->buffer, is_reach_buffer_size ? size + 1 
: size, buf);
-    rover_submit_buf(ctx, &socket_data_upload_queue, socket_data_event, 
sizeof(*socket_data_event));
+    bpf_probe_read(&event->buffer, is_reach_buffer_size ? size + 1 : size, 
buf);
+    rover_submit_buf(ctx, &socket_data_upload_queue, event, sizeof(*event));
 }
 
-static __always_inline void upload_socket_data_buf(void *ctx, char* buf, 
ssize_t size, struct upload_data_args *args, __u8 force_unfinished) {
+static __always_inline void upload_socket_data_buf(void *ctx, char* buf, 
ssize_t size, struct socket_data_upload_event *event, __u8 force_unfinished) {
     ssize_t already_send = 0;
 #pragma unroll
     for (__u8 index = 0; index < SOCKET_UPLOAD_CHUNK_LIMIT; index++) {
@@ -158,48 +142,56 @@ static __always_inline void upload_socket_data_buf(void *ctx, char* buf, ssize_t
         __u8 sequence = index;
         if (force_unfinished == 1 && need_send_in_chunk > 0) {
             is_finished = 0;
-            sequence = generate_socket_sequence(args->con_id, 
args->socket_data_id);
+            sequence = generate_socket_sequence(event->conid, event->data_id);
         }
-        __upload_socket_data_with_buffer(ctx, sequence, buf + already_send, 
need_send_in_chunk, is_finished, have_reduce_after_chunk, args);
+        __upload_socket_data_with_buffer(ctx, sequence, buf + already_send, 
need_send_in_chunk, is_finished, have_reduce_after_chunk, event);
         already_send += need_send_in_chunk;
 
     }
 }
 
-static __always_inline void upload_socket_data_iov(void *ctx, struct iovec* 
iov, const size_t iovlen, ssize_t size, struct upload_data_args *args) {
+
+#define UPLOAD_PER_SOCKET_DATA_IOV() \
+if (iov_index < iovlen) {                                                   \
+    struct iovec cur_iov;                                                   \
+    bpf_probe_read(&cur_iov, sizeof(cur_iov), &iov[iov_index]);             \
+    ssize_t remaining = size - already_send;                                \
+    size_t need_send_in_chunk = remaining - cur_iov_sended;                 \
+    __u8 have_reduce_after_chunk = 0;                                       \
+    if (cur_iov_sended + need_send_in_chunk > cur_iov.iov_len) {            \
+        need_send_in_chunk = cur_iov.iov_len - cur_iov_sended;              \
+        if (need_send_in_chunk > MAX_TRANSMIT_SOCKET_READ_LENGTH) {         \
+            need_send_in_chunk = MAX_TRANSMIT_SOCKET_READ_LENGTH;           \
+            have_reduce_after_chunk = 1;                                    \
+        } else {                                                            \
+            iov_index++;                                                    \
+            cur_iov_sended = 0;                                             \
+        }                                                                   \
+    } else if (need_send_in_chunk > MAX_TRANSMIT_SOCKET_READ_LENGTH) {      \
+        need_send_in_chunk = MAX_TRANSMIT_SOCKET_READ_LENGTH;               \
+        have_reduce_after_chunk = 1;                                        \
+    }                                                                       \
+    __u32 is_finished = (need_send_in_chunk + already_send) >= size || loop_count == (SOCKET_UPLOAD_CHUNK_LIMIT - 1) ? true : false;           \
+    __upload_socket_data_with_buffer(ctx, loop_count, cur_iov.iov_base + cur_iov_sended, need_send_in_chunk, is_finished, have_reduce_after_chunk, event);      \
+    already_send += need_send_in_chunk;                                    \
+    loop_count++;                                                          \
+}
+
+static __always_inline void upload_socket_data_iov(void *ctx, struct iovec* 
iov, const size_t iovlen, ssize_t size, struct socket_data_upload_event *event) 
{
     ssize_t already_send = 0;
     ssize_t cur_iov_sended = 0;
     __u8 iov_index = 0;
-
-#pragma unroll
-    for (__u8 index = 0; index < SOCKET_UPLOAD_CHUNK_LIMIT; index++) {
-        if (iov_index >= iovlen) {
-            return;
-        }
-        struct iovec cur_iov;
-        bpf_probe_read(&cur_iov, sizeof(cur_iov), &iov[iov_index]);
-
-        ssize_t remaining = size - already_send;
-        size_t need_send_in_chunk = remaining - cur_iov_sended;
-        __u8 have_reduce_after_chunk = 0;
-        if (cur_iov_sended + need_send_in_chunk > cur_iov.iov_len) {
-            need_send_in_chunk = cur_iov.iov_len - cur_iov_sended;
-            if (need_send_in_chunk > MAX_TRANSMIT_SOCKET_READ_LENGTH) {
-                need_send_in_chunk = MAX_TRANSMIT_SOCKET_READ_LENGTH;
-                have_reduce_after_chunk = 1;
-            } else {
-                iov_index++;
-                cur_iov_sended = 0;
-            }
-        } else if (need_send_in_chunk > MAX_TRANSMIT_SOCKET_READ_LENGTH) {
-            need_send_in_chunk = MAX_TRANSMIT_SOCKET_READ_LENGTH;
-            have_reduce_after_chunk = 1;
-        }
-
-        __u32 is_finished = (need_send_in_chunk + already_send) >= size || 
index == (SOCKET_UPLOAD_CHUNK_LIMIT - 1) ? true : false;
-        __upload_socket_data_with_buffer(ctx, index, cur_iov.iov_base + 
cur_iov_sended, need_send_in_chunk, is_finished, have_reduce_after_chunk, args);
-        already_send += need_send_in_chunk;
-    }
+    __u8 loop_count = 0;
+
+    // each count is same with SOCKET_UPLOAD_CHUNK_LIMIT
+    UPLOAD_PER_SOCKET_DATA_IOV();
+    UPLOAD_PER_SOCKET_DATA_IOV();
+    UPLOAD_PER_SOCKET_DATA_IOV();
+    UPLOAD_PER_SOCKET_DATA_IOV();
+    UPLOAD_PER_SOCKET_DATA_IOV();
+    UPLOAD_PER_SOCKET_DATA_IOV();
+    UPLOAD_PER_SOCKET_DATA_IOV();
+    UPLOAD_PER_SOCKET_DATA_IOV();
 }
 
 struct socket_data_last_id_t {
@@ -219,15 +211,32 @@ static __inline void upload_socket_data(void *ctx, struct upload_data_args *args
     if (args->connection_protocol == CONNECTION_PROTOCOL_UNKNOWN || 
args->connection_ssl != args->socket_data_ssl || 
args->connection_skip_data_upload == 1) {
         return;
     }
+    // generate event
+    __u32 kZero = 0;
+    struct socket_data_upload_event *event = 
bpf_map_lookup_elem(&socket_data_upload_event_per_cpu_map, &kZero);
+    if (event == NULL) {
+        return;
+    }
+
+    // basic data
+    event->start_time = args->start_time;
+    event->end_time = args->end_time;
+    event->protocol = args->connection_protocol;
+    event->direction = args->data_direction;
+    event->conid = args->con_id;
+    event->randomid = args->random_id;
+    event->total_size = args->bytes_count;
+    event->data_id = args->socket_data_id;
+
     struct socket_data_last_id_t *latest = 
bpf_map_lookup_elem(&socket_data_last_id_map, &args->con_id);
     args->prev_socket_data_id = 0;
     if (latest != NULL && latest->random_id == args->random_id) {
         args->prev_socket_data_id = latest->socket_data_id;
     }
     if (args->socket_data_buf != NULL) {
-        upload_socket_data_buf(ctx, args->socket_data_buf, args->bytes_count, 
args, args->socket_ssl_buffer_force_unfinished);
+        upload_socket_data_buf(ctx, args->socket_data_buf, args->bytes_count, 
event, args->socket_ssl_buffer_force_unfinished);
     } else if (args->socket_data_iovec != NULL) {
-        upload_socket_data_iov(ctx, args->socket_data_iovec, 
args->socket_data_iovlen, args->bytes_count, args);
+        upload_socket_data_iov(ctx, args->socket_data_iovec, 
args->socket_data_iovlen, args->bytes_count, event);
     }
 
     if (latest == NULL || latest->socket_data_id != args->socket_data_id) {

Reply via email to