This is an automated email from the ASF dual-hosted git repository.

liuhan pushed a commit to branch program-long
in repository https://gitbox.apache.org/repos/asf/skywalking-rover.git
commit e61735d62d6f02d7366ddde72b5554bab1dc1d97
Author: mrproliu <[email protected]>
AuthorDate: Fri Dec 27 08:24:31 2024 +0800

    Reduce program size
---
 bpf/include/socket_data.h | 83 +++++++++++++++++++++++------------------------
 1 file changed, 41 insertions(+), 42 deletions(-)

diff --git a/bpf/include/socket_data.h b/bpf/include/socket_data.h
index 248b34c..b4076a7 100644
--- a/bpf/include/socket_data.h
+++ b/bpf/include/socket_data.h
@@ -107,68 +107,50 @@ static __always_inline struct upload_data_args* generate_socket_upload_args() {
     return bpf_map_lookup_elem(&socket_data_upload_args_per_cpu_map, &kZero);
 }
 
-static __always_inline void __upload_socket_data_with_buffer(void *ctx, __u8 index, char* buf, size_t size, __u32 is_finished, __u8 have_reduce_after_chunk, struct upload_data_args *args) {
-    struct socket_data_upload_event *socket_data_event;
-    socket_data_event = rover_reserve_buf(&socket_data_upload_queue, sizeof(*socket_data_event));
-    if (socket_data_event == NULL) {
+static __always_inline void __upload_socket_data_with_buffer(void *ctx, __u8 index, char* buf, size_t size, __u32 is_finished, __u8 have_reduce_after_chunk, struct socket_data_upload_event *event) {
+    if (size <= 0) {
         return;
     }
-    if (size > sizeof(socket_data_event->buffer)) {
-        size = sizeof(socket_data_event->buffer);
-    }
-    if (size <= 0) {
-        rover_discard_buf(socket_data_event);
-        return;
+    bool is_reach_buffer_size = false;
+    if (size > sizeof(event->buffer)) {
+        size = sizeof(event->buffer);
+        is_reach_buffer_size = true;
     }
 
     // basic data
-    socket_data_event->start_time = args->start_time;
-    socket_data_event->end_time = args->end_time;
-    socket_data_event->protocol = args->connection_protocol;
-    socket_data_event->direction = args->data_direction;
-    socket_data_event->conid = args->con_id;
-    socket_data_event->randomid = args->random_id;
-    socket_data_event->total_size = args->bytes_count;
-    socket_data_event->data_id = args->socket_data_id;
-    socket_data_event->prev_data_id = args->prev_socket_data_id;
-
-    socket_data_event->sequence = index;
-    socket_data_event->data_len = size;
-    socket_data_event->finished = is_finished;
-    socket_data_event->have_reduce_after_chunk = have_reduce_after_chunk;
-    asm volatile("%[size] &= 0x7ff;\n" ::[size] "+r"(size) :);
-    bpf_probe_read(&socket_data_event->buffer, size, buf);
-    rover_submit_buf(ctx, &socket_data_upload_queue, socket_data_event, sizeof(*socket_data_event));
+    event->sequence = index;
+    event->data_len = size;
+    event->finished = is_finished;
+    event->have_reduce_after_chunk = have_reduce_after_chunk;
+    asm volatile("%[size] &= 0x7ff;\n" ::[size] "+r"(size) :); // for the BPF verifier: the masked size is at most 2047
+    // if the size reached the buffer size, add 1 back to make it 2048 (sizeof(event->buffer) == 2048)
+    bpf_probe_read(&event->buffer, is_reach_buffer_size ? size + 1 : size, buf);
+    rover_submit_buf(ctx, &socket_data_upload_queue, event, sizeof(*event));
 }
 
-static __always_inline void upload_socket_data_buf(void *ctx, char* buf, ssize_t size, struct upload_data_args *args, __u8 force_unfinished) {
+static __always_inline void upload_socket_data_buf(void *ctx, char* buf, ssize_t size, struct socket_data_upload_event *event, __u8 force_unfinished) {
     ssize_t already_send = 0;
 #pragma unroll
     for (__u8 index = 0; index < SOCKET_UPLOAD_CHUNK_LIMIT; index++) {
         // calculate bytes need to send
         ssize_t remaining = size - already_send;
-        size_t need_send_in_chunk = 0;
-        __u8 have_reduce_after_chunk = 0;
-        if (remaining > MAX_TRANSMIT_SOCKET_READ_LENGTH) {
-            need_send_in_chunk = MAX_TRANSMIT_SOCKET_READ_LENGTH;
-            have_reduce_after_chunk = 1;
-        } else {
-            need_send_in_chunk = remaining;
-        }
+        size_t need_send_in_chunk = remaining > MAX_TRANSMIT_SOCKET_READ_LENGTH ? MAX_TRANSMIT_SOCKET_READ_LENGTH : remaining;
+        __u8 have_reduce_after_chunk = remaining > MAX_TRANSMIT_SOCKET_READ_LENGTH ? 1 : 0;
 
         __u32 is_finished = (need_send_in_chunk + already_send) >= size || index == (SOCKET_UPLOAD_CHUNK_LIMIT - 1) ? true : false;
         __u8 sequence = index;
         if (force_unfinished == 1 && need_send_in_chunk > 0) {
             is_finished = 0;
-            sequence = generate_socket_sequence(args->con_id, args->socket_data_id);
+            sequence = generate_socket_sequence(event->conid, event->data_id);
         }
-        __upload_socket_data_with_buffer(ctx, sequence, buf + already_send, need_send_in_chunk, is_finished, have_reduce_after_chunk, args);
+        __upload_socket_data_with_buffer(ctx, sequence, buf + already_send, need_send_in_chunk, is_finished, have_reduce_after_chunk, event);
         already_send += need_send_in_chunk;
     }
 }
+
 #define UPLOAD_PER_SOCKET_DATA_IOV() \
 if (iov_index < iovlen) { \
     struct iovec cur_iov; \
@@ -190,12 +172,12 @@ if (iov_index < iovlen) { \
             have_reduce_after_chunk = 1; \
         } \
         __u32 is_finished = (need_send_in_chunk + already_send) >= size || loop_count == (SOCKET_UPLOAD_CHUNK_LIMIT - 1) ? true : false; \
-        __upload_socket_data_with_buffer(ctx, loop_count, cur_iov.iov_base + cur_iov_sended, need_send_in_chunk, is_finished, have_reduce_after_chunk, args); \
+        __upload_socket_data_with_buffer(ctx, loop_count, cur_iov.iov_base + cur_iov_sended, need_send_in_chunk, is_finished, have_reduce_after_chunk, event); \
         already_send += need_send_in_chunk; \
         loop_count++; \
     }
 
-static __always_inline void upload_socket_data_iov(void *ctx, struct iovec* iov, const size_t iovlen, ssize_t size, struct upload_data_args *args) {
+static __always_inline void upload_socket_data_iov(void *ctx, struct iovec* iov, const size_t iovlen, ssize_t size, struct socket_data_upload_event *event) {
     ssize_t already_send = 0;
     ssize_t cur_iov_sended = 0;
     __u8 iov_index = 0;
@@ -229,15 +211,32 @@ static __inline void upload_socket_data(void *ctx, struct upload_data_args *args
     if (args->connection_protocol == CONNECTION_PROTOCOL_UNKNOWN || args->connection_ssl != args->socket_data_ssl || args->connection_skip_data_upload == 1) {
         return;
     }
+    // generate event
+    __u32 kZero = 0;
+    struct socket_data_upload_event *event = bpf_map_lookup_elem(&socket_data_upload_event_per_cpu_map, &kZero);
+    if (event == NULL) {
+        return;
+    }
+
+    // basic data
+    event->start_time = args->start_time;
+    event->end_time = args->end_time;
+    event->protocol = args->connection_protocol;
+    event->direction = args->data_direction;
+    event->conid = args->con_id;
+    event->randomid = args->random_id;
+    event->total_size = args->bytes_count;
+    event->data_id = args->socket_data_id;
+
     struct socket_data_last_id_t *latest = bpf_map_lookup_elem(&socket_data_last_id_map, &args->con_id);
     args->prev_socket_data_id = 0;
     if (latest != NULL && latest->random_id == args->random_id) {
         args->prev_socket_data_id = latest->socket_data_id;
     }
 
     if (args->socket_data_buf != NULL) {
-        upload_socket_data_buf(ctx, args->socket_data_buf, args->bytes_count, args, args->socket_ssl_buffer_force_unfinished);
+        upload_socket_data_buf(ctx, args->socket_data_buf, args->bytes_count, event, args->socket_ssl_buffer_force_unfinished);
     } else if (args->socket_data_iovec != NULL) {
-        upload_socket_data_iov(ctx, args->socket_data_iovec, args->socket_data_iovlen, args->bytes_count, args);
+        upload_socket_data_iov(ctx, args->socket_data_iovec, args->socket_data_iovlen, args->bytes_count, event);
     }
 
     if (latest == NULL || latest->socket_data_id != args->socket_data_id) {
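The program-size reduction comes from the last hunk: the event header fields (start_time, conid, and so on) are now written once per call in upload_socket_data, into a per-CPU scratch event, instead of once per chunk inside the unrolled loops, so every inlined call site of __upload_socket_data_with_buffer gets smaller. The declaration of socket_data_upload_event_per_cpu_map is not part of this diff; a one-slot per-CPU array of the kind it implies would conventionally look like the sketch below. The trimmed struct fields are placeholders, not the project's real layout.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* trimmed placeholder; the real struct carries the full header plus a 2 KB buffer */
struct socket_data_upload_event {
    __u64 start_time;
    __u64 conid;
    char buffer[2048];
};

struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __uint(max_entries, 1);
    __type(key, __u32);
    __type(value, struct socket_data_upload_event);
} socket_data_upload_event_per_cpu_map SEC(".maps");

static __always_inline struct socket_data_upload_event *event_scratch(void) {
    __u32 kZero = 0;
    /* one slot per CPU: usable as scratch space because the struct is far
       too large for the 512-byte BPF stack */
    return bpf_map_lookup_elem(&socket_data_upload_event_per_cpu_map, &kZero);
}

The same pattern already appears at the top of the first hunk for the args struct (socket_data_upload_args_per_cpu_map), so the commit extends an existing convention in this file rather than introducing a new one.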
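The inline asm kept in __upload_socket_data_with_buffer is the usual trick for variable-length reads in BPF: the `&= 0x7ff` mask gives the verifier a hard upper bound of 2047 on the length passed to bpf_probe_read, a bound that plain C comparisons often fail to convey once the compiler reorders them. Because 2048 itself is not representable under that mask, the code tracks the clamped case in is_reach_buffer_size and adds 1 back at the read. Below is a minimal standalone sketch of the idiom, not the project's code: the helper name read_bounded and the trimmed struct are made up, and the sketch clamps to sizeof(buffer) - 1 so that the masked value plus one equals the full 2048-byte buffer, which is one plausible reading of the comment's intent.

#include <linux/types.h>
#include <bpf/bpf_helpers.h>

struct scratch_event {
    __u32 data_len;
    char buffer[2048];          /* matches the 0x7ff mask + 1 below */
};

static __always_inline void read_bounded(struct scratch_event *event,
                                         const char *src, __u64 size) {
    __u8 reached_max = 0;
    if (size > sizeof(event->buffer)) {
        /* clamp to 2047 so the value survives the 0x7ff mask; the
           "+ 1" at the read restores the full 2048-byte length */
        size = sizeof(event->buffer) - 1;
        reached_max = 1;
    }
    /* opaque to the compiler, but gives the verifier a bound of [0, 2047] */
    asm volatile("%[size] &= 0x7ff;\n" ::[size] "+r"(size) :);
    __u64 read_len = reached_max ? size + 1 : size;
    bpf_probe_read(&event->buffer, read_len, src);
    event->data_len = read_len;
}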
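The chunking arithmetic in upload_socket_data_buf is unchanged in substance, only condensed into ternaries: the payload is sliced into at most SOCKET_UPLOAD_CHUNK_LIMIT chunks and the last one is flagged finished. It is easy to check in plain C; the constant values below are placeholders for illustration, not the ones defined in the project headers.

#include <stdio.h>

/* placeholder values, not the project's real constants */
#define SOCKET_UPLOAD_CHUNK_LIMIT 8
#define MAX_TRANSMIT_SOCKET_READ_LENGTH 2048

int main(void) {
    long size = 5000;           /* pretend payload length */
    long already_send = 0;
    for (int index = 0; index < SOCKET_UPLOAD_CHUNK_LIMIT; index++) {
        long remaining = size - already_send;
        long chunk = remaining > MAX_TRANSMIT_SOCKET_READ_LENGTH
                         ? MAX_TRANSMIT_SOCKET_READ_LENGTH : remaining;
        int finished = (chunk + already_send) >= size
                         || index == SOCKET_UPLOAD_CHUNK_LIMIT - 1;
        printf("chunk %d: %ld bytes, finished=%d\n", index, chunk, finished);
        already_send += chunk;
        /* the unrolled BPF loop has no break; its zero-size tail chunks
           are dropped by the size <= 0 guard instead */
        if (finished) break;
    }
    return 0;
}

For a 5000-byte payload this prints two full 2048-byte chunks followed by a final 904-byte chunk with finished=1, matching the is_finished expression in the diff.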
