On Sat, 31 Jan 2026 13:28:31 +0000
Vincent Donnefort <[email protected]> wrote:
> +/**
> + * simple_ring_buffer_swap_reader_page - Swap ring-buffer head with the reader
> + *
> + * This function enables consuming reading. It ensures the current head page will not be overwritten
> + * and can be safely read.
> + *
> + * @cpu_buffer: A simple_rb_per_cpu
And if you're going to do kerneldoc, you need to do it correctly ;-)
You put the description before the parameters, but it belongs after them.
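Something like this (just a sketch, reusing your own text):

/**
 * simple_ring_buffer_swap_reader_page - Swap ring-buffer head with the reader
 * @cpu_buffer: A simple_rb_per_cpu
 *
 * This function enables consuming reading. It ensures the current head page
 * will not be overwritten and can be safely read.
 *
 * Returns 0 on success, -ENODEV if @cpu_buffer was unloaded or -EBUSY if we
 * failed to catch the head page.
 */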
> + *
> + * Returns 0 on success, -ENODEV if @cpu_buffer was unloaded or -EBUSY if we failed to catch the
> + * head page.
> + */
> +int simple_ring_buffer_swap_reader_page(struct simple_rb_per_cpu *cpu_buffer)
> +{
> + struct simple_buffer_page *last, *head, *reader;
> + unsigned long overrun;
> + int retry = 8;
> + int ret;
> +
> + if (!simple_rb_loaded(cpu_buffer))
> + return -ENODEV;
> +
> + reader = cpu_buffer->reader_page;
> +
> + do {
> + /* Run after the writer to find the head */
> + ret = simple_rb_find_head(cpu_buffer);
> + if (ret)
> + return ret;
> +
> + head = cpu_buffer->head_page;
> +
> + /* Connect the reader page around the header page */
> + reader->link.next = head->link.next;
> + reader->link.prev = head->link.prev;
> +
> + /* The last page before the head */
> + last = simple_bpage_from_link(head->link.prev);
> +
> + /* The reader page points to the new header page */
> + simple_bpage_set_head_link(reader);
> +
> + overrun = cpu_buffer->meta->overrun;
> + } while (!simple_bpage_unset_head_link(last, reader, SIMPLE_RB_LINK_NORMAL) && retry--);
> +
> + if (!retry)
> + return -EINVAL;
> +
> + cpu_buffer->head_page = simple_bpage_from_link(reader->link.next);
> + cpu_buffer->head_page->link.prev = &reader->link;
> + cpu_buffer->reader_page = head;
> + cpu_buffer->meta->reader.lost_events = overrun - cpu_buffer->last_overrun;
> + cpu_buffer->meta->reader.id = cpu_buffer->reader_page->id;
> + cpu_buffer->last_overrun = overrun;
> +
> + return 0;
> +}
> +EXPORT_SYMBOL_GPL(simple_ring_buffer_swap_reader_page);
> +
> +static struct simple_buffer_page *simple_rb_move_tail(struct simple_rb_per_cpu *cpu_buffer)
> +{
> + struct simple_buffer_page *tail, *new_tail;
> +
> + tail = cpu_buffer->tail_page;
> + new_tail = simple_bpage_next_page(tail);
> +
> + if (simple_bpage_unset_head_link(tail, new_tail, SIMPLE_RB_LINK_HEAD_MOVING)) {
> + /*
> + * Oh no! we've caught the head. There is none anymore and
> + * swap_reader will spin until we set the new one. Overrun must
> + * be written first, to make sure we report the correct number
> + * of lost events.
> + */
> + simple_rb_meta_inc(cpu_buffer->meta->overrun, new_tail->entries);
> + simple_rb_meta_inc(cpu_buffer->meta->pages_lost, 1);
> +
> + simple_bpage_set_head_link(new_tail);
> + simple_bpage_set_normal_link(tail);
> + }
> +
> + simple_bpage_reset(new_tail);
> + cpu_buffer->tail_page = new_tail;
> +
> + simple_rb_meta_inc(cpu_buffer->meta->pages_touched, 1);
> +
> + return new_tail;
> +}
> +
> +static unsigned long rb_event_size(unsigned long length)
> +{
> + struct ring_buffer_event *event;
> +
> + return length + RB_EVNT_HDR_SIZE + sizeof(event->array[0]);
> +}
> +
> +static struct ring_buffer_event *
> +rb_event_add_ts_extend(struct ring_buffer_event *event, u64 delta)
> +{
> + event->type_len = RINGBUF_TYPE_TIME_EXTEND;
> + event->time_delta = delta & TS_MASK;
> + event->array[0] = delta >> TS_SHIFT;
> +
> + return (struct ring_buffer_event *)((unsigned long)event + 8);
> +}
> +
> +static struct ring_buffer_event *
> +simple_rb_reserve_next(struct simple_rb_per_cpu *cpu_buffer, unsigned long length, u64 timestamp)
> +{
> + unsigned long ts_ext_size = 0, event_size = rb_event_size(length);
> + struct simple_buffer_page *tail = cpu_buffer->tail_page;
> + struct ring_buffer_event *event;
> + u32 write, prev_write;
> + u64 time_delta;
> +
> + time_delta = timestamp - cpu_buffer->write_stamp;
> +
> + if (test_time_stamp(time_delta))
> + ts_ext_size = 8;
> +
> + prev_write = tail->write;
> + write = prev_write + event_size + ts_ext_size;
> +
> + if (unlikely(write > (PAGE_SIZE - BUF_PAGE_HDR_SIZE)))
> + tail = simple_rb_move_tail(cpu_buffer);
> +
> + if (!tail->entries) {
> + tail->page->time_stamp = timestamp;
> + time_delta = 0;
> + ts_ext_size = 0;
> + write = event_size;
> + prev_write = 0;
> + }
> +
> + tail->write = write;
> + tail->entries++;
> +
> + cpu_buffer->write_stamp = timestamp;
> +
> + event = (struct ring_buffer_event *)(tail->page->data + prev_write);
> + if (ts_ext_size) {
> + event = rb_event_add_ts_extend(event, time_delta);
> + time_delta = 0;
> + }
> +
> + event->type_len = 0;
> + event->time_delta = time_delta;
> + event->array[0] = event_size - RB_EVNT_HDR_SIZE;
> +
> + return event;
> +}
> +
> +/**
> + * simple_ring_buffer_reserve - Reserve an entry in @cpu_buffer
> + *
And you don't leave a blank line between the one-line description and the
arguments.
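That is, something like (again, just reusing your own lines):

/**
 * simple_ring_buffer_reserve - Reserve an entry in @cpu_buffer
 * @cpu_buffer: A simple_rb_per_cpu
 * @length: Size of the entry in bytes
 * @timestamp: Timestamp of the entry
 *
 * Returns the address of the entry where to write data or NULL
 */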
> + * @cpu_buffer: A simple_rb_per_cpu
> + * @length: Size of the entry in bytes
> + * @timestamp: Timestamp of the entry
> + *
> + * Returns the address of the entry where to write data or NULL
> + */
> +void *simple_ring_buffer_reserve(struct simple_rb_per_cpu *cpu_buffer, unsigned long length,
> + u64 timestamp)
> +{
> + struct ring_buffer_event *rb_event;
> +
> + if (cmpxchg(&cpu_buffer->status, SIMPLE_RB_READY, SIMPLE_RB_WRITING) != SIMPLE_RB_READY)
> + return NULL;
> +
> + rb_event = simple_rb_reserve_next(cpu_buffer, length, timestamp);
> +
> + return &rb_event->array[1];
> +}
> +EXPORT_SYMBOL_GPL(simple_ring_buffer_reserve);
> +
Other than that:
Reviewed-by: Steven Rostedt (Google) <[email protected]>
--
Steve