[...]

> > +static struct ring_buffer_event *
> > +simple_rb_reserve_next(struct simple_rb_per_cpu *cpu_buffer, unsigned long length, u64 timestamp)
> > +{
> > +   unsigned long ts_ext_size = 0, event_size = rb_event_size(length);
> > +   struct simple_buffer_page *tail = cpu_buffer->tail_page;
> > +   struct ring_buffer_event *event;
> > +   u32 write, prev_write;
> > +   u64 time_delta;
> > +
> > +   time_delta = timestamp - cpu_buffer->write_stamp;
> 
> The remote buffers never get preempted, do they?
> 
> That is, it doesn't need to handle different contexts like the normal
> kernel does? (normal, softirq, irq, NMI, etc.)

No, luckily, we don't need to support any of that.
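
Since the writer here is never preempted or re-entered, the reserve path can
get away with plain loads and stores on tail->write. Very roughly, as an
illustration of the idea rather than the patch itself (reserve_offset() is a
made-up name):

	/* Illustration only: a single writer per cpu_buffer, never nested. */
	static u32 reserve_offset(struct simple_buffer_page *tail, u32 size)
	{
		u32 offset = tail->write;	/* nothing can race with this load */

		tail->write = offset + size;	/* plain store is enough */
		return offset;
	}

The main kernel ring buffer needs local_add_return() here precisely because an
interrupt or NMI can land between that load and the store back.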

> 
> -- Steve
> 
> > +
> > +   if (test_time_stamp(time_delta))
> > +           ts_ext_size = 8;
> > +
> > +   prev_write = tail->write;
> > +   write = prev_write + event_size + ts_ext_size;
> > +
> > +   if (unlikely(write > (PAGE_SIZE - BUF_PAGE_HDR_SIZE)))
> > +           tail = simple_rb_move_tail(cpu_buffer);
> > +
> > +   if (!tail->entries) {
> > +           tail->page->time_stamp = timestamp;
> > +           time_delta = 0;
> > +           ts_ext_size = 0;
> > +           write = event_size;
> > +           prev_write = 0;
> > +   }
> > +
> > +   tail->write = write;
> > +   tail->entries++;
> > +
> > +   cpu_buffer->write_stamp = timestamp;
> > +
> > +   event = (struct ring_buffer_event *)(tail->page->data + prev_write);
> > +   if (ts_ext_size) {
> > +           event = rb_event_add_ts_extend(event, time_delta);
> > +           time_delta = 0;
> > +   }
> > +
> > +   event->type_len = 0;
> > +   event->time_delta = time_delta;
> > +   event->array[0] = event_size - RB_EVNT_HDR_SIZE;
> > +
> > +   return event;
> > +}
> > +
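
For completeness: the test_time_stamp()/rb_event_add_ts_extend() handling above
follows the main ring buffer's scheme, where the event's time_delta field is
only 27 bits wide, so a larger gap is carried by an 8-byte time-extend event in
front of the data. A rough sketch of the check, based on the mainline
definitions (needs_time_extend() is a made-up name):

	#define TS_SHIFT	27
	#define TS_MASK		((1ULL << TS_SHIFT) - 1)

	/* Deltas that do not fit in the 27-bit time_delta field need an extend. */
	static inline bool needs_time_extend(u64 delta)
	{
		return delta & ~TS_MASK;
	}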
