* Julien Desfossez ([email protected]) wrote:
> This patch provides the copy_from_user and memset operations for the lib
> ringbuffer.
> 
> Signed-off-by: Julien Desfossez <[email protected]>
> ---
>  lib/ringbuffer/backend.h             |  102 ++++++++++++++++++++++++++++++++++
>  lib/ringbuffer/backend_internal.h    |   32 +++++++++++
>  lib/ringbuffer/ring_buffer_backend.c |   97 ++++++++++++++++++++++++++++++++
>  ltt-events.h                         |    2 +
>  ltt-ring-buffer-client.h             |    8 +++
>  ltt-ring-buffer-metadata-client.h    |    8 +++
>  probes/lttng-events.h                |    8 +++
>  7 files changed, 257 insertions(+), 0 deletions(-)
> 
> diff --git a/lib/ringbuffer/backend.h b/lib/ringbuffer/backend.h
> index 47bc179..dde4208 100644
> --- a/lib/ringbuffer/backend.h
> +++ b/lib/ringbuffer/backend.h
> @@ -103,6 +103,108 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
>       ctx->buf_offset += len;
>  }
>  
> +/**
> + * lib_ring_buffer_memset - write len bytes of c to a buffer backend
> + * @config : ring buffer instance configuration
> + * @bufb : ring buffer backend
> + * @offset : offset within the buffer
> + * @c : the byte to copy
> + * @len : number of bytes to copy
> + *
> + * This function writes "len" bytes of "c" to a buffer backend, at a specific
> + * offset. This is more or less a buffer backend-specific memset() operation.
> + * Calls the slow path (_ring_buffer_memset) if write is crossing a page
> + * boundary.
> + */
> +static inline
> +void lib_ring_buffer_memset(const
> +                         struct lib_ring_buffer_config *config,

const and struct on the same line, please.

> +                         struct lib_ring_buffer_backend *bufb,

remove the bufb and offset parameters, and pass ctx instead (see the sketch below).

> +                         size_t offset, int c, size_t len)
> +{
> +     struct channel_backend *chanb = &bufb->chan->backend;
> +     size_t sbidx, index;
> +     ssize_t pagecpy;
> +     struct lib_ring_buffer_backend_pages *rpages;
> +     unsigned long sb_bindex, id;
> +
> +     offset &= chanb->buf_size - 1;
> +     sbidx = offset >> chanb->subbuf_size_order;
> +     index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
> +     pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
> +     id = bufb->buf_wsb[sbidx].id;
> +     sb_bindex = subbuffer_id_get_index(config, id);
> +     rpages = bufb->array[sb_bindex];
> +     /* FIXME, need ctx ?

Can you add it as a parameter to lib_ring_buffer_memset(), just like
lib_ring_buffer_write() does?

> +     CHAN_WARN_ON(ctx->chan,
> +                  config->mode == RING_BUFFER_OVERWRITE
> +                  && subbuffer_id_is_noref(config, id));
> +                  */
> +     if (likely(pagecpy == len))
> +             lib_ring_buffer_do_memset(rpages->p[index].virt
> +                                       + (offset & ~PAGE_MASK),
> +                                       c, len);
> +     else
> +             _lib_ring_buffer_memset(bufb, offset, c, len, 0);

missing:

	ctx->buf_offset += len;

Please double-check and make sure you do exactly the same thing as the
lib_ring_buffer_write() and _lib_ring_buffer_write() implementations,
line by line.
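
i.e. something along these lines (untested sketch; the structure mirrors
lib_ring_buffer_write() and your lib_ring_buffer_copy_from_user() below,
so the field and helper names are assumed from there):

static inline
void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx, int c, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, index;
	size_t offset = ctx->buf_offset;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	if (likely(pagecpy == len))
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, len);
	else
		_lib_ring_buffer_memset(bufb, offset, c, len, 0);
	ctx->buf_offset += len;
}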

> +}
> +
> +/**
> + * lib_ring_buffer_copy_from_user - write userspace data to a buffer backend
> + * @config : ring buffer instance configuration
> + * @ctx: ring buffer context. (input arguments only)
> + * @src : userspace source pointer to copy from
> + * @len : length of data to copy
> + *
> + * This function copies "len" bytes of data from a userspace pointer to a
> + * buffer backend, at the current context offset. This is more or less a buffer
> + * backend-specific memcpy() operation. Calls the slow path
> + * (_ring_buffer_write_from_user) if copy is crossing a page boundary.
> + */
> +static inline
> +void lib_ring_buffer_copy_from_user(const
> +                                 struct lib_ring_buffer_config *config,
> +                                 struct lib_ring_buffer_ctx *ctx,
> +                                 const void __user *src, size_t len)
> +{
> +     struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
> +     struct channel_backend *chanb = &ctx->chan->backend;
> +     size_t sbidx, index;
> +     size_t offset = ctx->buf_offset;
> +     ssize_t pagecpy;
> +     struct lib_ring_buffer_backend_pages *rpages;
> +     unsigned long sb_bindex, id;
> +     unsigned long ret;
> +
> +     offset &= chanb->buf_size - 1;
> +     sbidx = offset >> chanb->subbuf_size_order;
> +     index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
> +     pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
> +     id = bufb->buf_wsb[sbidx].id;
> +     sb_bindex = subbuffer_id_get_index(config, id);
> +     rpages = bufb->array[sb_bindex];
> +     CHAN_WARN_ON(ctx->chan,
> +                     config->mode == RING_BUFFER_OVERWRITE
> +                     && subbuffer_id_is_noref(config, id));
> +     if (access_ok(VERIFY_READ, src, len)) {

if (unlikely(!access_ok(VERIFY_READ, src, len)))
        goto fill_buffer;

> +             if (likely(pagecpy == len)) {
> +                     ret = lib_ring_buffer_do_copy_from_user(
> +                             rpages->p[index].virt + (offset & ~PAGE_MASK),
> +                             src, len);
> +                     if (ret > 0) {

if (unlikely(ret > 0)) { ...

> +                             len -= (pagecpy - ret);
> +                             offset += (pagecpy - ret);
                                
                                goto fill_buffer;
> +                             _lib_ring_buffer_memset(bufb, offset, 0, len, 0);

> +                     }
> +             } else {
> +                     _lib_ring_buffer_copy_from_user(bufb, offset, src, len, 0);
> +             }
> +             ctx->buf_offset += len;
> +     } else {
> +             _lib_ring_buffer_memset(bufb, offset, 0, len, 0);

> +     }

missing:

	ctx->buf_offset += len;
	return;

fill_buffer:
	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);

Please document that we are calling the slow path directly here because
this is an error path.
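
Putting the pieces together, the end of the function would then look
roughly like this (untested, reusing the local variables you already
declare above):

	if (unlikely(!access_ok(VERIFY_READ, src, len)))
		goto fill_buffer;
	if (likely(pagecpy == len)) {
		ret = lib_ring_buffer_do_copy_from_user(
			rpages->p[index].virt + (offset & ~PAGE_MASK),
			src, len);
		if (unlikely(ret > 0)) {
			len -= (pagecpy - ret);
			offset += (pagecpy - ret);
			goto fill_buffer;
		}
	} else {
		_lib_ring_buffer_copy_from_user(bufb, offset, src, len, 0);
	}
	ctx->buf_offset += len;
	return;

fill_buffer:
	/*
	 * In the error path we call the slow path version directly:
	 * we don't care about speed here, and it keeps the inline
	 * code footprint small.
	 */
	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
}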

> +}
> +
> +
>  /*
>   * This accessor counts the number of unread records in a buffer.
>   * It only provides a consistent value if no reads not writes are performed
> diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h
> index d92fe36..49c691a 100644
> --- a/lib/ringbuffer/backend_internal.h
> +++ b/lib/ringbuffer/backend_internal.h
> @@ -15,6 +15,7 @@
>  #include "../../wrapper/ringbuffer/backend_types.h"
>  #include "../../wrapper/ringbuffer/frontend_types.h"
>  #include <linux/string.h>
> +#include <asm/uaccess.h>

Please use #include <linux/uaccess.h> instead.

>  
>  /* Ring buffer backend API presented to the frontend */
>  
> @@ -40,6 +41,13 @@ void lib_ring_buffer_backend_exit(void);
>  extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
>                                  size_t offset, const void *src, size_t len,
>                                  ssize_t pagecpy);
> +extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
> +                                 size_t offset, int c, size_t len,
> +                                 ssize_t pagecpy);
> +extern void _lib_ring_buffer_copy_from_user(struct
> +                                          lib_ring_buffer_backend *bufb,

Put the whole "struct lib_ring_buffer_backend *bufb" on the same line as the
rest of the declaration (even if it goes beyond 80 columns).
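
i.e. (note that the prototype could also carry the __user annotation on src,
to match the definition in ring_buffer_backend.c):

extern void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
					    size_t offset, const void __user *src,
					    size_t len, ssize_t pagecpy);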

> +                                          size_t offset, const void *src,
> +                                          size_t len, ssize_t pagecpy);
>  
>  /*
> + * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
> @@ -414,4 +422,28 @@ do {                                                        \
>               inline_memcpy(dest, src, __len);                \
>  } while (0)
>  
> +/*
> + * We use __copy_from_user to copy userspace data since we already
> + * did the access_ok for the whole range.
> + */
> +static inline
> +unsigned long lib_ring_buffer_do_copy_from_user(void *dest,
> +                                             const void __user *src,
> +                                             unsigned long len)
> +{
> +     return __copy_from_user(dest, src, len);
> +}
> +
> +/*
> + * write len bytes to dest with c
> + */
> +static inline
> +void lib_ring_buffer_do_memset(char *dest, int c,
> +     unsigned long len)
> +{
> +     unsigned long i;

Add a blank line after the variable declaration.

> +     for (i = 0; i < len; i++)
> +         dest[i] = c;
> +}
> +
>  #endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */
> diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
> index a9513d1..0afeea8 100644
> --- a/lib/ringbuffer/ring_buffer_backend.c
> +++ b/lib/ringbuffer/ring_buffer_backend.c
> @@ -501,6 +501,103 @@ void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
>  }
>  EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
>  
> +
> +/**
> + * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
> + * @bufb : buffer backend
> + * @offset : offset within the buffer
> + * @c : the byte to write
> + * @len : length to write
> + * @pagecpy : page size copied so far
> + */
> +void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
> +                          size_t offset,
> +                          int c, size_t len, ssize_t pagecpy)
> +{
> +     struct channel_backend *chanb = &bufb->chan->backend;
> +     const struct lib_ring_buffer_config *config = chanb->config;
> +     size_t sbidx, index;
> +     struct lib_ring_buffer_backend_pages *rpages;
> +     unsigned long sb_bindex, id;
> +
> +     do {
> +             len -= pagecpy;
> +             offset += pagecpy;
> +             sbidx = offset >> chanb->subbuf_size_order;
> +             index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
> +
> +             /*
> +              * Underlying layer should never ask for writes across
> +              * subbuffers.
> +              */
> +             CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
> +
> +             pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
> +             id = bufb->buf_wsb[sbidx].id;
> +             sb_bindex = subbuffer_id_get_index(config, id);
> +             rpages = bufb->array[sb_bindex];
> +             CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
> +                          && subbuffer_id_is_noref(config, id));
> +             lib_ring_buffer_do_memset(rpages->p[index].virt
> +                                       + (offset & ~PAGE_MASK),
> +                                       c, pagecpy);
> +     } while (unlikely(len != pagecpy));
> +}
> +EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
> +
> +
> +/**
> + * lib_ring_buffer_copy_from_user - write user data to a ring_buffer buffer.
> + * @bufb : buffer backend
> + * @offset : offset within the buffer
> + * @src : source address
> + * @len : length to write
> + * @pagecpy : page size copied so far

Please document that this function should never be called directly without
the src pointer having been checked with access_ok() first.
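
e.g. something along these lines in the kerneldoc header (exact wording up
to you):

 * This function should never be called directly without the src pointer
 * having been checked with access_ok() beforehand.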

> + */
> +void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
> +                                   size_t offset,
> +                                   const void __user *src, size_t len,
> +                                   ssize_t pagecpy)
> +{
> +     struct channel_backend *chanb = &bufb->chan->backend;
> +     const struct lib_ring_buffer_config *config = chanb->config;
> +     size_t sbidx, index;
> +     struct lib_ring_buffer_backend_pages *rpages;
> +     unsigned long sb_bindex, id;
> +     int ret;
> +
> +     do {
> +             len -= pagecpy;
> +             src += pagecpy;
> +             offset += pagecpy;
> +             sbidx = offset >> chanb->subbuf_size_order;
> +             index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
> +
> +             /*
> +              * Underlying layer should never ask for writes across
> +              * subbuffers.
> +              */
> +             CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
> +
> +             pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
> +             id = bufb->buf_wsb[sbidx].id;
> +             sb_bindex = subbuffer_id_get_index(config, id);
> +             rpages = bufb->array[sb_bindex];
> +             CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
> +                             && subbuffer_id_is_noref(config, id));
> +             ret = lib_ring_buffer_do_copy_from_user(rpages->p[index].virt
> +                                                     + (offset & ~PAGE_MASK),
> +                                                     src, pagecpy) != 0;
> +             if (ret > 0) {
> +                     offset += (pagecpy - ret);
> +                     len -= (pagecpy - ret);
> +                     lib_ring_buffer_memset(config, bufb, offset, 0, len);

You can call _lib_ring_buffer_memset() here. We're in the slow path, so we
don't care if it's slow; what we want is a minimal code-size footprint.
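
i.e. (untested):

		if (ret > 0) {
			offset += (pagecpy - ret);
			len -= (pagecpy - ret);
			/* Fill the rest with zeroes; the slow path is fine here. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}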

> +                     break; /* stop copy */
> +             }
> +     } while (unlikely(len != pagecpy));
> +}
> +EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user);
> +
>  /**
>   * lib_ring_buffer_read - read data from ring_buffer_buffer.
>   * @bufb : buffer backend
> diff --git a/ltt-events.h b/ltt-events.h
> index dfab9a5..2372320 100644
> --- a/ltt-events.h
> +++ b/ltt-events.h
> @@ -210,6 +210,8 @@ struct ltt_channel_ops {
>       void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
>       void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
>                           size_t len);
> +     void (*event_write_from_user)(struct lib_ring_buffer_ctx *ctx,
> +                                   const void *src, size_t len);
>       /*
>        * packet_avail_size returns the available size in the current
>        * packet. Note that the size returned is only a hint, since it
> diff --git a/ltt-ring-buffer-client.h b/ltt-ring-buffer-client.h
> index dc6bbd0..34e7ce5 100644
> --- a/ltt-ring-buffer-client.h
> +++ b/ltt-ring-buffer-client.h
> @@ -479,6 +479,13 @@ void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
>  }
>  
>  static
> +void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
> +                            const void __user *src, size_t len)
> +{
> +     lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
> +}
> +
> +static
>  wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
>  {
>       struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
> @@ -517,6 +524,7 @@ static struct ltt_transport ltt_relay_transport = {
>               .event_reserve = ltt_event_reserve,
>               .event_commit = ltt_event_commit,
>               .event_write = ltt_event_write,
> +             .event_write_from_user = ltt_event_write_from_user,
>               .packet_avail_size = NULL,      /* Would be racy anyway */
>               .get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
>               .get_hp_wait_queue = ltt_get_hp_wait_queue,
> diff --git a/ltt-ring-buffer-metadata-client.h b/ltt-ring-buffer-metadata-client.h
> index dc0e36e..3cf8a34 100644
> --- a/ltt-ring-buffer-metadata-client.h
> +++ b/ltt-ring-buffer-metadata-client.h
> @@ -225,6 +225,13 @@ void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
>  }
>  
>  static
> +void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
> +                            const void __user *src, size_t len)
> +{
> +     lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
> +}
> +
> +static
>  size_t ltt_packet_avail_size(struct channel *chan)
>                            
>  {
> @@ -279,6 +286,7 @@ static struct ltt_transport ltt_relay_transport = {
>               .buffer_read_close = ltt_buffer_read_close,
>               .event_reserve = ltt_event_reserve,
>               .event_commit = ltt_event_commit,
> +             .event_write_from_user = ltt_event_write_from_user,
>               .event_write = ltt_event_write,
>               .packet_avail_size = ltt_packet_avail_size,
>               .get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
> diff --git a/probes/lttng-events.h b/probes/lttng-events.h
> index 1d2def4..369202e 100644
> --- a/probes/lttng-events.h
> +++ b/probes/lttng-events.h
> @@ -510,6 +510,14 @@ __assign_##dest##_2:                                                    \
>       __chan->ops->event_write(&__ctx, src,                           \
>               sizeof(__typemap.dest) * __get_dynamic_array_len(dest));\
>       goto __end_field_##dest##_2;
> +#undef tp_memcpy_from_user
> +#define tp_memcpy_from_user(dest, src, len)                          \
> +     __assign_##dest:                                                \
> +     if (0)                                                          \
> +             (void) __typemap.dest;                                  \
> +     lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
> +     __chan->ops->event_write_from_user(&__ctx, src, len);           \
> +     goto __end_field_##dest;

Don't forget to also add:

#undef tp_copy_string_from_user
#define tp_copy_string_from_user(dest, src, len)                        \
        __assign_##dest:                                                \
        if (0)                                                          \
                (void) __typemap.dest;                                  \
        lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
        __chan->ops->event_write_from_user(&__ctx, src, len - 1);       \
        __chan->ops->event_memset(&__ctx, 0, 1);

so that we force a terminating \0 at the end of the string.
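
(Note that this assumes an event_memset channel operation is also added to
struct ltt_channel_ops and wired into the clients, in the same way as your
event_write_from_user; a rough sketch, names here are just a guess:)

	/* ltt-events.h */
	void (*event_memset)(struct lib_ring_buffer_ctx *ctx,
			     int c, size_t len);

	/* ltt-ring-buffer-client.h and ltt-ring-buffer-metadata-client.h */
	static
	void ltt_event_memset(struct lib_ring_buffer_ctx *ctx,
			      int c, size_t len)
	{
		lib_ring_buffer_memset(&client_config, ctx, c, len);
	}

	/* and in the ltt_relay_transport ops: */
		.event_memset = ltt_event_memset,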

thanks!

Mathieu


>  
>  #undef tp_strcpy
>  #define tp_strcpy(dest, src)                                         \
> -- 
> 1.7.5.4
> 

-- 
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com

_______________________________________________
ltt-dev mailing list
[email protected]
http://lists.casi.polymtl.ca/cgi-bin/mailman/listinfo/ltt-dev
