Module: xenomai-forge
Branch: next
Commit: c8e9e166bce43b4f93d0cf1fd8cecf60f586c654
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=c8e9e166bce43b4f93d0cf1fd8cecf60f586c654
Author: Philippe Gerum <[email protected]>
Date:   Wed Aug 6 09:11:32 2014 +0200

cobalt/rtdm: introduce mmap() support

The new ->mmap() handler is introduced in the device operation
descriptor. This service is restricted to secondary mode, switching a
primary mode caller to secondary mode accordingly when invoked from an
application.

The prototype is:

int mmap_handler(struct rtdm_fd *fd, struct vm_area_struct *vma);

The semantics are identical to those of the corresponding handler in
the regular Linux file operations. The only difference is that the
handler receives an RTDM file descriptor (struct rtdm_fd) instead of a
regular struct file pointer. A valid vma descriptor covering the
target user address space is passed to the handler, so common mapping
operations may be performed on it.

Two new convenience routines for mapping a chunk of kernel memory and
a physical I/O address range are provided, namely rtdm_mmap_kmem() and
rtdm_mmap_iomem() respectively. Both take the vma descriptor received
by the ->mmap() handler, and the virtual/physical start address of the
memory range to map to the target address range in user-space.

The existing rtdm_mmap_to_user() and rtdm_iomap_to_user() calls are
kept unmodified; the ->mmap() introduction has no impact on existing
drivers currently relying on them.
---
 include/cobalt/kernel/rtdm/driver.h |  22 ++-
 include/cobalt/kernel/rtdm/fd.h     |  26 ++-
 include/cobalt/uapi/rtdm/syscall.h  |   1 +
 include/rtdm/uapi/rtdm.h            |   8 +
 kernel/cobalt/rtdm/device.c         |  55 +++---
 kernel/cobalt/rtdm/drvlib.c         | 313 ++++++++++++++++++++++++-----------
 kernel/cobalt/rtdm/fd.c             |  47 +++++-
 kernel/cobalt/rtdm/internal.h       |  11 +-
 kernel/cobalt/rtdm/proc.c           |   9 +-
 kernel/cobalt/rtdm/syscall.c        |  21 +++
 kernel/cobalt/trace/cobalt-rtdm.h   |  62 +++++++
 11 files changed, 429 insertions(+), 146 deletions(-)

diff --git a/include/cobalt/kernel/rtdm/driver.h b/include/cobalt/kernel/rtdm/driver.h index ffc9607..053405e 100644 --- a/include/cobalt/kernel/rtdm/driver.h +++ b/include/cobalt/kernel/rtdm/driver.h @@ -87,7 +87,7 @@ enum rtdm_selecttype; * @{ */ /** Version of struct rtdm_device */ -#define RTDM_DEVICE_STRUCT_VER 6 +#define RTDM_DEVICE_STRUCT_VER 7 /** Version of struct rtdm_dev_context */ #define RTDM_CONTEXT_STRUCT_VER 4 @@ -414,13 +414,19 @@ void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout); * variable updated by the real-time core will hold the information * required to leave the atomic section properly. * - * @note Atomic sections may be nested. + * @note Atomic sections may be nested. The caller is allowed to sleep + * on a blocking Xenomai service from primary mode within an atomic + * section delimited by cobalt_atomic_enter/cobalt_atomic_leave calls. + * On the contrary, sleeping on a regular Linux kernel service while + * holding such lock is NOT valid. * * @note Since the strongest lock is acquired by this service, it can * be used to synchronize real-time and non-real-time contexts. * * @warning This service is not portable to the Mercury core, and - * should be restricted to Cobalt-specific use cases. + * should be restricted to Cobalt-specific use cases, mainly for the + * purpose of porting existing dual-kernel drivers which still depend + * on the obsolete RTDM_EXECUTE_ATOMICALLY() construct. */ #define cobalt_atomic_enter(context) \ do { \ @@ -471,6 +477,8 @@ void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout); * * @deprecated This construct will be phased out in Xenomai * 3.0. Please use rtdm_waitqueue services instead.
+ * + * @see cobalt_atomic_enter(). */ #ifdef DOXYGEN_CPP /* Beautify doxygen output */ #define RTDM_EXECUTE_ATOMICALLY(code_block) \ @@ -1160,12 +1168,18 @@ int rtdm_mmap_to_user(struct rtdm_fd *fd, int prot, void **pptr, struct vm_operations_struct *vm_ops, void *vm_private_data); + int rtdm_iomap_to_user(struct rtdm_fd *fd, phys_addr_t src_addr, size_t len, int prot, void **pptr, struct vm_operations_struct *vm_ops, void *vm_private_data); -int rtdm_munmap(struct rtdm_fd *fd, void *ptr, size_t len); + +int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va); + +int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa); + +int rtdm_munmap(void *ptr, size_t len); static inline int rtdm_read_user_ok(struct rtdm_fd *fd, const void __user *ptr, size_t size) diff --git a/include/cobalt/kernel/rtdm/fd.h b/include/cobalt/kernel/rtdm/fd.h index a107315..954c60f 100644 --- a/include/cobalt/kernel/rtdm/fd.h +++ b/include/cobalt/kernel/rtdm/fd.h @@ -25,7 +25,9 @@ #include <linux/socket.h> #include <cobalt/kernel/tree.h> +struct vm_area_struct; struct rtdm_fd; +struct _rtdm_mmap_request; struct xnselector; struct xnsys_ppd; @@ -124,9 +126,13 @@ struct rtdm_fd_ops { rtdm_fd_recvmsg_t *recvmsg_nrt; rtdm_fd_sendmsg_t *sendmsg_rt; rtdm_fd_sendmsg_t *sendmsg_nrt; - int (*select_bind)(struct rtdm_fd *fd, struct xnselector *selector, - unsigned int type, unsigned int index); + int (*select_bind)(struct rtdm_fd *fd, + struct xnselector *selector, + unsigned int type, + unsigned int index); void (*close)(struct rtdm_fd *fd); + int (*mmap)(struct rtdm_fd *fd, + struct vm_area_struct *vma); }; struct rtdm_fd { @@ -161,22 +167,26 @@ void rtdm_fd_put(struct rtdm_fd *fd); void rtdm_fd_unlock(struct rtdm_fd *fd); -int rtdm_fd_ioctl(struct xnsys_ppd *p, int fd, unsigned int request, ...); +int rtdm_fd_ioctl(struct xnsys_ppd *p, int ufd, unsigned int request, ...); -ssize_t rtdm_fd_read(struct xnsys_ppd *p, int fd, +ssize_t rtdm_fd_read(struct xnsys_ppd *p, int ufd, void __user *buf, size_t size); -ssize_t rtdm_fd_write(struct xnsys_ppd *p, int fd, +ssize_t rtdm_fd_write(struct xnsys_ppd *p, int ufd, const void __user *buf, size_t size); -int rtdm_fd_close(struct xnsys_ppd *p, int fd, unsigned int magic); +int rtdm_fd_close(struct xnsys_ppd *p, int ufd, unsigned int magic); -ssize_t rtdm_fd_recvmsg(struct xnsys_ppd *p, int fd, +ssize_t rtdm_fd_recvmsg(struct xnsys_ppd *p, int ufd, struct msghdr *msg, int flags); -ssize_t rtdm_fd_sendmsg(struct xnsys_ppd *p, int fd, +ssize_t rtdm_fd_sendmsg(struct xnsys_ppd *p, int ufd, const struct msghdr *msg, int flags); +int rtdm_fd_mmap(struct xnsys_ppd *p, int ufd, + struct _rtdm_mmap_request *rma, + void * __user *u_addrp); + int rtdm_fd_valid_p(int ufd); int rtdm_fd_select_bind(int ufd, struct xnselector *selector, diff --git a/include/cobalt/uapi/rtdm/syscall.h b/include/cobalt/uapi/rtdm/syscall.h index bfcdfb4..f6c7c5c 100644 --- a/include/cobalt/uapi/rtdm/syscall.h +++ b/include/cobalt/uapi/rtdm/syscall.h @@ -29,5 +29,6 @@ #define sc_rtdm_write 5 #define sc_rtdm_recvmsg 6 #define sc_rtdm_sendmsg 7 +#define sc_rtdm_mmap 8 #endif /* !_COBALT_UAPI_RTDM_SYSCALL_H */ diff --git a/include/rtdm/uapi/rtdm.h b/include/rtdm/uapi/rtdm.h index 518d688..ec74082 100644 --- a/include/rtdm/uapi/rtdm.h +++ b/include/rtdm/uapi/rtdm.h @@ -192,6 +192,14 @@ struct _rtdm_setsockaddr_args { #define _RTIOC_SHUTDOWN _IOW(RTIOC_TYPE_COMMON, 0x28, \ int) +/* Internally used for mmap() */ +struct _rtdm_mmap_request { + size_t length; + off_t offset; + int prot; + int flags; +}; + #ifndef 
RTDM_NO_DEFAULT_USER_API static inline ssize_t rt_dev_recv(int fd, void *buf, size_t len, int flags) diff --git a/kernel/cobalt/rtdm/device.c b/kernel/cobalt/rtdm/device.c index df68fdd..62003de 100644 --- a/kernel/cobalt/rtdm/device.c +++ b/kernel/cobalt/rtdm/device.c @@ -33,18 +33,18 @@ #define RTDM_DEVICE_MAGIC 0x82846877 -#define SET_DEFAULT_OP(device, operation) \ - (device).operation##_rt = (void *)rtdm_no_support; \ - (device).operation##_nrt = (void *)rtdm_no_support - -#define SET_DEFAULT_OP_IF_NULL(device, operation) \ - if (!(device).operation##_rt) \ - (device).operation##_rt = (void *)rtdm_no_support; \ - if (!(device).operation##_nrt) \ - (device).operation##_nrt = (void *)rtdm_no_support - -#define ANY_HANDLER(device, operation) \ - ((device).operation##_rt || (device).operation##_nrt) +#define SET_DEFAULT_DUAL_OP_IF_NULL(device, operation, handler) \ + if ((device).operation##_rt == NULL) \ + (device).operation##_rt = \ + (__typeof__((device).operation##_rt))handler; \ + if ((device).operation##_nrt == NULL) \ + (device).operation##_nrt = \ + (__typeof__((device).operation##_nrt))handler; + +#define SET_DEFAULT_OP_IF_NULL(device, operation, handler) \ + if ((device).operation == NULL) \ + (device).operation = \ + (__typeof__((device).operation))handler; struct list_head rtdm_named_devices; /* hash table */ struct rb_root rtdm_protocol_devices; @@ -56,18 +56,21 @@ int rtdm_initialised = 0; extern void __rt_dev_close(struct rtdm_fd *fd); -int rtdm_no_support(void) +static int enosys(void) { return -ENOSYS; } -int rtdm_select_bind_no_support(struct rtdm_fd *fd, - struct xnselector *selector, - unsigned int type, unsigned int index) +static int ebadf(void) { return -EBADF; } +static int enodev(void) +{ + return -ENODEV; +} + static inline unsigned long long get_proto_id(int pf, int type) { unsigned long long llpf = (unsigned)pf; @@ -186,7 +189,7 @@ int rtdm_dev_register(struct rtdm_device *device) printk(XENO_ERR "missing open handler for RTDM device\n"); return -EINVAL; } - device->socket = (typeof(device->socket))rtdm_no_support; + device->socket = (typeof(device->socket))enosys; break; case RTDM_PROTOCOL_DEVICE: @@ -195,7 +198,7 @@ int rtdm_dev_register(struct rtdm_device *device) printk(XENO_ERR "missing socket handler for RTDM device\n"); return -EINVAL; } - device->open = (typeof(device->open))rtdm_no_support; + device->open = (typeof(device->open))enosys; break; default: @@ -204,20 +207,20 @@ int rtdm_dev_register(struct rtdm_device *device) /* Sanity check: non-RT close handler? 
* (Always required for forced cleanup) */ - if (!device->ops.close) { + if (device->ops.close == NULL) { printk(XENO_ERR "missing close handler for RTDM device\n"); return -EINVAL; } device->reserved.close = device->ops.close; device->ops.close = __rt_dev_close; - SET_DEFAULT_OP_IF_NULL(device->ops, ioctl); - SET_DEFAULT_OP_IF_NULL(device->ops, read); - SET_DEFAULT_OP_IF_NULL(device->ops, write); - SET_DEFAULT_OP_IF_NULL(device->ops, recvmsg); - SET_DEFAULT_OP_IF_NULL(device->ops, sendmsg); - if (!device->ops.select_bind) - device->ops.select_bind = rtdm_select_bind_no_support; + SET_DEFAULT_DUAL_OP_IF_NULL(device->ops, ioctl, enosys); + SET_DEFAULT_DUAL_OP_IF_NULL(device->ops, read, enosys); + SET_DEFAULT_DUAL_OP_IF_NULL(device->ops, write, enosys); + SET_DEFAULT_DUAL_OP_IF_NULL(device->ops, recvmsg, enosys); + SET_DEFAULT_DUAL_OP_IF_NULL(device->ops, sendmsg, enosys); + SET_DEFAULT_OP_IF_NULL(device->ops, select_bind, ebadf); + SET_DEFAULT_OP_IF_NULL(device->ops, mmap, enodev); atomic_set(&device->reserved.refcount, 0); device->reserved.exclusive_context = NULL; diff --git a/kernel/cobalt/rtdm/drvlib.c b/kernel/cobalt/rtdm/drvlib.c index 02dfa35..b120d8f 100644 --- a/kernel/cobalt/rtdm/drvlib.c +++ b/kernel/cobalt/rtdm/drvlib.c @@ -1438,96 +1438,160 @@ void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig); * @{ */ -struct rtdm_mmap_data { +struct mmap_tramp_data { + struct rtdm_fd *fd; + int (*mmap_handler)(struct rtdm_fd *fd, + struct vm_area_struct *vma); + unsigned long + (*unmapped_area_handler)(struct file *filp, + struct mmap_tramp_data *tramp_data, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags); +}; + +struct mmap_helper_data { void *src_vaddr; phys_addr_t src_paddr; struct vm_operations_struct *vm_ops; void *vm_private_data; + struct mmap_tramp_data tramp_data; }; -static int rtdm_mmap_buffer(struct file *filp, struct vm_area_struct *vma) +static int mmap_kmem_helper(struct vm_area_struct *vma, void *va) { - struct rtdm_mmap_data *mmap_data = filp->private_data; - unsigned long vaddr, maddr, size; + unsigned long vaddr, maddr, len; phys_addr_t paddr; int ret; - vma->vm_ops = mmap_data->vm_ops; - vma->vm_private_data = mmap_data->vm_private_data; - - vaddr = (unsigned long)mmap_data->src_vaddr; - paddr = mmap_data->src_paddr; - if (paddr == 0) /* kmalloc memory? 
*/ - paddr = __pa(vaddr); - + vaddr = (unsigned long)va; + paddr = __pa(vaddr); maddr = vma->vm_start; - size = vma->vm_end - vma->vm_start; + len = vma->vm_end - vma->vm_start; -#ifdef CONFIG_MMU - /* Catch vmalloc memory (vaddr is 0 for I/O mapping) */ - if ((vaddr >= VMALLOC_START) && (vaddr < VMALLOC_END)) { - unsigned long mapped_size = 0; + if (!XENO_ASSERT(RTDM, vaddr == PAGE_ALIGN(vaddr))) + return -EINVAL; - if (!XENO_ASSERT(RTDM, vaddr == PAGE_ALIGN(vaddr))) - return -EINVAL; - if (!XENO_ASSERT(RTDM, (size % PAGE_SIZE) == 0)) +#ifdef CONFIG_MMU + /* Catch vmalloc memory */ + if (vaddr >= VMALLOC_START && vaddr < VMALLOC_END) { + if (!XENO_ASSERT(RTDM, (len & ~PAGE_MASK) == 0)) return -EINVAL; - while (mapped_size < size) { + while (len >= PAGE_SIZE) { if (xnheap_remap_vm_page(vma, maddr, vaddr)) return -EAGAIN; - maddr += PAGE_SIZE; vaddr += PAGE_SIZE; - mapped_size += PAGE_SIZE; + len -= PAGE_SIZE; } + if (xnarch_machdesc.prefault) xnarch_machdesc.prefault(vma); - ret = 0; - } else + + return 0; + } #else vma->vm_pgoff = paddr >> PAGE_SHIFT; #endif /* CONFIG_MMU */ - if (mmap_data->src_paddr) - ret = xnheap_remap_io_page_range(filp, vma, maddr, paddr, - size, PAGE_SHARED); - else - ret = xnheap_remap_kmem_page_range(vma, maddr, paddr, - size, PAGE_SHARED); - if (xnarch_machdesc.prefault && ret == 0) + + ret = xnheap_remap_kmem_page_range(vma, maddr, paddr, + len, PAGE_SHARED); + if (ret) + return ret; + + if (xnarch_machdesc.prefault) xnarch_machdesc.prefault(vma); + return 0; +} + +static int mmap_iomem_helper(struct vm_area_struct *vma, phys_addr_t pa) +{ + unsigned long maddr, len; + + maddr = vma->vm_start; + len = vma->vm_end - vma->vm_start; +#ifndef CONFIG_MMU + vma->vm_pgoff = pa >> PAGE_SHIFT; +#endif /* CONFIG_MMU */ + + return xnheap_remap_io_page_range(vma, maddr, pa, + len, PAGE_SHARED); +} + +static int mmap_buffer_helper(struct rtdm_fd *fd, struct vm_area_struct *vma) +{ + struct mmap_tramp_data *tramp_data = vma->vm_private_data; + struct mmap_helper_data *helper_data; + int ret; + + helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data); + vma->vm_ops = helper_data->vm_ops; + vma->vm_private_data = helper_data->vm_private_data; + + if (helper_data->src_paddr) + ret = mmap_iomem_helper(vma, helper_data->src_paddr); + else + ret = mmap_kmem_helper(vma, helper_data->src_vaddr); + return ret; } +static int mmap_trampoline(struct file *filp, struct vm_area_struct *vma) +{ + struct mmap_tramp_data *tramp_data = filp->private_data; + + vma->vm_private_data = tramp_data; + + return tramp_data->mmap_handler(tramp_data->fd, vma); +} + #ifndef CONFIG_MMU -static unsigned long rtdm_unmapped_area(struct file *file, - unsigned long addr, - unsigned long len, - unsigned long pgoff, - unsigned long flags) + +static +unsigned long unmapped_area_helper(struct file *filp, + struct mmap_tramp_data *tramp_data, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) { - struct rtdm_mmap_data *mmap_data = file->private_data; + struct mmap_helper_data *helper_data; unsigned long pa; - pa = mmap_data->src_paddr; - if (pa == 0) - pa = __pa(mmap_data->src_vaddr); + helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data); + pa = helper_data->src_paddr; + if (pa) + return (unsigned long)__va(pa); + + return (unsigned long)mmap_data->src_vaddr; +} + +static unsigned long +unmapped_area_trampoline(struct file *filp, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct 
mmap_tramp_data *tramp_data = filp->private_data; + + if (tramp_data->unmapped_area_handler == NULL) + return -ENOSYS; /* We don't know. */ - return pa; + return tramp_data->unmapped_area_handler(filp, tramp_data, addr, + len, pgoff, flags); } + #else -#define rtdm_unmapped_area NULL +#define unmapped_area_helper NULL +#define unmapped_area_trampoline NULL #endif -static struct file_operations rtdm_mmap_fops = { - .mmap = rtdm_mmap_buffer, - .get_unmapped_area = rtdm_unmapped_area +static struct file_operations mmap_fops = { + .mmap = mmap_trampoline, + .get_unmapped_area = unmapped_area_trampoline }; -static int rtdm_do_mmap(struct rtdm_fd *fd, - struct rtdm_mmap_data *mmap_data, - size_t len, int prot, void **pptr) +static int rtdm_mmap(struct mmap_tramp_data *tramp_data, + size_t len, off_t offset, int prot, int flags, + void **pptr) { const struct file_operations *old_fops; unsigned long u_addr; @@ -1542,14 +1606,10 @@ static int rtdm_do_mmap(struct rtdm_fd *fd, return PTR_ERR(filp); old_fops = filp->f_op; - filp->f_op = &rtdm_mmap_fops; - + filp->f_op = &mmap_fops; old_priv_data = filp->private_data; - filp->private_data = mmap_data; - - u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, - MAP_SHARED, 0); - + filp->private_data = tramp_data; + u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset); filp->f_op = (typeof(filp->f_op))old_fops; filp->private_data = old_priv_data; @@ -1563,6 +1623,18 @@ static int rtdm_do_mmap(struct rtdm_fd *fd, return 0; } +int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset, + int prot, int flags, void *__user *pptr) +{ + struct mmap_tramp_data tramp_data = { + .fd = fd, + .mmap_handler = fd->ops->mmap, + .unmapped_area_handler = NULL, + }; + + return rtdm_mmap(&tramp_data, len, offset, prot, flags, pptr); +} + /** * Map a kernel memory range into the address space of the user. * @@ -1574,9 +1646,9 @@ static int rtdm_do_mmap(struct rtdm_fd *fd, * either PROT_READ or PROT_READ|PROT_WRITE * @param[in,out] pptr Address of a pointer containing the desired user * address or NULL on entry and the finally assigned address on return - * @param[in] vm_ops vm_operations to be executed on the vma_area of the + * @param[in] vm_ops vm_operations to be executed on the vm_area of the * user memory range or NULL - * @param[in] vm_private_data Private data to be stored in the vma_area, + * @param[in] vm_private_data Private data to be stored in the vm_area, * primarily useful for vm_operation handlers * * @return 0 on success, otherwise (most common values): @@ -1597,16 +1669,17 @@ static int rtdm_do_mmap(struct rtdm_fd *fd, * vmalloc(). To map physical I/O memory to user-space use * rtdm_iomap_to_user() instead. * - * @note RTDM supports two models for unmapping the user memory range again. - * One is explicit unmapping via rtdm_munmap(), either performed when the - * user requests it via an IOCTL etc. or when the related device is closed. - * The other is automatic unmapping, triggered by the user invoking standard - * munmap() or by the termination of the related process. To track release of - * the mapping and therefore relinquishment of the referenced physical memory, - * the caller of rtdm_mmap_to_user() can pass a vm_operations_struct on - * invocation, defining a close handler for the vm_area. See Linux - * documentaion (e.g. Linux Device Drivers book) on virtual memory management - * for details. 
+ * @note RTDM supports two models for unmapping the memory area: + * - manual unmapping via rtdm_munmap(), which may be issued from a + * driver in response to an IOCTL call, or by a call to the regular + * munmap() call from the application. + * - automatic unmapping, triggered by the termination of the process + * which owns the mapping. + * To track the number of references pending on the resource mapped, + * the driver can pass the address of a close handler for the vm_area + * considered, in the @a vm_ops descriptor. See the relevant Linux + * kernel programming documentation (e.g. Linux Device Drivers book) + * on virtual memory management for details. * * @coretags{secondary-only} */ @@ -1616,16 +1689,20 @@ int rtdm_mmap_to_user(struct rtdm_fd *fd, struct vm_operations_struct *vm_ops, void *vm_private_data) { - struct rtdm_mmap_data mmap_data = { + struct mmap_helper_data helper_data = { + .tramp_data = { + .fd = fd, + .mmap_handler = mmap_buffer_helper, + .unmapped_area_handler = unmapped_area_helper, + }, .src_vaddr = src_addr, .src_paddr = 0, .vm_ops = vm_ops, .vm_private_data = vm_private_data }; - return rtdm_do_mmap(fd, &mmap_data, len, prot, pptr); + return rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr); } - EXPORT_SYMBOL_GPL(rtdm_mmap_to_user); /** @@ -1639,9 +1716,9 @@ EXPORT_SYMBOL_GPL(rtdm_mmap_to_user); * either PROT_READ or PROT_READ|PROT_WRITE * @param[in,out] pptr Address of a pointer containing the desired user * address or NULL on entry and the finally assigned address on return - * @param[in] vm_ops vm_operations to be executed on the vma_area of the + * @param[in] vm_ops vm_operations to be executed on the vm_area of the * user memory range or NULL - * @param[in] vm_private_data Private data to be stored in the vma_area, + * @param[in] vm_private_data Private data to be stored in the vm_area, * primarily useful for vm_operation handlers * * @return 0 on success, otherwise (most common values): @@ -1658,16 +1735,17 @@ EXPORT_SYMBOL_GPL(rtdm_mmap_to_user); * - -EPERM @e may be returned if an illegal invocation environment is * detected. * - * @note RTDM supports two models for unmapping the user memory range again. - * One is explicit unmapping via rtdm_munmap(), either performed when the - * user requests it via an IOCTL etc. or when the related device is closed. - * The other is automatic unmapping, triggered by the user invoking standard - * munmap() or by the termination of the related process. To track release of - * the mapping and therefore relinquishment of the referenced physical memory, - * the caller of rtdm_iomap_to_user() can pass a vm_operations_struct on - * invocation, defining a close handler for the vm_area. See Linux - * documentaion (e.g. Linux Device Drivers book) on virtual memory management - * for details. + * @note RTDM supports two models for unmapping the memory area: + * - manual unmapping via rtdm_munmap(), which may be issued from a + * driver in response to an IOCTL call, or by a call to the regular + * munmap() call from the application. + * - automatic unmapping, triggered by the termination of the process + * which owns the mapping. + * To track the number of references pending on the resource mapped, + * the driver can pass the address of a close handler for the vm_area + * considered, in the @a vm_ops descriptor. See the relevant Linux + * kernel programming documentation (e.g. Linux Device Drivers book) + * on virtual memory management for details. 
* * @coretags{secondary-only} */ @@ -1677,23 +1755,72 @@ int rtdm_iomap_to_user(struct rtdm_fd *fd, struct vm_operations_struct *vm_ops, void *vm_private_data) { - struct rtdm_mmap_data mmap_data = { + struct mmap_helper_data helper_data = { + .tramp_data = { + .fd = fd, + .mmap_handler = mmap_buffer_helper, + .unmapped_area_handler = unmapped_area_helper, + }, .src_vaddr = NULL, .src_paddr = src_addr, .vm_ops = vm_ops, .vm_private_data = vm_private_data }; - return rtdm_do_mmap(fd, &mmap_data, len, prot, pptr); + return rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr); } - EXPORT_SYMBOL_GPL(rtdm_iomap_to_user); /** + * Map a kernel memory range to a virtual memory area. + * + * This routine is commonly used from a ->mmap() handler of a RTDM + * driver, for mapping a kernel memory area over the user address + * space referred to by @a vma. + * + * @param[in] vma The VMA descriptor to receive the mapping. + * @param[in] va The kernel virtual address to be mapped. + * + * @return 0 on success, otherwise a negated error code is returned. + * + * @note This service only works on memory regions allocated via + * kmalloc() or vmalloc(). To map a chunk of physical I/O memory to a + * VMA, call rtdm_mmap_iomem() instead. + * + * @coretags{secondary-only} + */ +int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va) +{ + return mmap_kmem_helper(vma, va); +} +EXPORT_SYMBOL_GPL(rtdm_mmap_kmem); + +/** + * Map an I/O memory range to a virtual memory area. + * + * This routine is commonly used from a ->mmap() handler of a RTDM + * driver, for mapping an I/O memory area over the user address space + * referred to by @a vma. + * + * @param[in] vma The VMA descriptor to receive the mapping. + * @param[in] pa The physical I/O address to be mapped. + * + * @return 0 on success, otherwise a negated error code is returned. + * + * @note To map a chunk of kernel virtual memory to a VMA, call + * rtdm_mmap_kmem() instead. + * + * @coretags{secondary-only} + */ +int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa) +{ + return mmap_iomem_helper(vma, pa); +} +EXPORT_SYMBOL_GPL(rtdm_mmap_iomem); + +/** * Unmap a user memory range. 
* - * @param[in] fd RTDM file descriptor as passed to - * rtdm_mmap_to_user() when requesting to map the memory range * @param[in] ptr User address or the memory range * @param[in] len Length of the memory range * @@ -1706,18 +1833,12 @@ EXPORT_SYMBOL_GPL(rtdm_iomap_to_user); * * @coretags{secondary-only} */ -int rtdm_munmap(struct rtdm_fd *fd, void *ptr, size_t len) +int rtdm_munmap(void *ptr, size_t len) { - int err; - if (!XENO_ASSERT(RTDM, xnsched_root_p())) return -EPERM; - down_write(¤t->mm->mmap_sem); - err = do_munmap(current->mm, (unsigned long)ptr, len); - up_write(¤t->mm->mmap_sem); - - return err; + return vm_munmap((unsigned long)ptr, len); } EXPORT_SYMBOL_GPL(rtdm_munmap); diff --git a/kernel/cobalt/rtdm/fd.c b/kernel/cobalt/rtdm/fd.c index abbc9a2..173abee 100644 --- a/kernel/cobalt/rtdm/fd.c +++ b/kernel/cobalt/rtdm/fd.c @@ -28,16 +28,12 @@ #include <cobalt/kernel/ppd.h> #include <trace/events/cobalt-rtdm.h> #include <rtdm/fd.h> +#include "internal.h" DEFINE_PRIVATE_XNLOCK(__rtdm_fd_lock); static LIST_HEAD(rtdm_fd_cleanup_queue); static struct semaphore rtdm_fd_cleanup_sem; -int __rt_dev_ioctl_fallback(struct rtdm_fd *fd, - unsigned int request, void __user *arg); - -void __rt_dev_unref(struct rtdm_fd *fd, unsigned int idx); - static int enosys(void) { return -ENOSYS; @@ -48,6 +44,11 @@ static int ebadf(void) return -EBADF; } +static int enodev(void) +{ + return -ENODEV; +} + static void nop_close(struct rtdm_fd *fd) { } @@ -139,6 +140,9 @@ int rtdm_fd_enter(struct xnsys_ppd *p, struct rtdm_fd *fd, int ufd, if (ops->select_bind == NULL) ops->select_bind = (typeof(ops->select_bind))ebadf; + if (ops->mmap == NULL) + ops->mmap = (typeof(ops->mmap))enodev; + if (ops->close == NULL) ops->close = nop_close; @@ -421,7 +425,7 @@ rtdm_fd_read(struct xnsys_ppd *p, int ufd, void __user *buf, size_t size) EXPORT_SYMBOL_GPL(rtdm_fd_read); ssize_t rtdm_fd_write(struct xnsys_ppd *p, int ufd, - const void __user *buf, size_t size) + const void __user *buf, size_t size) { struct rtdm_fd *fd; ssize_t err; @@ -552,6 +556,37 @@ int rtdm_fd_close(struct xnsys_ppd *p, int ufd, unsigned int magic) } EXPORT_SYMBOL_GPL(rtdm_fd_close); +int rtdm_fd_mmap(struct xnsys_ppd *p, int ufd, + struct _rtdm_mmap_request *rma, + void * __user *u_addrp) +{ + struct rtdm_fd *fd; + int ret; + + fd = rtdm_fd_get(p, ufd, XNFD_MAGIC_ANY); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + trace_cobalt_fd_mmap(current, fd, ufd, rma); + + if (rma->flags & (MAP_FIXED|MAP_ANONYMOUS)) { + ret = -ENODEV; + goto unlock; + } + + ret = __rtdm_mmap_from_fdop(fd, rma->length, rma->offset, + rma->prot, rma->flags, u_addrp); +unlock: + rtdm_fd_put(fd); +out: + if (ret) + trace_cobalt_fd_mmap_status(current, fd, ufd, ret); + + return ret; +} + int rtdm_fd_valid_p(int ufd) { struct rtdm_fd *fd; diff --git a/kernel/cobalt/rtdm/internal.h b/kernel/cobalt/rtdm/internal.h index 6db80a6..47cb862 100644 --- a/kernel/cobalt/rtdm/internal.h +++ b/kernel/cobalt/rtdm/internal.h @@ -22,6 +22,7 @@ #include <linux/list.h> #include <linux/sem.h> +#include <linux/mutex.h> #include <cobalt/kernel/ppd.h> #include <cobalt/kernel/tree.h> #include <rtdm/driver.h> @@ -31,6 +32,8 @@ #define DEF_DEVNAME_HASHTAB_SIZE 256 /* entries in name hash table */ #define DEF_PROTO_HASHTAB_SIZE 256 /* entries in protocol hash table */ +struct rtdm_fd; + struct rtdm_process { #ifdef CONFIG_XENO_OPT_VFILE char name[32]; @@ -77,7 +80,13 @@ static inline void rtdm_proc_unregister_device(struct rtdm_device *device) { } #endif -void rtdm_apc_handler(void 
*cookie); +int __rt_dev_ioctl_fallback(struct rtdm_fd *fd, + unsigned int request, void __user *arg); + +void __rt_dev_unref(struct rtdm_fd *fd, unsigned int idx); + +int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset, + int prot, int flags, void *__user *pptr); int rtdm_init(void); diff --git a/kernel/cobalt/rtdm/proc.c b/kernel/cobalt/rtdm/proc.c index 2e3b988..feb609b 100644 --- a/kernel/cobalt/rtdm/proc.c +++ b/kernel/cobalt/rtdm/proc.c @@ -182,7 +182,7 @@ static int openfd_show(struct xnvfile_regular_iterator *it, void *data) { struct rtdm_dev_context *context; struct rtdm_device *device; - struct rtdm_process owner; + struct rtdm_process *owner; int close_lock_count, i; struct rtdm_fd *fd; @@ -200,15 +200,14 @@ static int openfd_show(struct xnvfile_regular_iterator *it, void *data) context = rtdm_fd_to_context(fd); close_lock_count = fd->refs; device = context->device; - - strcpy(owner.name, "<kernel>"); - owner.pid = -1; + owner = context->reserved.owner; xnvfile_printf(it, "%d\t%d\t%-31s %s [%d]\n", i, close_lock_count, (device->device_flags & RTDM_NAMED_DEVICE) ? device->device_name : device->proc_name, - owner.name, owner.pid); + owner ? owner->name : "<kernel>", + owner ? owner->pid : 0); rtdm_fd_put(fd); diff --git a/kernel/cobalt/rtdm/syscall.c b/kernel/cobalt/rtdm/syscall.c index 810570b..0837128 100644 --- a/kernel/cobalt/rtdm/syscall.c +++ b/kernel/cobalt/rtdm/syscall.c @@ -92,6 +92,26 @@ int sys_rtdm_close(int fd) return rtdm_fd_close(xnsys_ppd_get(0), fd, XNFD_MAGIC_ANY); } +int sys_rtdm_mmap(int fd, struct _rtdm_mmap_request __user *u_rma, + void __user **u_addrp) +{ + struct _rtdm_mmap_request rma; + void *u_addr; + int ret; + + if (__xn_copy_from_user(&rma, u_rma, sizeof(rma))) + return -EFAULT; + + ret = rtdm_fd_mmap(xnsys_ppd_get(0), fd, &rma, &u_addr); + if (ret) + return ret; + + if (__xn_copy_to_user(u_addrp, &u_addr, sizeof(u_addr))) + return -EFAULT; + + return 0; +} + static void *rtdm_process_attach(void) { struct rtdm_process *process; @@ -119,6 +139,7 @@ static struct xnsyscall rtdm_syscalls[] = { SKINCALL_DEF(sc_rtdm_open, sys_rtdm_open, lostage), SKINCALL_DEF(sc_rtdm_socket, sys_rtdm_socket, lostage), SKINCALL_DEF(sc_rtdm_close, sys_rtdm_close, lostage), + SKINCALL_DEF(sc_rtdm_mmap, sys_rtdm_mmap, lostage), SKINCALL_DEF(sc_rtdm_ioctl, sys_rtdm_ioctl, probing), SKINCALL_DEF(sc_rtdm_read, sys_rtdm_read, probing), SKINCALL_DEF(sc_rtdm_write, sys_rtdm_write, probing), diff --git a/kernel/cobalt/trace/cobalt-rtdm.h b/kernel/cobalt/trace/cobalt-rtdm.h index 3f364ef..f38f2e4 100644 --- a/kernel/cobalt/trace/cobalt-rtdm.h +++ b/kernel/cobalt/trace/cobalt-rtdm.h @@ -5,6 +5,7 @@ #define _TRACE_COBALT_RTDM_H #include <linux/tracepoint.h> +#include <linux/mman.h> struct rtdm_event; struct rtdm_sem; @@ -255,6 +256,60 @@ DEFINE_EVENT(fd_request, cobalt_fd_recvmsg, TP_ARGS(task, fd, ufd, flags) ); +#define cobalt_print_protbits(__prot) \ + __print_flags(__prot, "|", \ + {PROT_EXEC, "exec"}, \ + {PROT_READ, "read"}, \ + {PROT_WRITE, "write"}) + +#define cobalt_print_mapbits(__flags) \ + __print_flags(__flags, "|", \ + {MAP_SHARED, "shared"}, \ + {MAP_PRIVATE, "private"}, \ + {MAP_ANONYMOUS, "anon"}, \ + {MAP_FIXED, "fixed"}, \ + {MAP_HUGETLB, "huge"}, \ + {MAP_NONBLOCK, "nonblock"}, \ + {MAP_NORESERVE, "noreserve"}, \ + {MAP_POPULATE, "populate"}, \ + {MAP_UNINITIALIZED, "uninit"}) + +TRACE_EVENT(cobalt_fd_mmap, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, struct _rtdm_mmap_request *rma), + TP_ARGS(task, fd, ufd, rma), + + 
TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(struct rtdm_device *, device) + __field(int, ufd) + __field(size_t, length) + __field(off_t, offset) + __field(int, prot) + __field(int, flags) + ), + + TP_fast_assign( + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->pid = task->pid; + __entry->device = rtdm_fd_to_context(fd)->device; + __entry->ufd = ufd; + __entry->length = rma->length; + __entry->offset = rma->offset; + __entry->prot = rma->prot; + __entry->flags = rma->flags; + ), + + TP_printk("device=%p fd=%d area={ len:%Zu, off:%Lu }" + " prot=%#x(%s) flags=%#x(%s) pid=%d comm=%s", + __entry->device, __entry->ufd, __entry->length, + (unsigned long long)__entry->offset, + __entry->prot, cobalt_print_protbits(__entry->prot), + __entry->flags, cobalt_print_mapbits(__entry->flags), + __entry->pid, __entry->comm) +); + DEFINE_EVENT(fd_request_status, cobalt_fd_ioctl_status, TP_PROTO(struct task_struct *task, struct rtdm_fd *fd, int ufd, @@ -290,6 +345,13 @@ DEFINE_EVENT(fd_request_status, cobalt_fd_sendmsg_status, TP_ARGS(task, fd, ufd, status) ); +DEFINE_EVENT(fd_request_status, cobalt_fd_mmap_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + DEFINE_EVENT(task_op, cobalt_driver_task_join, TP_PROTO(struct xnthread *task), TP_ARGS(task) _______________________________________________ Xenomai-git mailing list [email protected] http://www.xenomai.org/mailman/listinfo/xenomai-git
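
A minimal usage sketch, not part of the patch: a driver hooking the new
->mmap() handler and relying on the rtdm_mmap_kmem()/rtdm_mmap_iomem()
helpers introduced above. All "mydrv" names, the single-page buffer and the
driver-side include path are assumptions made for this illustration only.

/* Illustrative only -- names and include path are assumptions. */
#include <linux/mm.h>
#include <linux/slab.h>
#include <rtdm/driver.h>	/* assumed driver-side include path */

static void *mydrv_buf;		/* page-aligned buffer shared with user-space */

/* Allocate the backing buffer at driver init time; a PAGE_SIZE kmalloc
   returns a page-aligned block, as required by the kmem mapping helper. */
static int mydrv_init_buffer(void)
{
	mydrv_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	return mydrv_buf ? 0 : -ENOMEM;
}

/* ->mmap() handler, invoked in secondary mode with a valid vma
   describing the target user address range. */
static int mydrv_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
{
	size_t len = vma->vm_end - vma->vm_start;

	/* Refuse mappings larger than the backing buffer. */
	if (len > PAGE_SIZE)
		return -EINVAL;

	/*
	 * Map the kmalloc'ed buffer over the user range described by
	 * the vma. For a physical register window, the driver would
	 * call rtdm_mmap_iomem(vma, io_base_pa) instead.
	 */
	return rtdm_mmap_kmem(vma, mydrv_buf);
}

The handler is then listed in the device operation descriptor along with the
other handlers (e.g. .mmap = mydrv_mmap), so that mmap() requests issued by
applications on the RTDM file descriptor reach it.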
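
For comparison, a sketch of the pre-existing helper-driven path, which this
patch keeps unmodified: the driver performs the mapping itself through
rtdm_mmap_to_user(), typically from an IOCTL handler which then hands the
resulting user address back to the application. The mydrv_* names are again
hypothetical, reusing mydrv_buf from the previous sketch.

#include <linux/mman.h>		/* PROT_READ, PROT_WRITE */

/* Map the kernel buffer on behalf of the caller; on success,
   *u_addrp holds the user virtual address assigned to the mapping. */
static int mydrv_map_buffer(struct rtdm_fd *fd, void **u_addrp)
{
	*u_addrp = NULL;	/* let the kernel choose the user address */

	return rtdm_mmap_to_user(fd, mydrv_buf, PAGE_SIZE,
				 PROT_READ | PROT_WRITE, u_addrp,
				 NULL /* vm_ops */, NULL /* vm_private_data */);
}

Unmapping may then be done explicitly with rtdm_munmap(*u_addrp, PAGE_SIZE)
(note that this patch drops the fd argument from rtdm_munmap()), or
implicitly when the owning process terminates.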
