Module: xenomai-head
Branch: master
Commit: 49581f94270d2f6ef1f3fef62f98f19a1798f4a6
URL:    http://git.xenomai.org/?p=xenomai-head.git;a=commit;h=49581f94270d2f6ef1f3fef62f98f19a1798f4a6
Author: Gilles Chanteperdrix <[email protected]>
Date:   Tue Jan 25 03:35:04 2011 +0100

native: centralize allocation of mutex fastlocks

Change the native skin mutex code so that the mutex fastlock is
allocated in only one place, rt_mutex_create_inner, and freed in only
one place, rt_mutex_delete. We also remove rt_mutex_delete_inner and
add/fix error checking in rt_mutex_delete and __rt_mutex_create.

---
 include/native/mutex.h      |    8 +--
 ksrc/skins/native/mutex.c   |  138 +++++++++++++++++++-----------------------
 ksrc/skins/native/syscall.c |   84 +++++++++++----------------
 3 files changed, 99 insertions(+), 131 deletions(-)

diff --git a/include/native/mutex.h b/include/native/mutex.h
index d854ee0..bf033a8 100644
--- a/include/native/mutex.h
+++ b/include/native/mutex.h
@@ -2,7 +2,7 @@
  * @file
  * This file is part of the Xenomai project.
  *
- * @note Copyright (C) 2004 Philippe Gerum <[email protected]>
+ * @note Copyright (C) 2004 Philippe Gerum <[email protected]>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -73,7 +73,7 @@ typedef struct __rt_mutex {
 	xnhandle_t handle;	/* !< Handle in registry -- zero if unregistered. */
 
 	int lockcnt;		/* !< Lock nesting level (> 0 means "locked"). */
-	
+
 	char name[XNOBJECT_NAME_LEN]; /* !< Symbolic name. */
 
 #ifdef CONFIG_XENO_OPT_PERVASIVE
@@ -147,9 +147,7 @@ static inline int rt_mutex_unbind (RT_MUTEX *mutex)
 extern "C" {
 #endif
 
-int rt_mutex_create_inner(RT_MUTEX *mutex, const char *name,
-			  xnarch_atomic_t *fastlock);
-int rt_mutex_delete_inner(RT_MUTEX *mutex);
+int rt_mutex_create_inner(RT_MUTEX *mutex, const char *name, int global);
 
 /* Public interface. */
 
diff --git a/ksrc/skins/native/mutex.c b/ksrc/skins/native/mutex.c
index 6cf7eb1..4920d9f 100644
--- a/ksrc/skins/native/mutex.c
+++ b/ksrc/skins/native/mutex.c
@@ -2,7 +2,7 @@
  * @file
  * This file is part of the Xenomai project.
  *
- * @note Copyright (C) 2004 Philippe Gerum <[email protected]>
+ * @note Copyright (C) 2004 Philippe Gerum <[email protected]>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -134,17 +134,29 @@ static xnpnode_t __mutex_pnode = {
 
 #endif /* !CONFIG_PROC_FS */
 
-int rt_mutex_create_inner(RT_MUTEX *mutex, const char *name,
-			  xnarch_atomic_t *fastlock)
+int rt_mutex_create_inner(RT_MUTEX *mutex, const char *name, int global)
 {
+	xnflags_t flags = XNSYNCH_PRIO | XNSYNCH_PIP | XNSYNCH_OWNER;
+	xnarch_atomic_t *fastlock = NULL;
 	int err = 0;
 	spl_t s;
 
 	if (xnpod_asynch_p())
 		return -EPERM;
 
-	xnsynch_init(&mutex->synch_base,
-		     XNSYNCH_PRIO | XNSYNCH_PIP | XNSYNCH_OWNER, fastlock);
+#ifdef CONFIG_XENO_FASTSYNCH
+	/* Allocate lock memory for in-kernel use */
+	fastlock = xnheap_alloc(&xnsys_ppd_get(global)->sem_heap,
+				sizeof(*fastlock));
+
+	if (!fastlock)
+		return -ENOMEM;
+
+	if (global)
+		flags |= RT_MUTEX_EXPORTED;
+#endif /* CONFIG_XENO_FASTSYNCH */
+
+	xnsynch_init(&mutex->synch_base, flags, fastlock);
 	mutex->handle = 0;	/* i.e. (still) unregistered mutex. */
 	mutex->magic = XENO_MUTEX_MAGIC;
 	mutex->lockcnt = 0;
@@ -169,7 +181,7 @@ int rt_mutex_create_inner(RT_MUTEX *mutex, const char *name,
 					   &__mutex_pnode);
 
 		if (err)
-			rt_mutex_delete_inner(mutex);
+			rt_mutex_delete(mutex);
 	}
 
 	return err;
@@ -219,63 +231,7 @@ int rt_mutex_create_inner(RT_MUTEX *mutex, const char *name,
 
 int rt_mutex_create(RT_MUTEX *mutex, const char *name)
 {
-	xnarch_atomic_t *fastlock = NULL;
-	int err;
-
-#ifdef CONFIG_XENO_FASTSYNCH
-	/* Allocate lock memory for in-kernel use */
-	fastlock = xnmalloc(sizeof(xnarch_atomic_t));
-
-	if (!fastlock)
-		return -ENOMEM;
-#endif /* CONFIG_XENO_FASTSYNCH */
-
-	err = rt_mutex_create_inner(mutex, name, fastlock);
-
-#ifdef CONFIG_XENO_FASTSYNCH
-	if (err)
-		xnfree(fastlock);
-#endif /* CONFIG_XENO_FASTSYNCH */
-
-	return err;
-}
-
-int rt_mutex_delete_inner(RT_MUTEX *mutex)
-{
-	int err = 0, rc;
-	spl_t s;
-
-	if (xnpod_asynch_p())
-		return -EPERM;
-
-	xnlock_get_irqsave(&nklock, s);
-
-	mutex = xeno_h2obj_validate(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
-
-	if (!mutex) {
-		err = xeno_handle_error(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
-		goto unlock_and_exit;
-	}
-
-	removeq(mutex->rqueue, &mutex->rlink);
-
-	rc = xnsynch_destroy(&mutex->synch_base);
-
-	if (mutex->handle)
-		xnregistry_remove(mutex->handle);
-
-	xeno_mark_deleted(mutex);
-
-	if (rc == XNSYNCH_RESCHED)
-		/* Some task has been woken up as a result of the deletion:
-		   reschedule now. */
-		xnpod_schedule();
-
-      unlock_and_exit:
-
-	xnlock_put_irqrestore(&nklock, s);
-
-	return err;
+	return rt_mutex_create_inner(mutex, name, 1);
 }
 
 /**
@@ -312,22 +268,52 @@ int rt_mutex_delete_inner(RT_MUTEX *mutex)
 
 int rt_mutex_delete(RT_MUTEX *mutex)
 {
-	int err;
+	int err = 0, global = 0, rc;
+	spl_t s;
 
-	err = rt_mutex_delete_inner(mutex);
+	if (xnpod_asynch_p())
+		return -EPERM;
 
-#ifdef CONFIG_XENO_FASTSYNCH
-	if (!err) {
-#ifdef CONFIG_XENO_OPT_PERVASIVE
-		if (mutex->cpid) {
-			int global = xnsynch_test_flags(&mutex->synch_base,
-							RT_MUTEX_EXPORTED);
-			xnheap_free(&xnsys_ppd_get(global)->sem_heap,
-				    mutex->synch_base.fastlock);
-		} else
-#endif /* CONFIG_XENO_OPT_PERVASIVE */
-			xnfree(mutex->synch_base.fastlock);
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xeno_h2obj_validate(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
+
+	if (!mutex) {
+		err = xeno_handle_error(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
+		goto unlock_and_exit;
 	}
+
+	global = xnsynch_test_flags(&mutex->synch_base, RT_MUTEX_EXPORTED);
+
+#if defined(CONFIG_XENO_FASTSYNCH) && defined(CONFIG_XENO_OPT_PERVASIVE)
+	if (!global && mutex->cpid != current->pid) {
+		err = -EINVAL;
+		goto unlock_and_exit;
+	}
+#endif /* CONFIG_XENO_FASTSYNCH */
+
+	removeq(mutex->rqueue, &mutex->rlink);
+
+	rc = xnsynch_destroy(&mutex->synch_base);
+
+	if (mutex->handle)
+		xnregistry_remove(mutex->handle);
+
+	xeno_mark_deleted(mutex);
+
+	if (rc == XNSYNCH_RESCHED)
+		/* Some task has been woken up as a result of the deletion:
+		   reschedule now. */
+		xnpod_schedule();
+
+      unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+#ifdef CONFIG_XENO_FASTSYNCH
+	if (!err)
+		xnheap_free(&xnsys_ppd_get(global)->sem_heap,
+			    mutex->synch_base.fastlock);
 #endif /* CONFIG_XENO_FASTSYNCH */
 
 	return err;
diff --git a/ksrc/skins/native/syscall.c b/ksrc/skins/native/syscall.c
index 56407b8..e6c4257 100644
--- a/ksrc/skins/native/syscall.c
+++ b/ksrc/skins/native/syscall.c
@@ -2,7 +2,7 @@
  * @file
  * This file is part of the Xenomai project.
  *
- * @note Copyright (C) 2004 Philippe Gerum <[email protected]>
+ * @note Copyright (C) 2004 Philippe Gerum <[email protected]>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -372,8 +372,8 @@ static int __rt_task_yield(struct pt_regs *regs)
 
 /*
  * int __rt_task_set_periodic(RT_TASK_PLACEHOLDER *ph,
- *				RTIME idate,
- *				RTIME period)
+ *                            RTIME idate,
+ *                            RTIME period)
  */
 
 static int __rt_task_set_periodic(struct pt_regs *regs)
@@ -1508,7 +1508,6 @@ static int __rt_event_inquire(struct pt_regs *regs)
 static int __rt_mutex_create(struct pt_regs *regs)
 {
 	char name[XNOBJECT_NAME_LEN];
-	xnarch_atomic_t *fastlock = NULL;
 	xnheap_t *sem_heap;
 	RT_MUTEX_PLACEHOLDER ph;
 	RT_MUTEX *mutex;
@@ -1531,39 +1530,31 @@ static int __rt_mutex_create(struct pt_regs *regs)
 	if (!mutex)
 		return -ENOMEM;
 
-#ifdef CONFIG_XENO_FASTSYNCH
-	fastlock = xnheap_alloc(sem_heap, sizeof(xnarch_atomic_t));
-
-	if (!fastlock) {
-		xnfree(mutex);
-		return -ENOMEM;
-	}
-#endif /* CONFIG_XENO_FASTSYNCH */
-
-	err = rt_mutex_create_inner(mutex, name, fastlock);
+	err = rt_mutex_create_inner(mutex, name, *name != '\0');
+	if (err < 0)
+		goto err_free_mutex;
 
-	if (err == 0) {
-		mutex->cpid = current->pid;
-		/* Copy back the registry handle to the ph struct. */
-		ph.opaque = mutex->handle;
+	mutex->cpid = current->pid;
+	/* Copy back the registry handle to the ph struct. */
+	ph.opaque = mutex->handle;
 #ifdef CONFIG_XENO_FASTSYNCH
 	/* The lock address will be finished in user space. */
-		ph.fastlock =
-			(void *)xnheap_mapped_offset(sem_heap, fastlock);
-		if (*name != '\0')
-			xnsynch_set_flags(&mutex->synch_base,
-					  RT_MUTEX_EXPORTED);
-#endif /* CONFIG_XENO_FASTSYNCH */
-		if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs), &ph,
-					   sizeof(ph)))
-			err = -EFAULT;
-	} else {
-#ifdef CONFIG_XENO_FASTSYNCH
-		xnheap_free(&xnsys_ppd_get(*name != '\0')->sem_heap, fastlock);
+	ph.fastlock =
+		(void *)xnheap_mapped_offset(sem_heap,
+					     mutex->synch_base.fastlock);
 #endif /* CONFIG_XENO_FASTSYNCH */
-		xnfree(mutex);
+	if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs), &ph,
+				   sizeof(ph))) {
+		err = -EFAULT;
+		goto err_delete_mutex;
 	}
 
+	return 0;
+
+      err_delete_mutex:
+	rt_mutex_delete(mutex);
+      err_free_mutex:
+	xnfree(mutex);
 	return err;
 }
 
@@ -1618,17 +1609,10 @@ static int __rt_mutex_delete(struct pt_regs *regs)
 	if (!mutex)
 		return -ESRCH;
 
-	err = rt_mutex_delete_inner(mutex);
+	err = rt_mutex_delete(mutex);
 
-	if (!err && mutex->cpid) {
-#ifdef CONFIG_XENO_FASTSYNCH
-		int global = xnsynch_test_flags(&mutex->synch_base,
-						RT_MUTEX_EXPORTED);
-		xnheap_free(&xnsys_ppd_get(global)->sem_heap,
-			    mutex->synch_base.fastlock);
-#endif /* CONFIG_XENO_FASTSYNCH */
+	if (!err && mutex->cpid)
 		xnfree(mutex);
-	}
 
 	return err;
 }
@@ -1829,10 +1813,10 @@ static int __rt_cond_delete(struct pt_regs *regs)
 
 /*
  * int __rt_cond_wait_prologue(RT_COND_PLACEHOLDER *cph,
- *				RT_MUTEX_PLACEHOLDER *mph,
+ *                             RT_MUTEX_PLACEHOLDER *mph,
  *                             unsigned *plockcnt,
- *				xntmode_t timeout_mode,
- *				RTIME *timeoutp)
+ *                             xntmode_t timeout_mode,
+ *                             RTIME *timeoutp)
  */
 
 static int __rt_cond_wait_prologue(struct pt_regs *regs)
@@ -1999,7 +1983,7 @@ static int __rt_cond_inquire(struct pt_regs *regs)
 #else /* !CONFIG_XENO_OPT_NATIVE_COND */
 
 #define __rt_cond_create	__rt_call_not_available
-#define __rt_cond_bind	__rt_call_not_available
+#define __rt_cond_bind		__rt_call_not_available
 #define __rt_cond_delete	__rt_call_not_available
 #define __rt_cond_wait_prologue	__rt_call_not_available
 #define __rt_cond_wait_epilogue	__rt_call_not_available
@@ -2374,9 +2358,9 @@ static int __rt_queue_receive(struct pt_regs *regs)
 	if (__xn_safe_copy_from_user(&timeout, (void __user *)__xn_reg_arg4(regs),
 				     sizeof(timeout)))
 		return -EFAULT;
-	
+
 	timeout_mode = __xn_reg_arg3(regs);
-	
+
 	xnlock_get_irqsave(&nklock, s);
 
 	q = (RT_QUEUE *)xnregistry_fetch(ph.opaque);
@@ -2447,7 +2431,7 @@ static int __rt_queue_read(struct pt_regs *regs)
 
 	/* Relative/absolute timeout spec. */
 	timeout_mode = __xn_reg_arg4(regs);
-	
+
 	if (__xn_safe_copy_from_user(&timeout, (void __user *)__xn_reg_arg5(regs),
 				     sizeof(timeout)))
 		return -EFAULT;
@@ -4107,10 +4091,10 @@ static xnsysent_t __systab[] = {
 	[__native_cond_create] = {&__rt_cond_create, __xn_exec_any},
 	[__native_cond_bind] = {&__rt_cond_bind, __xn_exec_conforming},
 	[__native_cond_delete] = {&__rt_cond_delete, __xn_exec_any},
-	[__native_cond_wait_prologue] = 
-	    {&__rt_cond_wait_prologue, 
+	[__native_cond_wait_prologue] =
+	    {&__rt_cond_wait_prologue,
 	     __xn_exec_primary | __xn_exec_norestart},
-	[__native_cond_wait_epilogue] = 
+	[__native_cond_wait_epilogue] =
 	    {&__rt_cond_wait_epilogue, __xn_exec_primary},
 	[__native_cond_signal] = {&__rt_cond_signal, __xn_exec_any},
 	[__native_cond_broadcast] = {&__rt_cond_broadcast, __xn_exec_any},
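[Editor's note] For readers tracking the API change: after this commit, kernel-side
users of the native skin never touch the fastlock themselves; rt_mutex_create()
allocates it through rt_mutex_create_inner() and rt_mutex_delete() releases it
back to the sem heap. A minimal, hypothetical usage sketch follows (the demo_*
names are invented for illustration and are not part of the tree):

#include <native/mutex.h>

static RT_MUTEX demo_mutex;	/* hypothetical example object */

static int demo_setup(void)
{
	/* The fastlock is now allocated inside rt_mutex_create_inner();
	   expect -ENOMEM if that allocation fails and -EPERM when called
	   from an asynchronous (interrupt) context. */
	int err = rt_mutex_create(&demo_mutex, "demo");

	if (err)
		return err;

	/* rt_mutex_acquire()/rt_mutex_release() are used from real-time
	   task context exactly as before; only creation and deletion
	   changed. */
	return 0;
}

static void demo_teardown(void)
{
	/* rt_mutex_delete() is the single place where the fastlock is
	   freed, per this commit. */
	rt_mutex_delete(&demo_mutex);
}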
