Module: xenomai-3 Branch: next Commit: 99e8036a25bb08815fcbda27ea4109fc486306b7 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=99e8036a25bb08815fcbda27ea4109fc486306b7
Author: Philippe Gerum <r...@xenomai.org> Date: Fri Jul 8 15:04:01 2016 +0200 cobalt/kernel: use raw_spinlock* API to access IRQ pipeline locks In order to cope with PREEMPT_RT_FULL, the spinlock* API should be invoked for regular spin locks exclusively, so that those locks can be handled by PREEMPT_RT's sleeping lock API seamlessly. Since I-pipe locks are basically raw locks with hard IRQ management, sticking to the raw_spinlock* API for them makes sense. The regular spinlock* and raw_spinlock* APIs can be used interchangeably for manipulating IRQ pipeline locks (ipipe_spinlock_t) with the current pipeline releases, so this change is backward compatible. --- include/cobalt/kernel/rtdm/driver.h | 10 +++++----- kernel/cobalt/apc.c | 12 ++++++------ kernel/cobalt/posix/process.c | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/include/cobalt/kernel/rtdm/driver.h b/include/cobalt/kernel/rtdm/driver.h index 1133cca..0c7b62e 100644 --- a/include/cobalt/kernel/rtdm/driver.h +++ b/include/cobalt/kernel/rtdm/driver.h @@ -562,7 +562,7 @@ typedef unsigned long rtdm_lockctx_t; */ static inline void rtdm_lock_init(rtdm_lock_t *lock) { - spin_lock_init(lock); + raw_spin_lock_init(lock); } /** @@ -575,7 +575,7 @@ static inline void rtdm_lock_init(rtdm_lock_t *lock) static inline void rtdm_lock_get(rtdm_lock_t *lock) { XENO_BUG_ON(COBALT, !spltest()); - spin_lock(lock); + raw_spin_lock(lock); xnsched_lock(); } /** @@ -588,7 +588,7 @@ static inline void rtdm_lock_get(rtdm_lock_t *lock) */ static inline void rtdm_lock_put(rtdm_lock_t *lock) { - spin_unlock(lock); + raw_spin_unlock(lock); xnsched_unlock(); } @@ -608,7 +608,7 @@ static inline rtdm_lockctx_t __rtdm_lock_get_irqsave(rtdm_lock_t *lock) rtdm_lockctx_t context; context = ipipe_test_and_stall_head(); - spin_lock(lock); + raw_spin_lock(lock); xnsched_lock(); return context; @@ -625,7 +625,7 @@ static inline rtdm_lockctx_t __rtdm_lock_get_irqsave(rtdm_lock_t *lock) static inline void 
rtdm_lock_put_irqrestore(rtdm_lock_t *lock, rtdm_lockctx_t context) { - spin_unlock(lock); + raw_spin_unlock(lock); xnsched_unlock(); ipipe_restore_head(context); } diff --git a/kernel/cobalt/apc.c b/kernel/cobalt/apc.c index 3383a26..97edcfc 100644 --- a/kernel/cobalt/apc.c +++ b/kernel/cobalt/apc.c @@ -55,7 +55,7 @@ void apc_dispatch(unsigned int virq, void *arg) * CPU, so that the handler is invoked on the same CPU than * the code which called xnapc_schedule(). */ - spin_lock(&apc_lock); + raw_spin_lock(&apc_lock); /* This is atomic linux context (non-threaded IRQ). */ p = &raw_cpu_ptr(&cobalt_machine_cpudata)->apc_pending; @@ -65,12 +65,12 @@ void apc_dispatch(unsigned int virq, void *arg) handler = cobalt_pipeline.apc_table[apc].handler; cookie = cobalt_pipeline.apc_table[apc].cookie; raw_cpu_ptr(&cobalt_machine_cpudata)->apc_shots[apc]++; - spin_unlock(&apc_lock); + raw_spin_unlock(&apc_lock); handler(cookie); - spin_lock(&apc_lock); + raw_spin_lock(&apc_lock); } - spin_unlock(&apc_lock); + raw_spin_unlock(&apc_lock); } /** @@ -118,7 +118,7 @@ int xnapc_alloc(const char *name, if (handler == NULL) return -EINVAL; - spin_lock_irqsave(&apc_lock, flags); + raw_spin_lock_irqsave(&apc_lock, flags); if (cobalt_pipeline.apc_map == ~0) { apc = -EBUSY; @@ -131,7 +131,7 @@ int xnapc_alloc(const char *name, cobalt_pipeline.apc_table[apc].cookie = cookie; cobalt_pipeline.apc_table[apc].name = name; out: - spin_unlock_irqrestore(&apc_lock, flags); + raw_spin_unlock_irqrestore(&apc_lock, flags); return apc; } diff --git a/kernel/cobalt/posix/process.c b/kernel/cobalt/posix/process.c index 0d225bb..ca0ef2b 100644 --- a/kernel/cobalt/posix/process.c +++ b/kernel/cobalt/posix/process.c @@ -976,7 +976,7 @@ static int handle_hostrt_event(struct ipipe_hostrt_data *hostrt) * Linux kernel and against preemption by Xenomai * - The unsynced R/W block is for lockless read-only access. 
*/ - spin_lock_irqsave(&__hostrtlock, flags); + raw_spin_lock_irqsave(&__hostrtlock, flags); unsynced_write_block(&tmp, &nkvdso->hostrt_data.lock) { nkvdso->hostrt_data.live = 1; @@ -990,7 +990,7 @@ static int handle_hostrt_event(struct ipipe_hostrt_data *hostrt) nkvdso->hostrt_data.wtom_nsec = hostrt->wall_to_monotonic.tv_nsec; } - spin_unlock_irqrestore(&__hostrtlock, flags); + raw_spin_unlock_irqrestore(&__hostrtlock, flags); return KEVENT_PROPAGATE; } _______________________________________________ Xenomai-git mailing list Xenomai-git@xenomai.org https://xenomai.org/mailman/listinfo/xenomai-git