rt_spin_lock() should first do the lock annotation via lockdep and then
do the actual locking. That way lockdep reports the potential deadlock
before it actually happens.

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
 kernel/locking/rtmutex.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 79f49d73e4d0..639cfdaae72f 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1153,8 +1153,8 @@ void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
        migrate_disable();
-       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
 }
 EXPORT_SYMBOL(rt_spin_lock);
 
-- 
2.14.2

Reply via email to