Hi Juergen,
On 13/03/2020 08:05, Juergen Gross wrote:
Similar to spinlocks, preemption should be disabled while holding an
rwlock.
Signed-off-by: Juergen Gross <jgr...@suse.com>
---
xen/include/xen/rwlock.h | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 1c221dd0d9..4ee341a182 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -2,6 +2,7 @@
#define __RWLOCK_H__
#include <xen/percpu.h>
+#include <xen/preempt.h>
#include <xen/smp.h>
#include <xen/spinlock.h>
@@ -57,10 +58,12 @@ static inline int _read_trylock(rwlock_t *lock)
cnts = atomic_read(&lock->cnts);
if ( likely(_can_read_lock(cnts)) )
{
If you get preempted here, then the check below is likely going to
fail. So I think it would be best to disable preemption before the
check, to give it a better chance to succeed.
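Something along these lines is what I have in mind (a rough, untested
sketch based on the code quoted above):

    static inline int _read_trylock(rwlock_t *lock)
    {
        u32 cnts;

        /* Disable preemption before the speculative check. */
        preempt_disable();
        cnts = atomic_read(&lock->cnts);
        if ( likely(_can_read_lock(cnts)) )
        {
            cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
            if ( likely(_can_read_lock(cnts)) )
                return 1; /* Success: preemption stays disabled. */
            atomic_sub(_QR_BIAS, &lock->cnts);
        }
        /* Failure: drop the preemption disable taken above. */
        preempt_enable();
        return 0;
    }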
+ preempt_disable();
cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
if ( likely(_can_read_lock(cnts)) )
return 1;
atomic_sub(_QR_BIAS, &lock->cnts);
+ preempt_enable();
}
return 0;
}
@@ -73,6 +76,7 @@ static inline void _read_lock(rwlock_t *lock)
{
u32 cnts;
+ preempt_disable();
cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
if ( likely(_can_read_lock(cnts)) )
return;
@@ -106,6 +110,7 @@ static inline void _read_unlock(rwlock_t *lock)
* Atomically decrement the reader count
*/
atomic_sub(_QR_BIAS, &lock->cnts);
+ preempt_enable();
}
static inline void _read_unlock_irq(rwlock_t *lock)
@@ -137,6 +142,7 @@ static inline unsigned int _write_lock_val(void)
static inline void _write_lock(rwlock_t *lock)
{
/* Optimize for the unfair lock case where the fair flag is 0. */
+ preempt_disable();
if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
return;
@@ -172,13 +178,21 @@ static inline int _write_trylock(rwlock_t *lock)
if ( unlikely(cnts) )
return 0;
- return likely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0);
+ preempt_disable();
Same remark as for _read_trylock(): I think preemption should be
disabled before the early check above, to give the cmpxchg a better
chance to succeed.
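Something like this (again a rough, untested sketch based on the
quoted code):

    static inline int _write_trylock(rwlock_t *lock)
    {
        u32 cnts;

        /* Disable preemption before the early check. */
        preempt_disable();
        cnts = atomic_read(&lock->cnts);
        if ( unlikely(cnts) ||
             unlikely(atomic_cmpxchg(&lock->cnts, 0,
                                     _write_lock_val()) != 0) )
        {
            /* Failure: drop the preemption disable taken above. */
            preempt_enable();
            return 0;
        }

        return 1; /* Success: preemption stays disabled. */
    }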
+ if ( unlikely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) != 0) )
+ {
+ preempt_enable();
+ return 0;
+ }
+
+ return 1;
}
static inline void _write_unlock(rwlock_t *lock)
{
ASSERT(_is_write_locked_by_me(atomic_read(&lock->cnts)));
atomic_and(~(_QW_CPUMASK | _QW_WMASK), &lock->cnts);
+ preempt_enable();
}
static inline void _write_unlock_irq(rwlock_t *lock)
@@ -274,6 +288,7 @@ static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
}
/* Indicate this cpu is reading. */
+ preempt_disable();
this_cpu_ptr(per_cpudata) = percpu_rwlock;
smp_mb();
/* Check if a writer is waiting. */
@@ -308,6 +323,7 @@ static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
return;
}
this_cpu_ptr(per_cpudata) = NULL;
+ preempt_enable();
smp_wmb();
}
Cheers,
--
Julien Grall