The SuSE PPC guru said that the PPC spinlock code we currently use may
behave erroneously on multiprocessor systems. Attached is the proposed
patch, suggested for inclusion in 7.4. Comments?
--
Peter Eisentraut [EMAIL PROTECTED]
--- src/include/storage/s_lock.h
+++ src/include/storage/s_lock.h
@@ -207,24 +207,24 @@
slock_t _t;
int _res;
- __asm__ __volatile__(
-" lwarx %0,0,%2 \n"
-" cmpwi %0,0 \n"
-" bne 1f \n"
-" addi %0,%0,1 \n"
-" stwcx. %0,0,%2 \n"
-" beq 2f \n"
-"1: li %1,1 \n"
-" b 3f \n"
-"2: \n"
-" isync \n"
-" li %1,0 \n"
-"3: \n"
-
-: "=&r" (_t), "=r" (_res)
-: "r" (lock)
-: "cc", "memory"
- );
+ __asm__ __volatile__(
+" lwarx %0,0,%3 \n"
+" cmpwi %0,0 \n"
+" bne 2f \n"
+" addi %0,%0,1 \n"
+" stwcx. %0,0,%3 \n"
+" bne 2f \n"
+" isync \n"
+"1: li %2,0 \n"
+" b 3f \n"
+"2: \n"
+" li %2,1 \n"
+"3: \n"
+
+: "=&r" (_t), "=r" (lock), "=r" (_res)
+: "r" (lock)
+: "cc", "memory"
+ );
return _res;
}
@@ -260,7 +260,7 @@
#define S_UNLOCK(lock) \
do \
{\
- __asm__ __volatile__ (" sync \n"); \
+ __asm__ __volatile__ (" lwsync \n"); \
*((volatile slock_t *) (lock)) = 0; \
} while (0)