* Andrew Morton ([EMAIL PROTECTED]) wrote:
> wtf? How come patches leave here in working order and keep arriving broken?
I think the trees are in sync, and it's actually a bug that's .config
dependent. Basically, STI_STRING and CLI_STRING are not usable the way
they get used:
#define CLI_STRING paravirt_alt("pushl %ecx; pushl %edx;"		\
			"call *paravirt_ops+PARAVIRT_irq_disable;"	\
			"popl %edx; popl %ecx",				\
			PARAVIRT_IRQ_DISABLE, CLBR_EAX)

#define STI_STRING paravirt_alt("pushl %ecx; pushl %edx;"		\
			"call *paravirt_ops+PARAVIRT_irq_enable;"	\
			"popl %edx; popl %ecx",				\
			PARAVIRT_IRQ_ENABLE, CLBR_EAX)
#ifndef CONFIG_PROVE_LOCKING	<-- that's part of the problem: this option is on
				    in most configs, so the code below usually
				    isn't even built
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decb %0\n\t"	<-- here the operand is numerically referred to
		"jns 5f\n"
		"2:\t"
		"testl $0x200, %1\n\t"
		"jz 4f\n\t"
		STI_STRING "\n"			<-- here we have code that does "pushl %ecx; pushl %edx;", not %%ecx
		"3:\t"
		"rep;nop\n\t"
		"cmpb $0, %0\n\t"
		"jle 3b\n\t"
		CLI_STRING "\n\t"
		"jmp 1b\n"
		"4:\t"
		"rep;nop\n\t"
		"cmpb $0, %0\n\t"
		"jg 1b\n\t"
		"jmp 4b\n"
		"5:\n\t"
		: "+m" (lock->slock)
		: "r" (flags)
		: "memory" CLI_STI_CLOBBERS);
}
#endif
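
To spell out the gcc rule that bites here: in basic asm (no operands) a bare
%ecx is passed straight through to the assembler, but as soon as the asm
statement has operands (%0, %1, ...), a single % starts an operand reference,
so a literal register has to be written %%ecx. Here's a minimal userspace
sketch of the difference (i386 only, build with gcc -m32; SAVE_REGS_BARE and
SAVE_REGS_ESCAPED are made-up names, not kernel macros):

/* Illustration only: single-% vs double-% register names in gcc asm. */
#include <stdio.h>

#define SAVE_REGS_BARE    "pushl %ecx; popl %ecx;"	/* ok only in asm with no operands */
#define SAVE_REGS_ESCAPED "pushl %%ecx; popl %%ecx;"	/* needed once operands are present */

int main(void)
{
	int x = 1;

	/* basic asm: the template is emitted verbatim, single % is fine */
	asm volatile(SAVE_REGS_BARE);

	/*
	 * extended asm: has an operand, so literal registers must use %% --
	 * using SAVE_REGS_BARE here instead would make gcc reject the template
	 */
	asm volatile(SAVE_REGS_ESCAPED "addl $1, %0" : "+r" (x));

	printf("x = %d\n", x);
	return 0;
}

__raw_spin_lock_flags() is the second case: lock->slock and flags are
operands, so the single-% pushl/popl that STI_STRING/CLI_STRING paste into
the template can't work there.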
So Andi's patch is needed and correct AFAICT. It fixes the ff, mm and pv trees...
thanks,
-chris
i386: Get paravirt ops to compile
TBD should be folded into the original patches
Signed-off-by: Andi Kleen <[EMAIL PROTECTED]>
Index: linux/include/asm-i386/paravirt.h
===================================================================
--- linux.orig/include/asm-i386/paravirt.h
+++ linux/include/asm-i386/paravirt.h
@@ -454,16 +454,20 @@ static inline unsigned long __raw_local_
 	return f;
 }
 
-#define CLI_STRING paravirt_alt("pushl %ecx; pushl %edx;"		\
-		     "call *paravirt_ops+PARAVIRT_irq_disable;"		\
-		     "popl %edx; popl %ecx",				\
+#define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"		\
+		     "call *paravirt_ops+%c[irq_disable];"		\
+		     "popl %%edx; popl %%ecx",				\
 		     PARAVIRT_IRQ_DISABLE, CLBR_EAX)
 
-#define STI_STRING paravirt_alt("pushl %ecx; pushl %edx;"		\
-		     "call *paravirt_ops+PARAVIRT_irq_enable;"		\
-		     "popl %edx; popl %ecx",				\
+#define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"		\
+		     "call *paravirt_ops+%c[irq_enable];"		\
+		     "popl %%edx; popl %%ecx",				\
 		     PARAVIRT_IRQ_ENABLE, CLBR_EAX)
 
 #define CLI_STI_CLOBBERS , "%eax"
+#define CLI_STI_INPUT_ARGS						\
+	,								\
+	[irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)), \
+	[irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable))
 
 #else /* __ASSEMBLY__ */
Index: linux/include/asm-i386/spinlock.h
===================================================================
--- linux.orig/include/asm-i386/spinlock.h
+++ linux/include/asm-i386/spinlock.h
@@ -13,6 +13,7 @@
 #define CLI_STRING "cli"
 #define STI_STRING "sti"
 #define CLI_STI_CLOBBERS
+#define CLI_STI_INPUT_ARGS
 #endif /* CONFIG_PARAVIRT */
 
 /*
@@ -58,26 +59,27 @@ static inline void __raw_spin_lock_flags
 {
 	asm volatile(
 		"\n1:\t"
-		LOCK_PREFIX " ; decb %0\n\t"
+		LOCK_PREFIX " ; decb %[slock]\n\t"
 		"jns 5f\n"
 		"2:\t"
-		"testl $0x200, %1\n\t"
+		"testl $0x200, %[flags]\n\t"
 		"jz 4f\n\t"
 		STI_STRING "\n"
 		"3:\t"
 		"rep;nop\n\t"
-		"cmpb $0, %0\n\t"
+		"cmpb $0, %[slock]\n\t"
 		"jle 3b\n\t"
 		CLI_STRING "\n\t"
 		"jmp 1b\n"
 		"4:\t"
 		"rep;nop\n\t"
-		"cmpb $0, %0\n\t"
+		"cmpb $0, %[slock]\n\t"
 		"jg 1b\n\t"
 		"jmp 4b\n"
 		"5:\n\t"
-		: "+m" (lock->slock)
-		: "r" (flags)
+		: [slock] "+m" (lock->slock)
+		: [flags] "r" (flags)
+		  CLI_STI_INPUT_ARGS
 		: "memory" CLI_STI_CLOBBERS);
 }
 #endif
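
For anyone who hasn't run into them before, the two gcc asm features the
patch leans on are symbolic operand names ([slock], [flags], [irq_disable]
instead of %0/%1, so the strings pasted in by STI_STRING/CLI_STRING can't
collide with the numbering) and the %c operand modifier, which prints an "i"
(constant) operand, here an offsetof() value, as a bare number so it can be
folded into an address like paravirt_ops+%c[irq_disable]. The leading comma
inside CLI_STI_INPUT_ARGS is just so the non-paravirt spinlock.h can define
it empty and the operand list stays untouched. Below is a minimal userspace
sketch of the same idea (not kernel code; struct demo_ops, demo_ops and
read_counter are made-up names, and it indexes through a base register
rather than a fixed symbol):

/* Illustration only: named asm operands plus %c with an offsetof() constant. */
#include <stddef.h>
#include <stdio.h>

struct demo_ops {
	int pad;
	int counter;
};

static struct demo_ops demo_ops = { .pad = 0, .counter = 41 };

static int read_counter(const struct demo_ops *ops)
{
	int val;

	/* %c[off] expands to the plain constant 4, so this emits e.g. "movl 4(%eax), %edx" */
	asm("movl %c[off](%[ops]), %[val]"
	    : [val] "=r" (val)
	    : [ops] "r" (ops),
	      [off] "i" (offsetof(struct demo_ops, counter)));
	return val;
}

int main(void)
{
	printf("counter = %d\n", read_counter(&demo_ops));
	return 0;
}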