On Wed, 13 Aug 2008 13:13:13 -0700 (PDT)
Linus Torvalds <[EMAIL PROTECTED]> wrote:

> > Used a bitop to preserve the runtime checking in there.  spin_unlock()
> > doesn't return the previous lockedness.
> 
> Umm. spin_unlock does a lot more when you have lock debugging on, and 
> doesn't do useless crap when it isn't.
> 
> > A bitop seems a better fit to me.  We never spin on that lock (it
> > always uses test_and_set), so why use a "spin"lock?
> 
> ..because an atomic bitop is not the same as a lock.
> 
> The memory ordering guarantees are different. Yes, they are sufficient, 
> but that's because we've had to make them so to account for CRAP CODE that 
> uses bit operations as if they were locks.
> 
> Don't continue that. It's WRONG.
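
For reference, a minimal sketch (not from the patch; all names below are
made up) of the two patterns being contrasted.  The bit-as-a-lock variant
only works because the atomic bitops were given lock-like ordering, and the
unlock side has to spell out the release barrier itself, whereas
spin_trylock()/spin_unlock() carry acquire/release semantics plus the lock
debugging Linus mentions by definition:

#include <linux/bitops.h>	/* test_and_set_bit(), clear_bit() */
#include <linux/spinlock.h>
#include <linux/errno.h>

/* Sketch: a bit used as a "lock".  Correctness leans on the implicit
 * barriers in the atomic bitops. */
static unsigned long demo_lock_word;		/* hypothetical */

static int demo_bit_trylock(void)
{
	/* relies on test_and_set_bit() being atomic and implying a barrier */
	return !test_and_set_bit(0, &demo_lock_word);
}

static void demo_bit_unlock(void)
{
	smp_mb__before_clear_bit();	/* release ordering made explicit */
	clear_bit(0, &demo_lock_word);
}

/* Versus a real lock: the intent is visible, and the ordering and the
 * debug checks come with spin_trylock()/spin_unlock() themselves. */
static DEFINE_SPINLOCK(demo_lock);

static int demo_spinlock_section(void)
{
	if (!spin_trylock(&demo_lock))
		return -EBUSY;
	/* ... critical section ... */
	spin_unlock(&demo_lock);
	return 0;
}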

#2:

(The xchg(kexec_crash_image) stuff is still in there)

--- a/kernel/kexec.c~kexec-use-a-bitop-for-locking-rather-than-xchg
+++ a/kernel/kexec.c
@@ -924,19 +924,14 @@ static int kimage_load_segment(struct ki
  */
 struct kimage *kexec_image;
 struct kimage *kexec_crash_image;
-/*
- * A home grown binary mutex.
- * Nothing can wait so this mutex is safe to use
- * in interrupt context :)
- */
-static int kexec_lock;
+
+static DEFINE_SPINLOCK(kexec_lock);
 
 asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                                struct kexec_segment __user *segments,
                                unsigned long flags)
 {
        struct kimage **dest_image, *image;
-       int locked;
        int result;
 
        /* We only trust the superuser with rebooting the system. */
@@ -972,8 +967,7 @@ asmlinkage long sys_kexec_load(unsigned 
         *
         * KISS: always take the mutex.
         */
-       locked = xchg(&kexec_lock, 1);
-       if (locked)
+       if (!spin_trylock(&kexec_lock))
                return -EBUSY;
 
        dest_image = &kexec_image;
@@ -1015,8 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned 
        image = xchg(dest_image, image);
 
 out:
-       locked = xchg(&kexec_lock, 0); /* Release the mutex */
-       BUG_ON(!locked);
+       spin_unlock(&kexec_lock);
        kimage_free(image);
 
        return result;
@@ -1063,9 +1056,6 @@ asmlinkage long compat_sys_kexec_load(un
 
 void crash_kexec(struct pt_regs *regs)
 {
-       int locked;
-
-
        /* Take the kexec_lock here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
@@ -1074,8 +1064,7 @@ void crash_kexec(struct pt_regs *regs)
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
         */
-       locked = xchg(&kexec_lock, 1);
-       if (!locked) {
+       if (spin_trylock(&kexec_lock)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;
                        crash_setup_regs(&fixed_regs, regs);
@@ -1083,8 +1072,7 @@ void crash_kexec(struct pt_regs *regs)
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
-               locked = xchg(&kexec_lock, 0);
-               BUG_ON(!locked);
+               spin_unlock(&kexec_lock);
        }
 }
 
@@ -1434,7 +1422,7 @@ int kernel_kexec(void)
 {
        int error = 0;
 
-       if (xchg(&kexec_lock, 1))
+       if (!spin_trylock(&kexec_lock))
                return -EBUSY;
        if (!kexec_image) {
                error = -EINVAL;
@@ -1498,8 +1486,6 @@ int kernel_kexec(void)
 #endif
 
  Unlock:
-       if (!xchg(&kexec_lock, 0))
-               BUG();
-
+       spin_unlock(&kexec_lock);
        return error;
 }
_


