Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=ca58abcb4a6d52ee2db1b1130cea3ca2a76677b9
Commit:     ca58abcb4a6d52ee2db1b1130cea3ca2a76677b9
Parent:     21f8ca3bf6198bd21e3c4cc820af2ccf753a6ec8
Author:     Peter Zijlstra <[EMAIL PROTECTED]>
AuthorDate: Thu Jul 19 01:48:53 2007 -0700
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Thu Jul 19 10:04:49 2007 -0700

    lockdep: sanitise CONFIG_PROVE_LOCKING
    
    Ensure that all of the lock dependency tracking code is under
    CONFIG_PROVE_LOCKING.  This allows us to use the held lock tracking code for
    other purposes.
    
    Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]>
    Acked-by: Ingo Molnar <[EMAIL PROTECTED]>
    Acked-by: Jason Baron <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 kernel/lockdep.c  |   13 ++++++++++++-
 kernel/spinlock.c |    4 ++--
 2 files changed, 14 insertions(+), 3 deletions(-)
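
A rough stand-alone sketch of the split this patch establishes (illustrative only; record_held_lock(), check_dependencies() and lock_acquire_hook() are made-up stand-ins, not kernel symbols): held-lock tracking builds whenever CONFIG_LOCKDEP is set, while the dependency checker additionally requires CONFIG_PROVE_LOCKING.

/* sketch.c - illustrative only; hypothetical names, not kernel code.
 *
 *   cc -DCONFIG_LOCKDEP -DCONFIG_PROVE_LOCKING sketch.c   (full proving)
 *   cc -DCONFIG_LOCKDEP sketch.c                          (tracking only)
 */
#include <stdio.h>

#ifdef CONFIG_LOCKDEP
static const char *held_locks[64];      /* stand-in for per-task held-lock state */
static int nr_held;

static void record_held_lock(const char *name)
{
        /* held-lock tracking: usable by non-proving features as well */
        held_locks[nr_held++] = name;
}

#ifdef CONFIG_PROVE_LOCKING
static void check_dependencies(const char *name)
{
        /* stand-in for the chain/dependency validation done in lockdep.c */
        printf("prove: checking dependencies of %s (%d locks held)\n",
               name, nr_held);
}
#else
static void check_dependencies(const char *name) { (void)name; }
#endif

static void lock_acquire_hook(const char *name)
{
        record_held_lock(name);
        check_dependencies(name);
}
#endif /* CONFIG_LOCKDEP */

int main(void)
{
#ifdef CONFIG_LOCKDEP
        lock_acquire_hook("example_lock");
        printf("held locks: %d\n", nr_held);
#endif
        return 0;
}
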

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index edba2ff..05c1261 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -95,6 +95,7 @@ static int lockdep_initialized;
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Allocate a lockdep entry. (assumes the graph_lock held, returns
  * with NULL on failure)
@@ -111,6 +112,7 @@ static struct lock_list *alloc_list_entry(void)
        }
        return list_entries + nr_list_entries++;
 }
+#endif
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -140,7 +142,9 @@ LIST_HEAD(all_lock_classes);
 static struct list_head classhash_table[CLASSHASH_SIZE];
 
 unsigned long nr_lock_chains;
+#ifdef CONFIG_PROVE_LOCKING
 static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+#endif
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -482,6 +486,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
        }
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Add a new dependency to the head of the list:
  */
@@ -541,6 +546,7 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
 
        return 0;
 }
+#endif
 
 static void print_kernel_version(void)
 {
@@ -549,6 +555,7 @@ static void print_kernel_version(void)
                init_utsname()->version);
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * When a circular dependency is detected, print the
  * header first:
@@ -639,6 +646,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
        }
        return 1;
 }
+#endif
 
 static int very_verbose(struct lock_class *class)
 {
@@ -823,6 +831,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 #endif
 
+#ifdef CONFIG_PROVE_LOCKING
 static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
                   struct held_lock *next)
@@ -1087,7 +1096,7 @@ out_bug:
 
        return 0;
 }
-
+#endif
 
 /*
  * Is this the address of a static object:
@@ -1307,6 +1316,7 @@ out_unlock_set:
        return class;
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Look up a dependency chain. If the key is not present yet then
  * add it and return 1 - in this case the new dependency chain is
@@ -1381,6 +1391,7 @@ cache_hit:
 
        return 1;
 }
+#endif
 
 /*
  * We are building curr_chain_key incrementally, so double-check
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 2c6c2bf..cd93bfe 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -88,7 +88,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
         * _raw_spin_lock_flags() code, because lockdep assumes
         * that interrupts are not re-enabled during lock-acquire:
         */
-#ifdef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_LOCKDEP
        _raw_spin_lock(lock);
 #else
        _raw_spin_lock_flags(lock, &flags);
@@ -305,7 +305,7 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
         * _raw_spin_lock_flags() code, because lockdep assumes
         * that interrupts are not re-enabled during lock-acquire:
         */
-#ifdef CONFIG_PROVE_SPIN_LOCKING
+#ifdef CONFIG_LOCKDEP
        _raw_spin_lock(lock);
 #else
        _raw_spin_lock_flags(lock, &flags);