[PATCH v7 01/16] lockdep: Refactor lookup_chain_cache()

Currently, lookup_chain_cache() provides both the 'lookup' and the 'add'
functionality in a single function. However, each is useful on its own. So
this patch splits it: lookup_chain_cache() now does only the 'lookup', and
add_chain_cache() does only the 'add'. This also makes the code more
readable than before.

Signed-off-by: Byungchul Park
---
 kernel/locking/lockdep.c | 132 ++-
 1 file changed, 86 insertions(+), 46 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4d7ffc0..0c6e6b7 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2110,14 +2110,15 @@ static int check_no_collision(struct task_struct *curr,
 }
 
 /*
- * Look up a dependency chain. If the key is not present yet then
- * add it and return 1 - in this case the new dependency chain is
- * validated. If the key is already hashed, return 0.
- * (On return with 1 graph_lock is held.)
+ * Adds a dependency chain into the chain hashtable. Must be called with
+ * graph_lock held.
+ *
+ * Return 0 on failure, with graph_lock released.
+ * Return 1 on success, with graph_lock still held.
  */
-static inline int lookup_chain_cache(struct task_struct *curr,
-                                     struct held_lock *hlock,
-                                     u64 chain_key)
+static inline int add_chain_cache(struct task_struct *curr,
+                                  struct held_lock *hlock,
+                                  u64 chain_key)
 {
         struct lock_class *class = hlock_class(hlock);
         struct hlist_head *hash_head = chainhashentry(chain_key);
@@ -2125,49 +2126,18 @@ static inline int lookup_chain_cache(struct task_struct *curr,
         int i, j;
 
         /*
+         * Allocate a new chain entry from the static array, and add
+         * it to the hash:
+         */
+
+        /*
          * We might need to take the graph lock, ensure we've got IRQs
          * disabled to make this an IRQ-safe lock.. for recursion reasons
          * lockdep won't complain about its own locking errors.
          */
         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                 return 0;
-        /*
-         * We can walk it lock-free, because entries only get added
-         * to the hash:
-         */
-        hlist_for_each_entry_rcu(chain, hash_head, entry) {
-                if (chain->chain_key == chain_key) {
-cache_hit:
-                        debug_atomic_inc(chain_lookup_hits);
-                        if (!check_no_collision(curr, hlock, chain))
-                                return 0;
-                        if (very_verbose(class))
-                                printk("\nhash chain already cached, key: "
-                                        "%016Lx tail class: [%p] %s\n",
-                                        (unsigned long long)chain_key,
-                                        class->key, class->name);
-                        return 0;
-                }
-        }
-        if (very_verbose(class))
-                printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
-                        (unsigned long long)chain_key, class->key, class->name);
-        /*
-         * Allocate a new chain entry from the static array, and add
-         * it to the hash:
-         */
-        if (!graph_lock())
-                return 0;
-        /*
-         * We have to walk the chain again locked - to avoid duplicates:
-         */
-        hlist_for_each_entry(chain, hash_head, entry) {
-                if (chain->chain_key == chain_key) {
-                        graph_unlock();
-                        goto cache_hit;
-                }
-        }
         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
                 if (!debug_locks_off_graph_unlock())
                         return 0;
@@ -2219,6 +2189,75 @@ static inline int lookup_chain_cache(struct task_struct *curr,
         return 1;
 }
 
+/*
+ * Look up a dependency chain.
+ */
+static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
+{
+        struct hlist_head *hash_head = chainhashentry(chain_key);
+        struct lock_chain *chain;
+
+        /*
+         * We can walk it lock-free, because entries only get added
+         * to the hash:
+         */
+        hlist_for_each_entry_rcu(chain, hash_head, entry) {
+                if (chain->chain_key == chain_key) {
+                        debug_atomic_inc(chain_lookup_hits);
+                        return chain;
+                }
+        }
+        return NULL;
+}
+
+/*
+ * If the key is not present yet in the dependency chain cache then
+ * add it and return 1 - in this case the new dependency chain is
+ * validated. If the key is already hashed, return 0.
+ * (On return with 1 graph_lock is held.)
+ */
+static inline int lookup_chain_cache_add(struct task_struct *curr,
+                                         struct held_lock *hlock,
+                                         u64 chain_key)
+{
+        struct lock_class *class = hlock_class(hlock);
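
[Editor's note] The diff above is truncated before the body of
lookup_chain_cache_add(), so the following is only a rough, self-contained
user-space sketch of the pattern the split enables, not the kernel code.
Names such as chain_table, chain_lock and lookup_chain_add() are made up
for illustration. The idea: a lockless lookup on the fast path, then a
re-check under the lock before adding, so two racing contexts cannot
insert the same chain twice. The real lockdep code makes the lockless walk
safe with RCU and validates hits with check_no_collision(); this sketch
only shows the structure.

/*
 * Sketch of the lookup/add split. Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_BUCKETS 64

struct chain {
        unsigned long long key;
        struct chain *next;
};

static struct chain *chain_table[NR_BUCKETS];
static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * 'lookup' only: walk the bucket and return the entry, or NULL.
 * (The kernel version relies on RCU to make this walk lockless;
 * this sketch does not attempt that.)
 */
static struct chain *lookup_chain(unsigned long long key)
{
        struct chain *c;

        for (c = chain_table[key % NR_BUCKETS]; c; c = c->next)
                if (c->key == key)
                        return c;
        return NULL;
}

/* 'add' only: the caller must hold chain_lock. */
static struct chain *add_chain(unsigned long long key)
{
        struct chain *c = malloc(sizeof(*c));

        if (!c)
                return NULL;
        c->key = key;
        c->next = chain_table[key % NR_BUCKETS];
        chain_table[key % NR_BUCKETS] = c;
        return c;
}

/* Compose the two: lookup first, add under the lock only if still absent. */
static struct chain *lookup_chain_add(unsigned long long key)
{
        struct chain *c = lookup_chain(key);

        if (c)
                return c;               /* fast path: cache hit */

        pthread_mutex_lock(&chain_lock);
        c = lookup_chain(key);          /* re-check: someone may have added it */
        if (!c)
                c = add_chain(key);
        pthread_mutex_unlock(&chain_lock);
        return c;
}

int main(void)
{
        lookup_chain_add(0x1234);
        printf("hit on second call: %s\n",
               lookup_chain_add(0x1234) ? "yes" : "no");
        return 0;
}

In the patch itself the same shape is split across lookup_chain_cache()
(the RCU walk), add_chain_cache() (called with graph_lock held), and
lookup_chain_cache_add(), which composes them.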