Currently, check_prev_add() can only save its current context's stack
trace. But it would be useful if a separate stack_trace could be taken
and used in check_prev_add(). The crossrelease feature can then use
check_prev_add() with another context's stack_trace.

Signed-off-by: Byungchul Park <byungchul.p...@lge.com>
---
 kernel/locking/lockdep.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4d51208..c596bef 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1822,7 +1822,8 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-              struct held_lock *next, int distance, int *stack_saved)
+              struct held_lock *next, int distance, int *stack_saved,
+              struct stack_trace *own_trace)
 {
        struct lock_list *entry;
        int ret;
@@ -1883,7 +1884,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
                }
        }
 
-       if (!*stack_saved) {
+       if (!own_trace && stack_saved && !*stack_saved) {
                if (!save_trace(&trace))
                        return 0;
                *stack_saved = 1;
@@ -1895,14 +1896,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         */
        ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
                               &hlock_class(prev)->locks_after,
-                              next->acquire_ip, distance, &trace);
+                              next->acquire_ip, distance, own_trace ?: &trace);
 
        if (!ret)
                return 0;
 
        ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
                               &hlock_class(next)->locks_before,
-                              next->acquire_ip, distance, &trace);
+                              next->acquire_ip, distance, own_trace ?: &trace);
        if (!ret)
                return 0;
 
@@ -1911,7 +1912,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         */
        if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
                /* We drop graph lock, so another thread can overwrite trace. */
-               *stack_saved = 0;
+               if (stack_saved)
+                       *stack_saved = 0;
                graph_unlock();
                printk("\n new dependency: ");
                print_lock_name(hlock_class(prev));
@@ -1960,8 +1962,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                 * added:
                 */
                if (hlock->read != 2 && hlock->check) {
-                       if (!check_prev_add(curr, hlock, next,
-                                               distance, &stack_saved))
+                       if (!check_prev_add(curr, hlock, next, distance,
+                                               &stack_saved, NULL))
                                return 0;
                        /*
                         * Stop after the first non-trylock entry,
-- 
1.9.1

Reply via email to