Replace the indirection through struct stack_trace by using the storage
array based interfaces and storing the information in a small lockdep
specific data structure.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 include/linux/lockdep.h  |    9 +++++--
 kernel/locking/lockdep.c |   55 +++++++++++++++++++++++------------------------
 2 files changed, 35 insertions(+), 29 deletions(-)

--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,6 +66,11 @@ struct lock_class_key {
 
 extern struct lock_class_key __lockdep_no_validate__;
 
+struct lock_trace {
+       unsigned int            nr_entries;
+       unsigned int            offset;
+};
+
 #define LOCKSTAT_POINTS                4
 
 /*
@@ -100,7 +105,7 @@ struct lock_class {
         * IRQ/softirq usage tracking bits:
         */
        unsigned long                   usage_mask;
-       struct stack_trace              usage_traces[XXX_LOCK_USAGE_STATES];
+       struct lock_trace               usage_traces[XXX_LOCK_USAGE_STATES];
 
        /*
         * Generation counter, when doing certain classes of graph walking,
@@ -188,7 +193,7 @@ struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct lock_class               *links_to;
-       struct stack_trace              trace;
+       struct lock_trace               trace;
        int                             distance;
 
        /*
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -434,18 +434,14 @@ static void print_lockdep_off(const char
 #endif
 }
 
-static int save_trace(struct stack_trace *trace)
+static int save_trace(struct lock_trace *trace)
 {
-       trace->nr_entries = 0;
-       trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-       trace->entries = stack_trace + nr_stack_trace_entries;
-
-       trace->skip = 3;
-
-       save_stack_trace(trace);
-
-       trace->max_entries = trace->nr_entries;
+       unsigned long *entries = stack_trace + nr_stack_trace_entries;
+       unsigned int max_entries;
 
+       trace->offset = nr_stack_trace_entries;
+       max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+       trace->nr_entries = stack_trace_save(entries, max_entries, 3);
        nr_stack_trace_entries += trace->nr_entries;
 
        if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
@@ -1196,7 +1192,7 @@ static struct lock_list *alloc_list_entr
 static int add_lock_to_list(struct lock_class *this,
                            struct lock_class *links_to, struct list_head *head,
                            unsigned long ip, int distance,
-                           struct stack_trace *trace)
+                           struct lock_trace *trace)
 {
        struct lock_list *entry;
        /*
@@ -1415,6 +1411,13 @@ static inline int __bfs_backwards(struct
  * checking.
  */
 
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+       unsigned long *entries = stack_trace + trace->offset;
+
+       stack_trace_print(entries, trace->nr_entries, spaces);
+}
+
 /*
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
@@ -1427,8 +1430,7 @@ print_circular_bug_entry(struct lock_lis
        printk("\n-> #%u", depth);
        print_lock_name(target->class);
        printk(KERN_CONT ":\n");
-       print_stack_trace(&target->trace, 6);
-
+       print_lock_trace(&target->trace, 6);
        return 0;
 }
 
@@ -1740,7 +1742,7 @@ static void print_lock_class_header(stru
 
                        len += printk("%*s   %s", depth, "", usage_str[bit]);
                        len += printk(KERN_CONT " at:\n");
-                       print_stack_trace(class->usage_traces + bit, len);
+                       print_lock_trace(class->usage_traces + bit, len);
                }
        }
        printk("%*s }\n", depth, "");
@@ -1765,7 +1767,7 @@ print_shortest_lock_dependencies(struct
        do {
                print_lock_class_header(entry->class, depth);
                printk("%*s ... acquired at:\n", depth, "");
-               print_stack_trace(&entry->trace, 2);
+               print_lock_trace(&entry->trace, 2);
                printk("\n");
 
                if (depth == 0 && (entry != root)) {
@@ -1878,14 +1880,14 @@ print_bad_irq_dependency(struct task_str
        print_lock_name(backwards_entry->class);
        pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
-       print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+       print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
 
        pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
        print_lock_name(forwards_entry->class);
        pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
        pr_warn("...");
 
-       print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+       print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
 
        pr_warn("\nother info that might help us debug this:\n\n");
        print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -2158,7 +2160,7 @@ check_deadlock(struct task_struct *curr,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-              struct held_lock *next, int distance, struct stack_trace *trace)
+              struct held_lock *next, int distance, struct lock_trace *trace)
 {
        struct lock_list *uninitialized_var(target_entry);
        struct lock_list *entry;
@@ -2196,7 +2198,7 @@ check_prev_add(struct task_struct *curr,
        this.parent = NULL;
        ret = check_noncircular(&this, hlock_class(prev), &target_entry);
        if (unlikely(!ret)) {
-               if (!trace->entries) {
+               if (!trace->nr_entries) {
                        /*
                         * If save_trace fails here, the printing might
                         * trigger a WARN but because of the !nr_entries it
@@ -2252,7 +2254,7 @@ check_prev_add(struct task_struct *curr,
                return print_bfs_bug(ret);
 
 
-       if (!trace->entries && !save_trace(trace))
+       if (!trace->nr_entries && !save_trace(trace))
                return 0;
 
        /*
@@ -2284,14 +2286,9 @@ check_prev_add(struct task_struct *curr,
 static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
+       struct lock_trace trace = { .nr_entries = 0 };
        int depth = curr->lockdep_depth;
        struct held_lock *hlock;
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .max_entries = 0,
-               .entries = NULL,
-               .skip = 0,
-       };
 
        /*
         * Debugging checks.
@@ -2719,6 +2716,10 @@ static inline int validate_chain(struct
 {
        return 1;
 }
+
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+}
 #endif
 
 /*
@@ -2815,7 +2816,7 @@ print_usage_bug(struct task_struct *curr
        print_lock(this);
 
        pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-       print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+       print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
        print_irqtrace_events(curr);
        pr_warn("\nother info that might help us debug this:\n");


_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to