Unify the context identification: instead of open-coding the preempt_count()
checks in trace_recursive_lock(), use the trace_get_context_bit() helper to
compute the recursion bit.
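
For reference, trace_get_context_bit() classifies the current context from
preempt_count() into NMI, hard IRQ, soft IRQ, or normal task context. A
minimal sketch of the helper, mirroring the open-coded block removed below
(the actual definition lives in kernel/trace/trace.h and may differ in
detail):

	static __always_inline int trace_get_context_bit(void)
	{
		unsigned long pc = preempt_count();

		/* No interrupt bits set: normal (task) context. */
		if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
			return TRACE_CTX_NORMAL;

		/* Otherwise report the highest-priority active context. */
		return pc & NMI_MASK ? TRACE_CTX_NMI :
			pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
	}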
Signed-off-by: Daniel Bristot de Oliveira <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: "Joel Fernandes (Google)" <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Tommaso Cucinotta <[email protected]>
Cc: Romulo Silva de Oliveira <[email protected]>
Cc: Clark Williams <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
kernel/trace/ring_buffer.c | 9 +--------
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index fa8cbad2ca88..5bff5bcb1bf8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2685,14 +2685,7 @@ static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned int val = cpu_buffer->current_context;
-	unsigned long pc = preempt_count();
-	int bit;
-
-	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
-		bit = TRACE_CTX_NORMAL;
-	else
-		bit = pc & NMI_MASK ? TRACE_CTX_NMI :
-			pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
+	int bit = trace_get_context_bit();
 
 	if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
 		return 1;
--
2.20.1