[PATCH 4.4 007/193] blktrace: fix unlocked registration of tracepoints

2018-02-23  Greg Kroah-Hartman
4.4-stable review patch.  If anyone has any objections, please let me know.

--

From: Jens Axboe 

commit a6da0024ffc19e0d47712bb5ca4fd083f76b07df upstream.

We need to ensure that tracepoints are registered and unregistered
in step with their users. The existing atomic count isn't enough for
that. Add a lock around the tracepoint registration, so we serialize
access to it.

This fixes cases where we have multiple users setting up and
tearing down tracepoints, like this:

CPU: 0 PID: 2995 Comm: syzkaller857118 Not tainted 4.14.0-rc5-next-20171018+ #36
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
  __dump_stack lib/dump_stack.c:16 [inline]
  dump_stack+0x194/0x257 lib/dump_stack.c:52
  panic+0x1e4/0x41c kernel/panic.c:183
  __warn+0x1c4/0x1e0 kernel/panic.c:546
  report_bug+0x211/0x2d0 lib/bug.c:183
  fixup_bug+0x40/0x90 arch/x86/kernel/traps.c:177
  do_trap_no_signal arch/x86/kernel/traps.c:211 [inline]
  do_trap+0x260/0x390 arch/x86/kernel/traps.c:260
  do_error_trap+0x120/0x390 arch/x86/kernel/traps.c:297
  do_invalid_op+0x1b/0x20 arch/x86/kernel/traps.c:310
  invalid_op+0x18/0x20 arch/x86/entry/entry_64.S:905
RIP: 0010:tracepoint_add_func kernel/tracepoint.c:210 [inline]
RIP: 0010:tracepoint_probe_register_prio+0x397/0x9a0 kernel/tracepoint.c:283
RSP: 0018:8801d1d1f6c0 EFLAGS: 00010293
RAX: 8801d22e8540 RBX: ffef RCX: 81710f07
RDX:  RSI: 85b679c0 RDI: 8801d5f19818
RBP: 8801d1d1f7c8 R08: 81710c10 R09: 0004
R10: 8801d1d1f6b0 R11: 0003 R12: 817597f0
R13:  R14:  R15: 8801d1d1f7a0
  tracepoint_probe_register+0x2a/0x40 kernel/tracepoint.c:304
  register_trace_block_rq_insert include/trace/events/block.h:191 [inline]
  blk_register_tracepoints+0x1e/0x2f0 kernel/trace/blktrace.c:1043
  do_blk_trace_setup+0xa10/0xcf0 kernel/trace/blktrace.c:542
  blk_trace_setup+0xbd/0x180 kernel/trace/blktrace.c:564
  sg_ioctl+0xc71/0x2d90 drivers/scsi/sg.c:1089
  vfs_ioctl fs/ioctl.c:45 [inline]
  do_vfs_ioctl+0x1b1/0x1520 fs/ioctl.c:685
  SYSC_ioctl fs/ioctl.c:700 [inline]
  SyS_ioctl+0x8f/0xc0 fs/ioctl.c:691
  entry_SYSCALL_64_fastpath+0x1f/0xbe
RIP: 0033:0x444339
RSP: 002b:7ffe05bb5b18 EFLAGS: 0206 ORIG_RAX: 0010
RAX: ffda RBX: 006d66c0 RCX: 00444339
RDX: 2084cf90 RSI: c0481273 RDI: 0009
RBP: 0082 R08:  R09: 
R10:  R11: 0206 R12: 
R13: c0481273 R14:  R15: 

since we can now run these in parallel. Ensure that the exported helpers
for doing this grab the queue trace mutex.
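
To make the window concrete, here is a minimal user-space sketch of the
old and new refcount patterns (C11 atomics plus pthreads; all names and
the tracepoints_registered flag are illustrative stand-ins, not the
kernel symbols):

/* Sketch only: models the refcount patterns, not the kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static bool tracepoints_registered;	/* stands in for the real probes */

/*
 * Old pattern: the 0 <-> 1 counter transition and the (un)register
 * call are two separate steps.  With one user tearing down while
 * another sets up:
 *
 *   CPU0: atomic_dec_and_test()      -> counter hits 0
 *   CPU1: atomic_inc_return() == 1   -> calls blk_register_tracepoints()
 *   CPU0: blk_unregister_tracepoints() runs against the registration
 *         CPU1 is still doing -> the WARN in tracepoint_add_func()
 */
static atomic_int racy_ref;

static void get_probe_ref_racy(void)
{
	if (atomic_fetch_add(&racy_ref, 1) == 0)	/* old value 0: first user */
		tracepoints_registered = true;	/* register step, not atomic with the count */
}

static void put_probe_ref_racy(void)
{
	if (atomic_fetch_sub(&racy_ref, 1) == 1)	/* old value 1: last user */
		tracepoints_registered = false;	/* may overlap a concurrent register */
}

/*
 * Patched pattern: the mutex makes the counter transition and the
 * register/unregister call one critical section, so a first-user
 * registration can never overlap a last-user unregistration.
 */
static pthread_mutex_t probe_mutex = PTHREAD_MUTEX_INITIALIZER;
static int probes_ref;

static void get_probe_ref_fixed(void)
{
	pthread_mutex_lock(&probe_mutex);
	if (++probes_ref == 1)
		tracepoints_registered = true;
	pthread_mutex_unlock(&probe_mutex);
}

static void put_probe_ref_fixed(void)
{
	pthread_mutex_lock(&probe_mutex);
	if (!--probes_ref)
		tracepoints_registered = false;
	pthread_mutex_unlock(&probe_mutex);
}

int main(void)
{
	get_probe_ref_racy();	/* same sequence as below, but racy under concurrency */
	put_probe_ref_racy();
	get_probe_ref_fixed();	/* first user registers */
	put_probe_ref_fixed();	/* last user unregisters */
	return 0;
}

With every transition serialized by blk_probe_mutex, the counter itself
no longer needs to be an atomic_t, which is why the patch demotes it to
a plain int.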

Reported-by: Steven Rostedt 
Tested-by: Dmitry Vyukov 
Signed-off-by: Jens Axboe 
Signed-off-by: Greg Kroah-Hartman 

---
 kernel/trace/blktrace.c |   32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -57,7 +57,8 @@ static struct tracer_flags blk_tracer_flags = {
 };
 
 /* Global reference count of probes */
-static atomic_t blk_probes_ref = ATOMIC_INIT(0);
+static DEFINE_MUTEX(blk_probe_mutex);
+static int blk_probes_ref;
 
 static void blk_register_tracepoints(void);
 static void blk_unregister_tracepoints(void);
@@ -300,11 +301,26 @@ static void blk_trace_free(struct blk_trace *bt)
 	kfree(bt);
 }
 
+static void get_probe_ref(void)
+{
+	mutex_lock(&blk_probe_mutex);
+	if (++blk_probes_ref == 1)
+		blk_register_tracepoints();
+	mutex_unlock(&blk_probe_mutex);
+}
+
+static void put_probe_ref(void)
+{
+	mutex_lock(&blk_probe_mutex);
+	if (!--blk_probes_ref)
+		blk_unregister_tracepoints();
+	mutex_unlock(&blk_probe_mutex);
+}
+
 static void blk_trace_cleanup(struct blk_trace *bt)
 {
 	blk_trace_free(bt);
-	if (atomic_dec_and_test(&blk_probes_ref))
-		blk_unregister_tracepoints();
+	put_probe_ref();
 }
 
 int blk_trace_remove(struct request_queue *q)
@@ -522,8 +538,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	if (cmpxchg(&q->blk_trace, NULL, bt))
 		goto err;
 
-	if (atomic_inc_return(&blk_probes_ref) == 1)
-		blk_register_tracepoints();
+	get_probe_ref();
 
 	return 0;
 err:
@@ -1466,9 +1481,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
 	if (bt == NULL)
 		return -EINVAL;
 
-	if (atomic_dec_and_test(&blk_probes_ref))
-		blk_unregister_tracepoints();
-
+	put_probe_ref();
 	blk_trace_free(bt);
 	return 0;
 }
@@ -1499,8 +1512,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
 	if (cmpxchg(&q->blk_trace, NULL, bt))
 		goto free_bt;
 
-	if (atomic_inc_return(&blk_probes_ref) == 1)
-		blk_register_tracepoints();
+	get_probe_ref();
 	return 0;
 
 free_bt:
