Re: 4.12 NULL pointer dereference in kmem_cache_free on USB storage removal

2017-07-19 Thread Bart Van Assche
On Wed, 2017-07-19 at 08:13 +0300, Yagmur Oymak wrote:
> On 07/14/2017 12:07 AM, Bart Van Assche wrote:
> > On Thu, 2017-07-13 at 23:24 +0300, Meelis Roos wrote:
> > > [258062.320700] RIP: 0010:kmem_cache_free+0x12/0x160
> > > [258062.320886] Call Trace:
> > > [258062.320897]  scsi_exit_rq+0x4d/0x60
> > > [258062.320909]  free_request_size+0x1c/0x30
> > > [258062.320923]  mempool_destroy+0x1d/0x60
> > > [258062.320935]  blk_exit_rl+0x1b/0x40
> > > [258062.320946]  __blk_release_queue+0x7d/0x120
> > > [258062.320959]  process_one_work+0x1af/0x340
> > > [258062.320972]  worker_thread+0x43/0x3e0
> > > [258062.320984]  kthread+0xfe/0x130
> > > [258062.320995]  ? create_worker+0x170/0x170
> > > [258062.321007]  ? kthread_create_on_node+0x40/0x40
> > > [258062.321022]  ret_from_fork+0x22/0x30
> > 
> > Thank you for your report. Can you apply commit 8e6882545d8c ("scsi: Avoid
> > that scsi_exit_rq() triggers a use-after-free") on top of kernel v4.12 and
> > retest? That commit has been tagged "Cc: stable" so I hope that this patch
> > will be included in kernel v4.12.1. However, that kernel is not yet
> > available, unfortunately ...
> 
> That patch fixes the problem, as far as I have tested. However, it is
> still not included in stable as of 4.12.2. The problem persists there, and
> is fixed when this patch is applied on top of it. What to do about it?
 
Hello Yagmur,

[ Please don't top-post -- this is considered annoying in the Linux community ]

Since kernel v4.13-rc1 has been released, and since commit 8e6882545d8c is
included in v4.13-rc1, I assume that it won't take long before a stable kernel
is released that includes that commit. BTW, Greg just announced that that
commit will be included in the next v4.11 stable kernel. See also
https://lkml.org/lkml/2017/7/19/494.

Bart.

Re: [PATCH 17/17] block/cfq: cache rightmost rb_node

2017-07-19 Thread Jan Kara
On Tue 18-07-17 18:46:03, Davidlohr Bueso wrote:
> For the same reasons we already cache the leftmost pointer,
> apply the same optimization for rb_last() calls. Users must
> explicitly do this as rb_root_cached only deals with the
> smallest node.
> 
> Cc: ax...@fb.com
> Cc: linux-block@vger.kernel.org
> Signed-off-by: Davidlohr Bueso 

Hum, as I'm reading the code, here we have roughly a 1:1 ratio of cached
lookups to tree inserts / deletes where we have to maintain the cached
value. Is the optimization worth it in such a case?

Honza
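
For illustration, here is a minimal user-space sketch of the rightmost-caching
pattern under discussion. It uses a plain unbalanced BST rather than the
kernel rbtree, and every name in it is illustrative rather than kernel API;
the erase-side fixup that cfq_rb_erase() performs is omitted for brevity. It
makes the point above concrete: every insert pays the cache bookkeeping,
while only the rb_last()-style lookups collect the O(1) benefit.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct node {
	long key;
	struct node *left, *right;
};

struct cached_tree {
	struct node *root;
	struct node *rightmost;		/* analogue of st->rb_rightmost */
};

static void ct_insert(struct cached_tree *t, struct node *n)
{
	struct node **link = &t->root;
	bool rightmost = true;		/* same trick as in the patch */

	n->left = n->right = NULL;
	while (*link) {
		if (n->key < (*link)->key) {
			link = &(*link)->left;
			rightmost = false;	/* went left: cannot be the max */
		} else {
			link = &(*link)->right;
		}
	}
	*link = n;
	if (rightmost)			/* bookkeeping on every insert */
		t->rightmost = n;
}

static struct node *ct_last(struct cached_tree *t)
{
	return t->rightmost;	/* O(1) instead of walking the right spine */
}

int main(void)
{
	struct cached_tree t = { NULL, NULL };
	long keys[] = { 5, 1, 9, 3, 7 };
	size_t i;

	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		struct node *n = malloc(sizeof(*n));

		n->key = keys[i];
		ct_insert(&t, n);
		printf("insert %ld -> max %ld\n", n->key, ct_last(&t)->key);
	}
	return 0;
}

The erase side carries the same obligation: when the cached node itself is
removed, the cache must be repointed (to the predecessor, for a rightmost
cache) before the node leaves the tree, which is what the cfq_rb_erase()
hunk below does.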

> ---
> This is part of the rbtree internal caching series:
> https://lwn.net/Articles/726809/
> 
>  block/cfq-iosched.c | 19 ++++++++++++++-----
>  1 file changed, 14 insertions(+), 5 deletions(-)
> 
> diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
> index 92c31683a2bb..57ec45fd4590 100644
> --- a/block/cfq-iosched.c
> +++ b/block/cfq-iosched.c
> @@ -94,11 +94,13 @@ struct cfq_ttime {
>   */
>  struct cfq_rb_root {
>   struct rb_root_cached rb;
> + struct rb_node *rb_rightmost;
>   unsigned count;
>   u64 min_vdisktime;
>   struct cfq_ttime ttime;
>  };
>  #define CFQ_RB_ROOT  (struct cfq_rb_root) { .rb = RB_ROOT_CACHED, \
> + .rb_rightmost = NULL,\
>   .ttime = {.last_end_request = ktime_get_ns(),},}
>  
>  /*
> @@ -1183,6 +1185,9 @@ static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
>  
>  static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
>  {
> + if (root->rb_rightmost == n)
> + root->rb_rightmost = rb_prev(n);
> +
>   rb_erase_cached(n, &root->rb);
>   RB_CLEAR_NODE(n);
>  
> @@ -1239,20 +1244,24 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
>   struct rb_node *parent = NULL;
>   struct cfq_group *__cfqg;
>   s64 key = cfqg_key(st, cfqg);
> - bool leftmost = true;
> + bool leftmost = true, rightmost = true;
>  
>   while (*node != NULL) {
>   parent = *node;
>   __cfqg = rb_entry_cfqg(parent);
>  
> - if (key < cfqg_key(st, __cfqg))
> + if (key < cfqg_key(st, __cfqg)) {
>   node = &parent->rb_left;
> - else {
> + rightmost = false;
> + } else {
>   node = &parent->rb_right;
>   leftmost = false;
>   }
>   }
>  
> + if (rightmost)
> + st->rb_rightmost = &cfqg->rb_node;
> +
>   rb_link_node(&cfqg->rb_node, parent, node);
>   rb_insert_color_cached(&cfqg->rb_node, &st->rb, leftmost);
>  }
> @@ -1355,7 +1364,7 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
>* so that groups get lesser vtime based on their weights, so that
>* if group does not loose all if it was not continuously backlogged.
>*/
> - n = rb_last(&st->rb.rb_root);
> + n = st->rb_rightmost;
>   if (n) {
>   __cfqg = rb_entry_cfqg(n);
>   cfqg->vdisktime = __cfqg->vdisktime +
> @@ -2204,7 +2213,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
>   st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
>   if (cfq_class_idle(cfqq)) {
>   rb_key = CFQ_IDLE_DELAY;
> - parent = rb_last(&st->rb.rb_root);
> + parent = st->rb_rightmost;
>   if (parent && parent != &cfqq->rb_node) {
>   __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
>   rb_key += __cfqq->rb_key;
> -- 
> 2.12.0
> 
-- 
Jan Kara 
SUSE Labs, CR


Re: [PATCH 10/17] block/cfq: replace cfq_rb_root leftmost caching

2017-07-19 Thread Jan Kara
On Tue 18-07-17 18:45:56, Davidlohr Bueso wrote:
> ... with the generic rbtree flavor instead. No changes
> in semantics whatsoever.
> 
> Cc: ax...@fb.com
> Cc: linux-block@vger.kernel.org
> Acked-by: Peter Zijlstra (Intel) 
> Signed-off-by: Davidlohr Bueso 

Looks good to me. You can add:

Reviewed-by: Jan Kara 

Honza
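
For context, here is a user-space analogue of the rb_root_cached interface
that this patch moves CFQ onto. A plain unbalanced BST stands in for the
rbtree, and the names are illustrative rather than kernel API; as with
rb_insert_color_cached(), the caller computes a leftmost hint during its own
descent, and the cache lives inside the root type, so the hand-rolled
->left field goes away.

#include <stdio.h>
#include <stdbool.h>

struct node {
	long key;
	struct node *left, *right;
};

struct root_cached {
	struct node *root;
	struct node *leftmost;	/* analogue of rb_root_cached::rb_leftmost */
};

static void insert_cached(struct root_cached *t, struct node *n)
{
	struct node **link = &t->root;
	bool leftmost = true;

	n->left = n->right = NULL;
	while (*link) {
		if (n->key < (*link)->key) {
			link = &(*link)->left;
		} else {
			link = &(*link)->right;
			leftmost = false;	/* went right: not the minimum */
		}
	}
	*link = n;
	if (leftmost)		/* mirrors rb_insert_color_cached() */
		t->leftmost = n;
}

static struct node *first_cached(struct root_cached *t)
{
	return t->leftmost;	/* O(1), like rb_first_cached() */
}

int main(void)
{
	struct root_cached t = { NULL, NULL };
	struct node nodes[] = { {5}, {1}, {9}, {3}, {7} };
	size_t i;

	for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
		insert_cached(&t, &nodes[i]);
		printf("insert %ld -> min %ld\n",
		       nodes[i].key, first_cached(&t)->key);
	}
	return 0;
}

A note on the design: because only the caller knows which way its comparisons
went, rb_insert_color_cached() takes the leftmost hint as a parameter rather
than recomputing it, and rb_erase_cached() repoints the cache at the next
node when the cached one is removed. Both are exactly the pieces of
bookkeeping that CFQ previously open-coded around ->left.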

> ---
>  block/cfq-iosched.c | 70 ++++++++++++++++++++--------------------------------------------------
>  1 file changed, 20 insertions(+), 50 deletions(-)
> 
> diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
> index 3d5c28945719..92c31683a2bb 100644
> --- a/block/cfq-iosched.c
> +++ b/block/cfq-iosched.c
> @@ -93,13 +93,12 @@ struct cfq_ttime {
>   * move this into the elevator for the rq sorting as well.
>   */
>  struct cfq_rb_root {
> - struct rb_root rb;
> - struct rb_node *left;
> + struct rb_root_cached rb;
>   unsigned count;
>   u64 min_vdisktime;
>   struct cfq_ttime ttime;
>  };
> -#define CFQ_RB_ROOT  (struct cfq_rb_root) { .rb = RB_ROOT, \
> +#define CFQ_RB_ROOT  (struct cfq_rb_root) { .rb = RB_ROOT_CACHED, \
>   .ttime = {.last_end_request = ktime_get_ns(),},}
>  
>  /*
> @@ -984,10 +983,9 @@ static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
>  
>  static void update_min_vdisktime(struct cfq_rb_root *st)
>  {
> - struct cfq_group *cfqg;
> + if (!RB_EMPTY_ROOT(&st->rb.rb_root)) {
> + struct cfq_group *cfqg = rb_entry_cfqg(st->rb.rb_leftmost);
>  
> - if (st->left) {
> - cfqg = rb_entry_cfqg(st->left);
>   st->min_vdisktime = max_vdisktime(st->min_vdisktime,
> cfqg->vdisktime);
>   }
> @@ -1169,46 +1167,25 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
>   }
>  }
>  
> -/*
> - * The below is leftmost cache rbtree addon
> - */
>  static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
>  {
>   /* Service tree is empty */
>   if (!root->count)
>   return NULL;
>  
> - if (!root->left)
> - root->left = rb_first(&root->rb);
> -
> - if (root->left)
> - return rb_entry(root->left, struct cfq_queue, rb_node);
> -
> - return NULL;
> + return rb_entry(rb_first_cached(&root->rb), struct cfq_queue, rb_node);
>  }
>  
>  static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
>  {
> - if (!root->left)
> - root->left = rb_first(&root->rb);
> -
> - if (root->left)
> - return rb_entry_cfqg(root->left);
> -
> - return NULL;
> + return rb_entry_cfqg(rb_first_cached(&root->rb));
>  }
>  
> -static void rb_erase_init(struct rb_node *n, struct rb_root *root)
> +static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
>  {
> - rb_erase(n, root);
> + rb_erase_cached(n, &root->rb);
>   RB_CLEAR_NODE(n);
> -}
>  
> -static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
> -{
> - if (root->left == n)
> - root->left = NULL;
> - rb_erase_init(n, &root->rb);
>   --root->count;
>  }
>  
> @@ -1258,11 +1235,11 @@ cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
>  static void
>  __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
>  {
> - struct rb_node **node = &st->rb.rb_node;
> + struct rb_node **node = &st->rb.rb_root.rb_node;
>   struct rb_node *parent = NULL;
>   struct cfq_group *__cfqg;
>   s64 key = cfqg_key(st, cfqg);
> - int left = 1;
> + bool leftmost = true;
>  
>   while (*node != NULL) {
>   parent = *node;
> @@ -1272,15 +1249,12 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
>   node = &parent->rb_left;
>   else {
>   node = &parent->rb_right;
> - left = 0;
> + leftmost = false;
>   }
>   }
>  
> - if (left)
> - st->left = &cfqg->rb_node;
> -
>   rb_link_node(&cfqg->rb_node, parent, node);
> - rb_insert_color(&cfqg->rb_node, &st->rb);
> + rb_insert_color_cached(&cfqg->rb_node, &st->rb, leftmost);
>  }
>  
>  /*
> @@ -1381,7 +1355,7 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
>* so that groups get lesser vtime based on their weights, so that
>* if group does not loose all if it was not continuously backlogged.
>*/
> - n = rb_last(&st->rb);
> + n = rb_last(&st->rb.rb_root);
>   if (n) {
>   __cfqg = rb_entry_cfqg(n);
>   cfqg->vdisktime = __cfqg->vdisktime +
> @@ -2223,14 +2197,14 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
>   struct cfq_queue *__cfqq;
>   u64 rb_key;
>   struct cfq_rb_root *st;
> - int left;
> + bool leftmost = true;
>   int new_cfqq = 1;
>   u64 now = ktime_get_ns();
>