[patch V2 1/2] blk/mq/cpu-notif: Convert to hotplug state machine

2016-09-20, Thomas Gleixner

Replace the block-mq notifier list management with the multi instance
facility in the cpu hotplug state machine.

Signed-off-by: Thomas Gleixner 
Cc: Jens Axboe 
Cc: Peter Zijlstra 
Cc: linux-bl...@vger.kernel.org
Cc: r...@linutronix.de
Cc: Christoph Hellwig

---
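
As the changelog says, the per-instance notifier list is replaced by the multi
instance facility of the cpu hotplug state machine. For reference, a minimal
sketch of how that facility is used. The foo_instance structure, the function
names and the "example:dead" state name are made up for illustration; only
CPUHP_BLK_MQ_DEAD, the callback shape and the cpuhp_* calls correspond to what
the diff below relies on.

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/list.h>

/* Illustrative instance (not from this patch); blk-mq embeds the
 * hlist_node in struct blk_mq_hw_ctx as hctx->cpuhp_dead. */
struct foo_instance {
	struct hlist_node cpuhp_dead;
	/* ... per-instance state ... */
};

/*
 * "dead" callback: invoked once per registered instance, on a control CPU,
 * after @cpu has gone offline.
 */
static int foo_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct foo_instance *foo;

	foo = hlist_entry_safe(node, struct foo_instance, cpuhp_dead);
	/* drain or migrate per-CPU work owned by @foo for @cpu here */
	return 0;
}

/* One-time state setup: no startup callback, foo_cpu_dead as teardown. */
static int __init foo_init(void)
{
	return cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "example:dead",
				       NULL, foo_cpu_dead);
}

/* Per-instance attach/detach, replacing the old notifier list add/del. */
static void foo_register(struct foo_instance *foo)
{
	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &foo->cpuhp_dead);
}

static void foo_unregister(struct foo_instance *foo)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &foo->cpuhp_dead);
}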

 block/Makefile |2 -
 block/blk-mq-cpu.c |   67 -
 block/blk-mq.c |   36 +-
 block/blk-mq.h |7 -
 include/linux/blk-mq.h |8 -
 5 files changed, 15 insertions(+), 105 deletions(-)

--- a/block/Makefile
+++ b/block/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
blk-lib.o blk-mq.o blk-mq-tag.o \
-   blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
+   blk-mq-sysfs.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
badblocks.o partitions/
 
--- a/block/blk-mq-cpu.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * CPU notifier helper code for blk-mq
- *
- * Copyright (C) 2013-2014 Jens Axboe
- */
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-
-#include 
-#include "blk-mq.h"
-
-static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
-
-static int blk_mq_main_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
-   unsigned int cpu = (unsigned long) hcpu;
-   struct blk_mq_cpu_notifier *notify;
-   int ret = NOTIFY_OK;
-
-   raw_spin_lock(&blk_mq_cpu_notify_lock);
-
-   list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
-   ret = notify->notify(notify->data, action, cpu);
-   if (ret != NOTIFY_OK)
-   break;
-   }
-
-   raw_spin_unlock(&blk_mq_cpu_notify_lock);
-   return ret;
-}
-
-void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
-{
-   BUG_ON(!notifier->notify);
-
-   raw_spin_lock(&blk_mq_cpu_notify_lock);
-   list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
-   raw_spin_unlock(&blk_mq_cpu_notify_lock);
-}
-
-void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
-{
-   raw_spin_lock(&blk_mq_cpu_notify_lock);
-   list_del(&notifier->list);
-   raw_spin_unlock(&blk_mq_cpu_notify_lock);
-}
-
-void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
- int (*fn)(void *, unsigned long, unsigned int),
- void *data)
-{
-   notifier->notify = fn;
-   notifier->data = data;
-}
-
-void __init blk_mq_cpu_init(void)
-{
-   hotcpu_notifier(blk_mq_main_cpu_notify, 0);
-}
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1575,11 +1575,13 @@ static struct blk_mq_tags *blk_mq_init_r
  * software queue to the hw queue dispatch list, and ensure that it
  * gets run.
  */
-static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
+static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 {
+   struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
 
+   hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
 
spin_lock(&ctx->lock);
@@ -1590,30 +1592,20 @@ static int blk_mq_hctx_cpu_offline(struc
spin_unlock(&ctx->lock);
 
if (list_empty(&tmp))
-   return NOTIFY_OK;
+   return 0;
 
spin_lock(&hctx->lock);
list_splice_tail_init(&tmp, &hctx->dispatch);
spin_unlock(&hctx->lock);
 
blk_mq_run_hw_queue(hctx, true);
-   return NOTIFY_OK;
+   return 0;
 }
 
-static int blk_mq_hctx_notify(void *data, unsigned long action,
- unsigned int cpu)
+static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 {
-   struct blk_mq_hw_ctx *hctx = data;
-
-   if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
-   return blk_mq_hctx_cpu_offline(hctx, cpu);
-
-   /*
-* In case of CPU online, tags may be reallocated
-* in blk_mq_map_swqueue() after mapping is updated.
-*/
-
-   return NOTIFY_OK;
+   cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+   &hctx->cpuhp_dead);
 }
 
 /* hctx->ctxs will be freed in queue's release handler */
@@ -1633,7 +1625,7 @@ static void blk_mq_exit_hctx(struct requ
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
 
-   blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+   blk_mq_remove_cpuhp(hctx);
blk_free_flush_queue(hctx->fq);
sbitmap_free(&hctx->ctx_map);
 }
@@ -1680,9 +1672,7 @@ static int blk_mq_init_hctx(struct reque
hctx->queue_num = hctx_idx;
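
The diff is cut short at this point in the archive copy. For the other half of
the conversion, a hedged sketch of what the registration side looks like: the
per-hctx instance add (the counterpart to blk_mq_remove_cpuhp() above) and the
one-time state setup that wires up blk_mq_hctx_notify_dead(). The
"block/mq:dead" name string and the _sketch function names are assumptions,
not text from this mail.

/*
 * Sketch only (not the literal hunks): assumes this sits in block/blk-mq.c
 * next to blk_mq_hctx_notify_dead() above, with <linux/cpuhotplug.h> pulled
 * in.  Replaces blk_mq_init_cpu_notifier()/blk_mq_register_cpu_notifier():
 */
static void blk_mq_add_cpuhp_sketch(struct blk_mq_hw_ctx *hctx)
{
	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
}

/* One-time registration of the multi-instance "dead" state (name assumed). */
static int __init blk_mq_cpuhp_setup_sketch(void)
{
	return cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead",
				       NULL, blk_mq_hctx_notify_dead);
}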
