Re: [PATCH rfc 25/30] nvme: move control plane handling to nvme core

2017-06-19 Thread Sagi Grimberg

> > +static void nvme_free_io_queues(struct nvme_ctrl *ctrl)
> > +{
> > +   int i;
> > +
> > +   for (i = 1; i < ctrl->queue_count; i++)
> > +   ctrl->ops->free_hw_queue(ctrl, i);
> > +}
> > +
> > +void nvme_stop_io_queues(struct nvme_ctrl *ctrl)
> > +{
> > +   int i;
> > +
> > +   for (i = 1; i < ctrl->queue_count; i++)
> > +   ctrl->ops->stop_hw_queue(ctrl, i);
> > +}
> > +EXPORT_SYMBOL_GPL(nvme_stop_io_queues);


> At least for PCIe this is going to work very differently, so I'm not
> sure this part makes much sense in the core.  Maybe in Fabrics?
> Or at least make the callouts operate on all I/O queues, which would
> suit PCIe a lot more.


Yeah, I spent some time thinking about the async nature of queue
removal for PCI... I started from ->stop/free_io_queues callouts,
but hated the fact that we'd need to iterate in exactly the same
way in every driver...

We could have optional stop/free_io_queues callouts that the core
calls instead, if implemented?
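
Something like this, as a rough sketch (the optional ->stop_io_queues
callout is hypothetical, not part of this patch set):

void nvme_stop_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	/* transport that can tear down all I/O queues at once (e.g. pci) */
	if (ctrl->ops->stop_io_queues) {
		ctrl->ops->stop_io_queues(ctrl);
		return;
	}

	/* generic fallback: stop each I/O queue individually */
	for (i = 1; i < ctrl->queue_count; i++)
		ctrl->ops->stop_hw_queue(ctrl, i);
}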


> > +   error = ctrl->ops->start_hw_queue(ctrl, 0);
> > +   if (error)
> > +   goto out_cleanup_connect_queue;
> > +
> > +   error = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
> > +   if (error) {
> > +   dev_err(ctrl->device,
> > +   "prop_get NVME_REG_CAP failed\n");
> > +   goto out_cleanup_connect_queue;
> > +   }
> > +
> > +   ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
> > +
> > +   error = nvme_enable_ctrl(ctrl, ctrl->cap);
> > +   if (error)
> > +   goto out_cleanup_connect_queue;


> I'm not sure this ordering is going to work for PCIe..


This one is easy to reverse...
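
i.e. something along these lines (sketch only; the out_free_queue label
is illustrative). On PCIe, NVME_REG_CAP is a plain MMIO read and the
controller has to be enabled before the admin queue is usable; on
fabrics, reg_read64() is a property get that needs a live admin queue:

	error = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error)
		goto out_free_queue;

	ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);

	error = nvme_enable_ctrl(ctrl, ctrl->cap);
	if (error)
		goto out_free_queue;

	error = ctrl->ops->start_hw_queue(ctrl, 0);
	if (error)
		goto out_free_queue;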


Re: [PATCH rfc 25/30] nvme: move control plane handling to nvme core

2017-06-19 Thread Christoph Hellwig
> +static void nvme_free_io_queues(struct nvme_ctrl *ctrl)
> +{
> + int i;
> +
> + for (i = 1; i < ctrl->queue_count; i++)
> + ctrl->ops->free_hw_queue(ctrl, i);
> +}
> +
> +void nvme_stop_io_queues(struct nvme_ctrl *ctrl)
> +{
> + int i;
> +
> + for (i = 1; i < ctrl->queue_count; i++)
> + ctrl->ops->stop_hw_queue(ctrl, i);
> +}
> +EXPORT_SYMBOL_GPL(nvme_stop_io_queues);

At least for PCIe this is going to work very differently, so I'm not
sure this part makes much sense in the core.  Maybe in Fabrics?
Or at least make the callouts operate on all I/O queues, which would
suit PCIe a lot more.

> + error = ctrl->ops->start_hw_queue(ctrl, 0);
> + if (error)
> + goto out_cleanup_connect_queue;
> +
> + error = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
> + if (error) {
> + dev_err(ctrl->device,
> + "prop_get NVME_REG_CAP failed\n");
> + goto out_cleanup_connect_queue;
> + }
> +
> + ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
> +
> + error = nvme_enable_ctrl(ctrl, ctrl->cap);
> + if (error)
> + goto out_cleanup_connect_queue;

I'm not sure this ordering is going to work for PCIe..


[PATCH rfc 25/30] nvme: move control plane handling to nvme core

2017-06-18 Thread Sagi Grimberg
Handle controller setup, reset and delete in the nvme core, so that
the control plane logic is shared across transports.

Signed-off-by: Sagi Grimberg 
---
 drivers/nvme/host/core.c | 373 +++
 drivers/nvme/host/nvme.h |  12 ++
 drivers/nvme/host/rdma.c | 372 +-
 3 files changed, 393 insertions(+), 364 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 17a10549d688..6937ba26ff2c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2670,6 +2670,379 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
 
+static void nvme_free_io_queues(struct nvme_ctrl *ctrl)
+{
+   int i;
+
+   for (i = 1; i < ctrl->queue_count; i++)
+   ctrl->ops->free_hw_queue(ctrl, i);
+}
+
+void nvme_stop_io_queues(struct nvme_ctrl *ctrl)
+{
+   int i;
+
+   for (i = 1; i < ctrl->queue_count; i++)
+   ctrl->ops->stop_hw_queue(ctrl, i);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_io_queues);
+
+static int nvme_start_io_queues(struct nvme_ctrl *ctrl)
+{
+   int i, ret = 0;
+
+   for (i = 1; i < ctrl->queue_count; i++) {
+   ret = ctrl->ops->start_hw_queue(ctrl, i);
+   if (ret)
+   goto out_stop_queues;
+   }
+
+   return 0;
+
+out_stop_queues:
+   for (i--; i >= 1; i--)
+   ctrl->ops->stop_hw_queue(ctrl, i);
+   return ret;
+}
+
+static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
+{
+   unsigned int nr_io_queues = ctrl->max_queues - 1;
+   int i, ret;
+
+   nr_io_queues = min(nr_io_queues, num_online_cpus());
+   ret = nvme_set_queue_count(ctrl, &nr_io_queues);
+   if (ret)
+   return ret;
+
+   ctrl->queue_count = nr_io_queues + 1;
+   if (ctrl->queue_count < 2)
+   return 0;
+
+   dev_info(ctrl->device,
+   "creating %d I/O queues.\n", nr_io_queues);
+
+   for (i = 1; i < ctrl->queue_count; i++) {
+   ret = ctrl->ops->alloc_hw_queue(ctrl, i,
+   ctrl->sqsize + 1);
+   if (ret)
+   goto out_free_queues;
+   }
+
+   return 0;
+
+out_free_queues:
+   for (i--; i >= 1; i--)
+   ctrl->ops->free_hw_queue(ctrl, i);
+
+   return ret;
+}
+
+void nvme_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
+{
+   nvme_stop_io_queues(ctrl);
+   if (remove) {
+   if (ctrl->ops->flags & NVME_F_FABRICS)
+   blk_cleanup_queue(ctrl->connect_q);
+   ctrl->ops->free_tagset(ctrl, false);
+   }
+   nvme_free_io_queues(ctrl);
+}
+EXPORT_SYMBOL_GPL(nvme_destroy_io_queues);
+
+int nvme_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+{
+   int ret;
+
+   ret = nvme_alloc_io_queues(ctrl);
+   if (ret)
+   return ret;
+
+   if (new) {
+   ctrl->tagset = ctrl->ops->alloc_tagset(ctrl, false);
+   if (IS_ERR(ctrl->tagset)) {
+   ret = PTR_ERR(ctrl->tagset);
+   goto out_free_io_queues;
+   }
+
+   if (ctrl->ops->flags & NVME_F_FABRICS) {
+   ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
+   if (IS_ERR(ctrl->connect_q)) {
+   ret = PTR_ERR(ctrl->connect_q);
+   goto out_free_tag_set;
+   }
+   }
+   } else {
+   ret = blk_mq_reinit_tagset(ctrl->tagset);
+   if (ret)
+   goto out_free_io_queues;
+   }
+
+   ret = nvme_start_io_queues(ctrl);
+   if (ret)
+   goto out_cleanup_connect_q;
+
+   return 0;
+
+out_cleanup_connect_q:
+   if (new && (ctrl->ops->flags & NVME_F_FABRICS))
+   blk_cleanup_queue(ctrl->connect_q);
+out_free_tag_set:
+   if (new)
+   ctrl->ops->free_tagset(ctrl, false);
+out_free_io_queues:
+   nvme_free_io_queues(ctrl);
+   return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_configure_io_queues);
+
+void nvme_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
+{
+   ctrl->ops->stop_hw_queue(ctrl, 0);
+   if (remove) {
+   if (ctrl->ops->flags & NVME_F_FABRICS)
+   blk_cleanup_queue(ctrl->admin_connect_q);
+   blk_cleanup_queue(ctrl->admin_q);
+   ctrl->ops->free_tagset(ctrl, true);
+   }
+   ctrl->ops->free_hw_queue(ctrl, 0);
+}
+EXPORT_SYMBOL_GPL(nvme_destroy_admin_queue);
+
+int nvme_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
+{
+   int error;
+
+   error = ctrl->ops->alloc_hw_queue(ctrl, 0, NVME_AQ_DEPTH);
+   if (error)
+   return error;
+
+   if (new) {
+   ctrl->admin_tagset = ctrl->ops->alloc_tagset(ctrl, true);
+   if (IS_ERR(ctrl->admin_tagset)) {
+   error = PTR_ERR(ctrl->admin_tagset);