Re: [RFC PATCH 2/2] nvme: add simple copy support

2020-12-02 Thread Selva Jove
On Tue, Dec 1, 2020 at 8:46 PM Keith Busch  wrote:
>
> On Tue, Dec 01, 2020 at 11:09:49AM +0530, SelvaKumar S wrote:
> > +static void nvme_config_copy(struct gendisk *disk, struct nvme_ns *ns,
> > +struct nvme_id_ns *id)
> > +{
> > + struct nvme_ctrl *ctrl = ns->ctrl;
> > + struct request_queue *queue = disk->queue;
> > +
> > + if (!(ctrl->oncs & NVME_CTRL_ONCS_COPY)) {
> > + queue->limits.max_copy_sectors = 0;
> > + blk_queue_flag_clear(QUEUE_FLAG_COPY, queue);
> > + return;
> > + }
> > +
> > + /* setting copy limits */
> > + ns->mcl = le64_to_cpu(id->mcl);
> > + ns->mssrl = le32_to_cpu(id->mssrl);
> > + ns->msrc = id->msrc;
>
> These are not used anywhere outside this function, so there's no need to
> add members to the struct.

Sure. Will remove these entries from nvme_ns.

>
> > + if (blk_queue_flag_test_and_set(QUEUE_FLAG_COPY, queue))
> > + return;
>
> The queue limits are not necessarily the same each time we're called to
> update the disk info, so this return shouldn't be here.
>

Makes sense.
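
For reference, a rough sketch of how nvme_config_copy() could look with
both comments addressed (no new nvme_ns members, and the limits
recomputed on every update). This is only a sketch against the RFC, not
the final patch; the local name sects_per_lb is mine, the rest reuses
identifiers from the posted code:

static void nvme_config_copy(struct gendisk *disk, struct nvme_ns *ns,
			     struct nvme_id_ns *id)
{
	struct request_queue *queue = disk->queue;
	/* 512-byte sectors per logical block */
	u32 sects_per_lb = 1 << (ns->lba_shift - 9);

	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_COPY)) {
		queue->limits.max_copy_sectors = 0;
		blk_queue_flag_clear(QUEUE_FLAG_COPY, queue);
		return;
	}

	/* use the identify fields directly rather than caching them */
	queue->limits.max_copy_sectors = le64_to_cpu(id->mcl) * sects_per_lb;
	queue->limits.max_copy_range_sectors = le32_to_cpu(id->mssrl) *
						sects_per_lb;
	queue->limits.max_copy_nr_ranges = id->msrc + 1;
	blk_queue_flag_set(QUEUE_FLAG_COPY, queue);
}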

> > +
> > + queue->limits.max_copy_sectors = ns->mcl * (1 << (ns->lba_shift - 9));
> > + queue->limits.max_copy_range_sectors = ns->mssrl *
> > + (1 << (ns->lba_shift - 9));
> > + queue->limits.max_copy_nr_ranges = ns->msrc + 1;
> > +}
>
> <>
>
> > @@ -2045,6 +2133,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
> >   set_capacity_and_notify(disk, capacity);
> >
> >   nvme_config_discard(disk, ns);
> > + nvme_config_copy(disk, ns, id);
> >   nvme_config_write_zeroes(disk, ns);
> >
> >   if (id->nsattr & NVME_NS_ATTR_RO)
> > @@ -3014,6 +3103,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
> >   ctrl->oaes = le32_to_cpu(id->oaes);
> >   ctrl->wctemp = le16_to_cpu(id->wctemp);
> >   ctrl->cctemp = le16_to_cpu(id->cctemp);
> > + ctrl->ocfs = le32_to_cpu(id->ocfs);
>
> ocfs is not used anywhere.
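
If ocfs ends up being kept, one possible use for it (just a sketch, not
part of the posted patch; it assumes Copy Format 0h support is reported
in bit 0 of OCFS) would be to gate copy support on it in
nvme_config_copy():

	/* sketch: require Copy Format 0h before advertising copy support */
	if (!(ctrl->oncs & NVME_CTRL_ONCS_COPY) || !(ctrl->ocfs & 0x1)) {
		queue->limits.max_copy_sectors = 0;
		blk_queue_flag_clear(QUEUE_FLAG_COPY, queue);
		return;
	}

Otherwise the assignment can simply be dropped.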


Re: [RFC PATCH 2/2] nvme: add simple copy support

2020-12-01 Thread Keith Busch
On Tue, Dec 01, 2020 at 11:09:49AM +0530, SelvaKumar S wrote:
> +static void nvme_config_copy(struct gendisk *disk, struct nvme_ns *ns,
> +struct nvme_id_ns *id)
> +{
> + struct nvme_ctrl *ctrl = ns->ctrl;
> + struct request_queue *queue = disk->queue;
> +
> + if (!(ctrl->oncs & NVME_CTRL_ONCS_COPY)) {
> + queue->limits.max_copy_sectors = 0;
> + blk_queue_flag_clear(QUEUE_FLAG_COPY, queue);
> + return;
> + }
> +
> + /* setting copy limits */
> + ns->mcl = le64_to_cpu(id->mcl);
> + ns->mssrl = le32_to_cpu(id->mssrl);
> + ns->msrc = id->msrc;

These are not used anywhere outside this function, so there's no need to
add members to the struct.

> + if (blk_queue_flag_test_and_set(QUEUE_FLAG_COPY, queue))
> + return;

The queue limits are not necessarily the same each time we're called to
update the disk info, so this return shouldn't be here.

> +
> + queue->limits.max_copy_sectors = ns->mcl * (1 << (ns->lba_shift - 9));
> + queue->limits.max_copy_range_sectors = ns->mssrl *
> + (1 << (ns->lba_shift - 9));
> + queue->limits.max_copy_nr_ranges = ns->msrc + 1;
> +}

<>

> @@ -2045,6 +2133,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
>   set_capacity_and_notify(disk, capacity);
>  
>   nvme_config_discard(disk, ns);
> + nvme_config_copy(disk, ns, id);
>   nvme_config_write_zeroes(disk, ns);
>  
>   if (id->nsattr & NVME_NS_ATTR_RO)
> @@ -3014,6 +3103,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
>   ctrl->oaes = le32_to_cpu(id->oaes);
>   ctrl->wctemp = le16_to_cpu(id->wctemp);
>   ctrl->cctemp = le16_to_cpu(id->cctemp);
> + ctrl->ocfs = le32_to_cpu(id->ocfs);

ocfs is not used anywhere.


[RFC PATCH 2/2] nvme: add simple copy support

2020-11-30 Thread SelvaKumar S
Add support for TP 4065a ("Simple Copy Command"), v2020.05.04
("Ratified").

The implementation uses the payload passed from the block layer
to form the simple copy command, and exposes the device copy limits
as request queue limits.
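
For readers without patch 1/2 of the series handy: the blk_copy_payload
consumed below is defined by the block-layer patch and is not shown in
this mail. The sketch below lists only the fields this patch
dereferences (copy_range, range[].src, range[].len) and is an
assumption inferred from usage, not the authoritative definition:

	/* assumed shape, inferred from nvme_setup_copy() below */
	struct blk_copy_payload {
		int copy_range;		/* number of source ranges */
		struct {
			sector_t src;	/* source start, in 512-byte sectors */
			sector_t len;	/* range length, in 512-byte sectors */
		} range[];
	};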

Signed-off-by: SelvaKumar S 
Signed-off-by: Kanchan Joshi 
Signed-off-by: Nitesh Shetty 
Signed-off-by: Javier González 
---
 drivers/nvme/host/core.c | 91 
 drivers/nvme/host/nvme.h |  4 ++
 include/linux/nvme.h | 45 ++--
 3 files changed, 136 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9b6ebeb29cca..eb6a3157cb2b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -647,6 +647,65 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
 }
 
+static inline blk_status_t nvme_setup_copy(struct nvme_ns *ns,
+  struct request *req, struct nvme_command *cmnd)
+{
+   struct nvme_ctrl *ctrl = ns->ctrl;
+   struct nvme_copy_range *range = NULL;
+   struct blk_copy_payload *payload;
+   u16 control = 0;
+   u32 dsmgmt = 0;
+   int nr_range = 0, i;
+   u16 ssrl;
+   u64 slba;
+
+   payload = bio_data(req->bio);
+   nr_range = payload->copy_range;
+
+   if (req->cmd_flags & REQ_FUA)
+   control |= NVME_RW_FUA;
+
+   if (req->cmd_flags & REQ_FAILFAST_DEV)
+   control |= NVME_RW_LR;
+
+   cmnd->copy.opcode = nvme_cmd_copy;
+   cmnd->copy.nsid = cpu_to_le32(ns->head->ns_id);
+   cmnd->copy.sdlba = cpu_to_le64(blk_rq_pos(req) >> (ns->lba_shift - 9));
+
+   range = kmalloc_array(nr_range, sizeof(*range),
+   GFP_ATOMIC | __GFP_NOWARN);
+   if (!range)
+   return BLK_STS_RESOURCE;
+
+   for (i = 0; i < nr_range; i++) {
+   slba = payload->range[i].src;
+   slba = slba >> (ns->lba_shift - 9);
+
+   ssrl = payload->range[i].len;
+   ssrl = ssrl >> (ns->lba_shift - 9);
+
+   range[i].slba = cpu_to_le64(slba);
+   range[i].nlb = cpu_to_le16(ssrl - 1);
+   }
+
+   cmnd->copy.nr_range = nr_range - 1;
+
+   req->special_vec.bv_page = virt_to_page(range);
+   req->special_vec.bv_offset = offset_in_page(range);
+   req->special_vec.bv_len = sizeof(*range) * nr_range;
+   req->rq_flags |= RQF_SPECIAL_PAYLOAD;
+
+   if (ctrl->nr_streams)
+   nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
+
+   //TBD end-to-end
+
+   cmnd->rw.control = cpu_to_le16(control);
+   cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+
+   return BLK_STS_OK;
+}
+
 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd)
 {
@@ -829,6 +888,9 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
case REQ_OP_DISCARD:
ret = nvme_setup_discard(ns, req, cmd);
break;
+   case REQ_OP_COPY:
+   ret = nvme_setup_copy(ns, req, cmd);
+   break;
case REQ_OP_READ:
ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
break;
@@ -1850,6 +1912,32 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
 }
 
+static void nvme_config_copy(struct gendisk *disk, struct nvme_ns *ns,
+  struct nvme_id_ns *id)
+{
+   struct nvme_ctrl *ctrl = ns->ctrl;
+   struct request_queue *queue = disk->queue;
+
+   if (!(ctrl->oncs & NVME_CTRL_ONCS_COPY)) {
+   queue->limits.max_copy_sectors = 0;
+   blk_queue_flag_clear(QUEUE_FLAG_COPY, queue);
+   return;
+   }
+
+   /* setting copy limits */
+   ns->mcl = le64_to_cpu(id->mcl);
+   ns->mssrl = le32_to_cpu(id->mssrl);
+   ns->msrc = id->msrc;
+
+   if (blk_queue_flag_test_and_set(QUEUE_FLAG_COPY, queue))
+   return;
+
+   queue->limits.max_copy_sectors = ns->mcl * (1 << (ns->lba_shift - 9));
+   queue->limits.max_copy_range_sectors = ns->mssrl *
+   (1 << (ns->lba_shift - 9));
+   queue->limits.max_copy_nr_ranges = ns->msrc + 1;
+}
+
 static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
 {
u64 max_blocks;
@@ -2045,6 +2133,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
set_capacity_and_notify(disk, capacity);
 
nvme_config_discard(disk, ns);
+   nvme_config_copy(disk, ns, id);
nvme_config_write_zeroes(disk, ns);
 
if (id->nsattr & NVME_NS_ATTR_RO)
@@ -3014,6 +3103,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->oaes = le32_to_cpu(id->oaes);
ctrl->wctemp = le16_to_cpu(id->wctemp);
ctrl->cctemp = le16_to_cpu(id->cctemp);
+   ctrl->ocfs = le32_to_cpu(id->ocfs);
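
As a quick sanity check on the unit conversion used above (mcl and
mssrl are reported in logical blocks, while the queue limits are in
512-byte sectors), here is the arithmetic for a hypothetical namespace
formatted with 4 KiB logical blocks; all values are illustrative only:

#include <stdint.h>

void copy_limits_example(void)
{
	unsigned int lba_shift = 12;			  /* 4 KiB LBAs */
	unsigned int sects_per_lb = 1 << (lba_shift - 9); /* = 8 */

	uint64_t mcl   = 1024;	/* total copy length: 1024 LBAs = 4 MiB  */
	uint32_t mssrl = 256;	/* single source range: 256 LBAs = 1 MiB */
	uint8_t  msrc  = 7;	/* 0-based, i.e. 8 source ranges/command */

	uint64_t max_copy_sectors       = mcl * sects_per_lb;	/* 8192 */
	uint32_t max_copy_range_sectors = mssrl * sects_per_lb;	/* 2048 */
	uint32_t max_copy_nr_ranges     = msrc + 1;		/* 8    */

	(void)max_copy_sectors;
	(void)max_copy_range_sectors;
	(void)max_copy_nr_ranges;
}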