This patch adds cxgbit_ddp.c, which implements Direct Data Placement
(DDP) for the cxgbit iSCSI target driver.

Signed-off-by: Varun Prakash <va...@chelsio.com>
---
 drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 374 +++++++++++++++++++++++++++++++
 1 file changed, 374 insertions(+)
 create mode 100644 drivers/target/iscsi/cxgbit/cxgbit_ddp.c

diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
new file mode 100644
index 0000000..07e2bc8
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cxgbit.h"
+
+/*
+ * functions to program the pagepod in h/w
+ */
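+
+/*
+ * build a ULP_TX_MEM_WRITE work request that carries the pagepods as
+ * immediate data; the data length and the destination address in
+ * adapter memory are expressed in 32-byte units, hence the >> 5 below.
+ */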
+static void ulp_mem_io_set_hdr(struct cxgbit_device *cdev,
+                              struct ulp_mem_io *req,
+                              unsigned int wr_len,
+                              unsigned int dlen,
+                              unsigned int pm_addr,
+                              int tid)
+{
+       struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
+
+       INIT_ULPTX_WR(req, wr_len, 0, tid);
+       req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
+               FW_WR_ATOMIC_V(0));
+       req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+               ULP_MEMIO_ORDER_V(0) |
+               T5_ULP_MEMIO_IMM_V(1));
+       req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
+       req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
+       req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
+
+       idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+       idata->len = htonl(dlen);
+}
+
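+/*
+ * fill one pagepod with up to PPOD_PAGES_MAX page addresses from the
+ * DMA-mapped sgl; the extra last slot carries the page that also
+ * starts the next pod, so consecutive pods overlap by one entry.
+ */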
+static void cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
+                               struct cxgbi_task_tag_info *ttinfo,
+                               struct scatterlist **sg_pp,
+                               unsigned int *sg_off)
+{
+       struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
+       unsigned int offset = sg_off ? *sg_off : 0;
+       dma_addr_t addr = 0UL;
+       unsigned int len = 0;
+       int i;
+
+       memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));
+
+       if (sg) {
+               addr = sg_dma_address(sg);
+               len = sg_dma_len(sg);
+       }
+
+       for (i = 0; i < PPOD_PAGES_MAX; i++) {
+               if (sg) {
+                       ppod->addr[i] = cpu_to_be64(addr + offset);
+                       offset += PAGE_SIZE;
+                       if (offset == (len + sg->offset)) {
+                               offset = 0;
+                               sg = sg_next(sg);
+                               if (sg) {
+                                       addr = sg_dma_address(sg);
+                                       len = sg_dma_len(sg);
+                               }
+                       }
+               } else {
+                       ppod->addr[i] = 0ULL;
+               }
+       }
+
+       /*
+        * the fifth address needs to be repeated in the next ppod, so do
+        * not move sg
+        */
+       if (sg_pp) {
+               *sg_pp = sg;
+               *sg_off = offset;
+       }
+
+       /* sg may already be exhausted, guard sg_next() against NULL */
+       if (offset == len) {
+               offset = 0;
+               if (sg) {
+                       sg = sg_next(sg);
+                       if (sg) {
+                               addr = sg_dma_address(sg);
+                               len = sg_dma_len(sg);
+                       }
+               }
+       }
+       ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
+}
+
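+/*
+ * allocate an skb sized for a single ULP_TX work request that writes
+ * npods pagepods, starting at pod index idx, as immediate data.
+ */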
+static struct sk_buff *cxgbit_ppod_init_idata(struct cxgbit_device *cdev,
+                                             struct cxgbi_ppm *ppm,
+                                             unsigned int idx,
+                                             unsigned int npods,
+                                             unsigned int tid)
+{
+       unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
+       unsigned int dlen = npods << PPOD_SIZE_SHIFT;
+       unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
+                               sizeof(struct ulptx_idata) + dlen, 16);
+       struct sk_buff *skb = alloc_skb(wr_len, GFP_KERNEL);
+
+       if (!skb)
+               return NULL;
+
+       __skb_put(skb, wr_len);
+       ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->data, wr_len, dlen,
+                          pm_addr, tid);
+
+       return skb;
+}
+
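+/*
+ * build the pagepods for one work request and queue the skb on
+ * csk->ppodq; the queue is flushed to the hardware by the connection
+ * tx path elsewhere in the driver.
+ */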
+static int cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm,
+                                  struct cxgbit_sock *csk,
+                                  struct cxgbi_task_tag_info *ttinfo,
+                                  unsigned int idx, unsigned int npods,
+                                  struct scatterlist **sg_pp,
+                                  unsigned int *sg_off)
+{
+       struct cxgbit_device *cdev = csk->com.cdev;
+       struct sk_buff *skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods,
+                                               csk->tid);
+       struct ulp_mem_io *req;
+       struct ulptx_idata *idata;
+       struct cxgbi_pagepod *ppod;
+       int i;
+
+       if (!skb)
+               return -ENOMEM;
+
+       req = (struct ulp_mem_io *)skb->data;
+       idata = (struct ulptx_idata *)(req + 1);
+       ppod = (struct cxgbi_pagepod *)(idata + 1);
+
+       for (i = 0; i < npods; i++, ppod++)
+               cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
+
+       __skb_queue_tail(&csk->ppodq, skb);
+
+       return 0;
+}
+
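+/*
+ * program all pagepods for this reservation, in chunks of at most
+ * ULPMEM_IDATA_MAX_NPPODS pods per work request.
+ */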
+static int cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
+                             struct cxgbi_task_tag_info *ttinfo)
+{
+       unsigned int pidx = ttinfo->idx;
+       unsigned int npods = ttinfo->npods;
+       unsigned int i, cnt;
+       int ret = 0;
+       struct scatterlist *sg = ttinfo->sgl;
+       unsigned int offset = 0;
+
+       ttinfo->cid = csk->port_id;
+
+       for (i = 0; i < npods; i += cnt, pidx += cnt) {
+               cnt = npods - i;
+
+               if (cnt > ULPMEM_IDATA_MAX_NPPODS)
+                       cnt = ULPMEM_IDATA_MAX_NPPODS;
+
+               ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
+                                             &sg, &offset);
+               if (ret < 0)
+                       break;
+       }
+
+       return ret;
+}
+
+static void
+cxgbit_dump_sgl(const char *cap, struct scatterlist *sgl, int nents)
+{
+       struct scatterlist *sg;
+       int i;
+
+       if (cap)
+               pr_info("%s: sgl 0x%p, nents %u.\n", cap, sgl, nents);
+       for_each_sg(sgl, sg, nents, i)
+               pr_info("\t%d/%u, 0x%p: len %u, off %u, pg 0x%p, dma 0x%llx, %u\n",
+                       i, nents, sg, sg->length, sg->offset, sg_page(sg),
+                       sg_dma_address(sg), sg_dma_len(sg));
+}
+
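+/*
+ * reject buffers that cannot be described as a contiguous run of
+ * pages: every entry after the first must start at page offset zero,
+ * and intermediate entries must end on a page boundary.
+ */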
+static int cxgbit_ddp_sgl_check(struct scatterlist *sgl, int nents)
+{
+       int i;
+       int last_sgidx = nents - 1;
+       struct scatterlist *sg = sgl;
+
+       for (i = 1, sg = sg_next(sgl); i < nents; i++, sg = sg_next(sg)) {
+               if ((i && sg->offset) ||
+                   ((i != last_sgidx) &&
+                    ((sg->length + sg->offset) & ((1 << PAGE_SHIFT) - 1)))) {
+                       pr_info("%s: sg %u/%u, %u,%u, not page aligned.\n",
+                               __func__, i, nents, sg->offset, sg->length);
+                       cxgbit_dump_sgl(NULL, sgl, nents);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
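+/*
+ * reserve pagepods for the command buffer, DMA map the sgl and build
+ * the work requests that program the pods; the resulting ddp tag is
+ * returned in ttinfo->tag.
+ */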
+static int cxgbit_ddp_reserve(struct cxgbit_sock *csk,
+                             struct cxgbi_task_tag_info *ttinfo,
+                             unsigned int xferlen)
+{
+       struct cxgbit_device *cdev = csk->com.cdev;
+       struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+       struct scatterlist *sgl = ttinfo->sgl;
+       unsigned int sgcnt = ttinfo->nents;
+       unsigned int sg_offset = sgl->offset;
+       int ret;
+
+       if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt ||
+           ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) {
+               pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
+                        ppm, ppm ? ppm->tformat.pgsz_idx_dflt :
+                        DDP_PGIDX_MAX,
+                        xferlen, ttinfo->nents);
+               return -EINVAL;
+       }
+
+       /* make sure the buffer is suitable for ddp */
+       if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
+               return -EINVAL;
+
+       ttinfo->nr_pages = (xferlen + sgl->offset +
+                           (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
+
+       /*
+        * the ddp tag will be used for the ttt in the outgoing r2t pdu
+        */
+       ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
+                                     &ttinfo->tag, 0);
+       if (ret < 0)
+               return ret;
+       ttinfo->npods = ret;
+
+       /* setup dma from scsi command sgl */
+       sgl->offset = 0;
+       ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+       sgl->offset = sg_offset;
+       if (!ret) {
+               pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
+                       __func__, 0, xferlen, sgcnt);
+               goto rel_ppods;
+       }
+       if (ret != ttinfo->nr_pages) {
+               pr_info("%s: 0x%x, xfer %u, sgl %u, dma count %d.\n",
+                       __func__, 0, xferlen, sgcnt, ret);
+               cxgbit_dump_sgl(__func__, sgl, sgcnt);
+       }
+
+       ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED;
+       ttinfo->cid = csk->port_id;
+
+       cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
+                               xferlen, &ttinfo->hdr);
+
+       ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID;
+       cxgbit_ddp_set_map(ppm, csk, ttinfo);
+
+       return 0;
+
+rel_ppods:
+       cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+
+       if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) {
+               ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED;
+               dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+       }
+       return -EINVAL;
+}
+
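+/*
+ * set up DDP for the WRITE data of an iscsi command; the ddp tag is
+ * used as the target transfer tag in the outgoing R2T so the adapter
+ * can place incoming Data-Out payload directly into the command sgl.
+ */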
+int cxgbit_reserve_ttt(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+{
+       struct cxgbit_device *cdev = csk->com.cdev;
+       struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+       struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+       int ret = -EINVAL;
+
+       ttinfo->sgl = cmd->se_cmd.t_data_sg;
+       ttinfo->nents = cmd->se_cmd.t_data_nents;
+
+       ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
+       if (ret < 0) {
+               pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
+                       csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
+
+               ttinfo->sgl = NULL;
+               ttinfo->nents = 0;
+
+               return ret;
+       }
+
+       ccmd->release = true;
+
+       pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
+
+       return 0;
+}
+
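+/*
+ * release per-command DDP resources: drop the ppod reservation and
+ * unmap the sgl if DDP was set up, otherwise drop the page reference
+ * held in ccmd->sg.
+ */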
+void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+       struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+
+       if (ccmd->release) {
+               struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+
+               if (ttinfo->sgl) {
+                       struct cxgbit_sock *csk = conn->context;
+                       struct cxgbit_device *cdev = csk->com.cdev;
+                       struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+
+                       cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+
+                       dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
+                                    ttinfo->nents, DMA_FROM_DEVICE);
+               } else {
+                       put_page(sg_page(&ccmd->sg));
+               }
+
+               ccmd->release = false;
+       }
+}
+
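+/*
+ * initialize the ppod manager for this adapter; DDP is enabled only
+ * if the manager provides at least 1024 pods and a usable default
+ * page-size index.
+ */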
+static void cxgbit_ddp_ppm_setup(void **ppm_pp, struct cxgbit_device *cdev,
+                                struct cxgbi_tag_format *tformat,
+                                unsigned int ppmax,
+                                unsigned int llimit,
+                                unsigned int start)
+{
+       int ret = cxgbi_ppm_init(ppm_pp, cdev->lldi.ports[0], cdev->lldi.pdev,
+                                &cdev->lldi, tformat, ppmax, llimit, start,
+                                2);
+
+       if (ret >= 0) {
+               struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
+
+               if (ppm->ppmax < 1024 ||
+                   ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX)
+                       return;
+
+               set_bit(CDEV_DDP_ENABLE, &cdev->flags);
+       }
+}
+
+int cxgbit_ddp_init(struct cxgbit_device *cdev)
+{
+       struct cxgb4_lld_info *lldi = &cdev->lldi;
+       struct net_device *ndev = cdev->lldi.ports[0];
+       struct cxgbi_tag_format tformat;
+       unsigned int ppmax;
+       int i;
+
+       if (!lldi->vr->iscsi.size) {
+               pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
+               return -EACCES;
+       }
+
+       ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
+
+       memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
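+       /* each byte of iscsi_pgsz_order holds one page-size order */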
+       for (i = 0; i < 4; i++)
+               tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
+                                        & 0xF;
+       cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
+
+       cxgbit_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
+                            lldi->iscsi_llimit, lldi->vr->iscsi.start);
+       return 0;
+}
-- 
2.0.2
