On 09.08.25 at 03:04, Mikhail Kshevetskiy wrote:
> Make use of the spi-mem direct mapping API to let advanced controllers
> optimize read/write operations when they support direct mapping.
> 
> Based on a linux commit 981d1aa0697ce1393e00933f154d181e965703d0

Please use the shortened SHA-1 as described in [1]. Same for the other
patches in this series. In this case:

$ git log --abbrev=12 --pretty="%h (\"%s\")" -1 981d1aa0697ce1393e00933f154d181e965703d0

981d1aa0697c ("mtd: spinand: Use the spi-mem dirmap API")

[1] https://docs.kernel.org/process/submitting-patches.html#describe-your-changes
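
(Just a convenience, not required: setting core.abbrev once makes plain %h
produce the 12-character form, so the pretty string needs no --abbrev, e.g.

$ git config --global core.abbrev 12
$ git log --pretty='%h ("%s")' -1 981d1aa0697ce1393e00933f154d181e965703d0
981d1aa0697c ("mtd: spinand: Use the spi-mem dirmap API")

The one-shot command above works just as well.)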

> (mtd: spinand: Use the spi-mem dirmap API)
> created by Boris Brezillon <bbrezil...@kernel.org> with additional
> fixes taken from linux-6.10.

Nitpick: Linux 6.10

> 
> Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevets...@iopsys.eu>
> ---
>  drivers/mtd/nand/spi/core.c | 185 +++++++++++++++++-------------------
>  include/linux/mtd/spinand.h |   7 ++
>  2 files changed, 95 insertions(+), 97 deletions(-)
> 
> diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
> index 3a1e7e18736..c1b8b9627f2 100644
> --- a/drivers/mtd/nand/spi/core.c
> +++ b/drivers/mtd/nand/spi/core.c
> @@ -41,21 +41,6 @@ struct spinand_plat {
>  /* SPI NAND index visible in MTD names */
>  static int spi_nand_idx;
>  
> -static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
> -                                       const struct nand_page_io_req *req,
> -                                       u16 *column)
> -{
> -     struct nand_device *nand = spinand_to_nand(spinand);
> -     unsigned int shift;
> -
> -     if (nand->memorg.planes_per_lun < 2)
> -             return;
> -
> -     /* The plane number is passed in MSB just above the column address */
> -     shift = fls(nand->memorg.pagesize);
> -     *column |= req->pos.plane << shift;
> -}
> -
>  static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
>  {
>       struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
> @@ -249,27 +234,21 @@ static int spinand_load_page_op(struct spinand_device *spinand,
>  static int spinand_read_from_cache_op(struct spinand_device *spinand,
>                                     const struct nand_page_io_req *req)
>  {
> -     struct spi_mem_op op = *spinand->op_templates.read_cache;
>       struct nand_device *nand = spinand_to_nand(spinand);
>       struct mtd_info *mtd = nanddev_to_mtd(nand);
> -     struct nand_page_io_req adjreq = *req;
> +     struct spi_mem_dirmap_desc *rdesc;
>       unsigned int nbytes = 0;
>       void *buf = NULL;
>       u16 column = 0;
> -     int ret;
> +     ssize_t ret;
>  
>       if (req->datalen) {
> -             adjreq.datalen = nanddev_page_size(nand);
> -             adjreq.dataoffs = 0;
> -             adjreq.databuf.in = spinand->databuf;
>               buf = spinand->databuf;
> -             nbytes = adjreq.datalen;
> +             nbytes = nanddev_page_size(nand);
> +             column = 0;
>       }
>  
>       if (req->ooblen) {
> -             adjreq.ooblen = nanddev_per_page_oobsize(nand);
> -             adjreq.ooboffs = 0;
> -             adjreq.oobbuf.in = spinand->oobbuf;
>               nbytes += nanddev_per_page_oobsize(nand);
>               if (!buf) {
>                       buf = spinand->oobbuf;
> @@ -277,28 +256,19 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
>               }
>       }
>  
> -     spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
> -     op.addr.val = column;
> +     rdesc = spinand->dirmaps[req->pos.plane].rdesc;
>  
> -     /*
> -      * Some controllers are limited in term of max RX data size. In this
> -      * case, just repeat the READ_CACHE operation after updating the
> -      * column.
> -      */
>       while (nbytes) {
> -             op.data.buf.in = buf;
> -             op.data.nbytes = nbytes;
> -             ret = spi_mem_adjust_op_size(spinand->slave, &op);
> -             if (ret)
> +             ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
> +             if (ret < 0)
>                       return ret;
>  
> -             ret = spi_mem_exec_op(spinand->slave, &op);
> -             if (ret)
> -                     return ret;
> +             if (!ret || ret > nbytes)
> +                     return -EIO;
>  
> -             buf += op.data.nbytes;
> -             nbytes -= op.data.nbytes;
> -             op.addr.val += op.data.nbytes;
> +             nbytes -= ret;
> +             column += ret;
> +             buf += ret;
>       }
>  
>       if (req->datalen)
> @@ -322,14 +292,12 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
>  static int spinand_write_to_cache_op(struct spinand_device *spinand,
>                                    const struct nand_page_io_req *req)
>  {
> -     struct spi_mem_op op = *spinand->op_templates.write_cache;
>       struct nand_device *nand = spinand_to_nand(spinand);
>       struct mtd_info *mtd = nanddev_to_mtd(nand);
> -     struct nand_page_io_req adjreq = *req;
> -     unsigned int nbytes = 0;
> -     void *buf = NULL;
> -     u16 column = 0;
> -     int ret;
> +     struct spi_mem_dirmap_desc *wdesc;
> +     unsigned int nbytes, column = 0;
> +     void *buf = spinand->databuf;
> +     ssize_t ret;
>  
>       /*
>        * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
> @@ -338,19 +306,12 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
>        * the data portion of the page, otherwise we might corrupt the BBM or
>        * user data previously programmed in OOB area.
>        */
> -     memset(spinand->databuf, 0xff,
> -            nanddev_page_size(nand) +
> -            nanddev_per_page_oobsize(nand));
> +     nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
> +     memset(spinand->databuf, 0xff, nbytes);
>  
> -     if (req->datalen) {
> +     if (req->datalen)
>               memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
>                      req->datalen);
> -             adjreq.dataoffs = 0;
> -             adjreq.datalen = nanddev_page_size(nand);
> -             adjreq.databuf.out = spinand->databuf;
> -             nbytes = adjreq.datalen;
> -             buf = spinand->databuf;
> -     }
>  
>       if (req->ooblen) {
>               if (req->mode == MTD_OPS_AUTO_OOB)
> @@ -361,52 +322,21 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
>               else
>                       memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
>                              req->ooblen);
> -
> -             adjreq.ooblen = nanddev_per_page_oobsize(nand);
> -             adjreq.ooboffs = 0;
> -             nbytes += nanddev_per_page_oobsize(nand);
> -             if (!buf) {
> -                     buf = spinand->oobbuf;
> -                     column = nanddev_page_size(nand);
> -             }
>       }
>  
> -     spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
> -
> -     op = *spinand->op_templates.write_cache;
> -     op.addr.val = column;
> +     wdesc = spinand->dirmaps[req->pos.plane].wdesc;
>  
> -     /*
> -      * Some controllers are limited in term of max TX data size. In this
> -      * case, split the operation into one LOAD CACHE and one or more
> -      * LOAD RANDOM CACHE.
> -      */
>       while (nbytes) {
> -             op.data.buf.out = buf;
> -             op.data.nbytes = nbytes;
> -
> -             ret = spi_mem_adjust_op_size(spinand->slave, &op);
> -             if (ret)
> -                     return ret;
> -
> -             ret = spi_mem_exec_op(spinand->slave, &op);
> -             if (ret)
> +             ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
> +             if (ret < 0)
>                       return ret;
>  
> -             buf += op.data.nbytes;
> -             nbytes -= op.data.nbytes;
> -             op.addr.val += op.data.nbytes;
> +             if (!ret || ret > nbytes)
> +                     return -EIO;
>  
> -             /*
> -              * We need to use the RANDOM LOAD CACHE operation if there's
> -              * more than one iteration, because the LOAD operation resets
> -              * the cache to 0xff.
> -              */
> -             if (nbytes) {
> -                     column = op.addr.val;
> -                     op = *spinand->op_templates.update_cache;
> -                     op.addr.val = column;
> -             }
> +             nbytes -= ret;
> +             column += ret;
> +             buf += ret;
>       }
>  
>       return 0;
> @@ -819,6 +749,59 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
>       return ret;
>  }
>  
> +static int spinand_create_dirmap(struct spinand_device *spinand,
> +                              unsigned int plane)
> +{
> +     struct nand_device *nand = spinand_to_nand(spinand);
> +     struct spi_mem_dirmap_info info = {
> +             .length = nanddev_page_size(nand) +
> +                       nanddev_per_page_oobsize(nand),
> +     };
> +     struct spi_mem_dirmap_desc *desc;
> +
> +     /* The plane number is passed in MSB just above the column address */
> +     info.offset = plane << fls(nand->memorg.pagesize);
> +
> +     info.op_tmpl = *spinand->op_templates.update_cache;
> +     desc = spi_mem_dirmap_create(spinand->slave, &info);
> +     if (IS_ERR(desc))
> +             return PTR_ERR(desc);
> +
> +     spinand->dirmaps[plane].wdesc = desc;
> +
> +     info.op_tmpl = *spinand->op_templates.read_cache;
> +     desc = spi_mem_dirmap_create(spinand->slave, &info);
> +     if (IS_ERR(desc)) {
> +             spi_mem_dirmap_destroy(spinand->dirmaps[plane].wdesc);
> +             return PTR_ERR(desc);
> +     }
> +
> +     spinand->dirmaps[plane].rdesc = desc;
> +
> +     return 0;
> +}
> +
> +static int spinand_create_dirmaps(struct spinand_device *spinand)
> +{
> +     struct nand_device *nand = spinand_to_nand(spinand);
> +     int i, ret;
> +
> +     spinand->dirmaps = devm_kzalloc(spinand->slave->dev,
> +                                     sizeof(*spinand->dirmaps) *
> +                                     nand->memorg.planes_per_lun,
> +                                     GFP_KERNEL);
> +     if (!spinand->dirmaps)
> +             return -ENOMEM;
> +
> +     for (i = 0; i < nand->memorg.planes_per_lun; i++) {
> +             ret = spinand_create_dirmap(spinand, i);
> +             if (ret)
> +                     return ret;
> +     }
> +
> +     return 0;
> +}
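
Another small nitpick, feel free to ignore: since this is an array
allocation, devm_kcalloc() would express it a bit more directly and guard
the size multiplication against overflow, assuming that helper is available
in this tree:

	spinand->dirmaps = devm_kcalloc(spinand->slave->dev,
					nand->memorg.planes_per_lun,
					sizeof(*spinand->dirmaps),
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

Not a blocker.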
> +
>  static const struct nand_ops spinand_ops = {
>       .erase = spinand_erase,
>       .markbad = spinand_markbad,
> @@ -1134,6 +1117,14 @@ static int spinand_init(struct spinand_device *spinand)
>               goto err_free_bufs;
>       }
>  
> +     ret = spinand_create_dirmaps(spinand);
> +     if (ret) {
> +             dev_err(spinand->slave->dev,
> +                     "Failed to create direct mappings for read/write operations (err = %d)\n",
> +                     ret);
> +             goto err_manuf_cleanup;
> +     }
> +
>       /* After power up, all blocks are locked, so unlock them here. */
>       for (i = 0; i < nand->memorg.ntargets; i++) {
>               ret = spinand_select_target(spinand, i);
> diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
> index 6fe6fd520a4..163269313f6 100644
> --- a/include/linux/mtd/spinand.h
> +++ b/include/linux/mtd/spinand.h
> @@ -363,6 +363,11 @@ struct spinand_info {
>               __VA_ARGS__                                             \
>       }
>  
> +struct spinand_dirmap {
> +     struct spi_mem_dirmap_desc *wdesc;
> +     struct spi_mem_dirmap_desc *rdesc;
> +};
> +
>  /**
>   * struct spinand_device - SPI NAND device instance
>   * @base: NAND device instance
> @@ -406,6 +411,8 @@ struct spinand_device {
>               const struct spi_mem_op *update_cache;
>       } op_templates;
>  
> +     struct spinand_dirmap *dirmaps;
> +
>       int (*select_target)(struct spinand_device *spinand,
>                            unsigned int target);
>       unsigned int cur_target;
