The code was ported from linux-6.12.

This is a port of Linux patch 631cfdd0520d19b7f4fc13b834fd9c8b46c6dbac
("mtd: spi-nand: Add continuous read support") by Miquel Raynal
<miquel.ray...@bootlin.com>. Continuous reads let the chip stream several
consecutive pages out of its cache within a single read operation, which
speeds up large sequential reads on flashes that support it.

Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevets...@iopsys.eu>
---
 drivers/mtd/nand/spi/core.c | 210 ++++++++++++++++++++++++++++++++----
 include/linux/mtd/nand.h    |  90 ++++++++++++++--
 include/linux/mtd/spinand.h |  19 +++-
 3 files changed, 291 insertions(+), 28 deletions(-)
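
Note (not part of the upstream change): a manufacturer driver opts in to
continuous reads by providing a set_cont_read() callback through
SPINAND_CONT_READ() in its spinand_info entry; the core then copies it into
spinand->set_cont_read in spinand_match_and_init(). The sketch below is only
an illustration of such a callback: the bit name, value and callback name are
placeholders (real chips use vendor-specific configuration bits, check the
datasheet), and it assumes spinand_upd_cfg() is usable from manufacturer
drivers, as it is on the Linux side.

/* Placeholder CFG bit, not a real chip definition */
#define EXAMPLE_CFG_CONT_READ_EN	BIT(2)

static int example_set_cont_read(struct spinand_device *spinand,
				 bool enable)
{
	/* Set or clear the (assumed) continuous read enable bit */
	return spinand_upd_cfg(spinand, EXAMPLE_CFG_CONT_READ_EN,
			       enable ? EXAMPLE_CFG_CONT_READ_EN : 0);
}

The chip's SPINAND_INFO() entry would then carry
SPINAND_CONT_READ(example_set_cont_read) next to its other descriptors.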

diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index a8fb3177f8a..a52cc91dd3c 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -222,6 +222,13 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
                               enable ? CFG_ECC_ENABLE : 0);
 }
 
+
+static int spinand_cont_read_enable(struct spinand_device *spinand,
+                                   bool enable)
+{
+       return spinand->set_cont_read(spinand, enable);
+}
+
 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
 {
        struct nand_device *nand = spinand_to_nand(spinand);
@@ -323,10 +330,22 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
 
        /* Finish a page read: check the status, report errors/bitflips */
        ret = spinand_check_ecc_status(spinand, spinand->last_wait_status);
-       if (ret == -EBADMSG)
+       if (ret == -EBADMSG) {
                mtd->ecc_stats.failed++;
-       else if (ret > 0)
-               mtd->ecc_stats.corrected += ret;
+       } else if (ret > 0) {
+               unsigned int pages;
+
+               /*
+                * Continuous reads don't allow us to get the detail,
+                * so we may exaggerate the actual number of corrected bitflips.
+                */
+               if (!req->continuous)
+                       pages = 1;
+               else
+                       pages = req->datalen / nanddev_page_size(nand);
+
+               mtd->ecc_stats.corrected += ret * pages;
+       }
 
        return ret;
 }
@@ -368,7 +387,11 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
 
        if (req->datalen) {
                buf = spinand->databuf;
-               nbytes = nanddev_page_size(nand);
+               if (!req->continuous)
+                       nbytes = nanddev_page_size(nand);
+               else
+                       nbytes = round_up(req->dataoffs + req->datalen,
+                                         nanddev_page_size(nand));
                column = 0;
        }
 
@@ -399,6 +422,13 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
                nbytes -= ret;
                column += ret;
                buf += ret;
+
+               /*
+                * Dirmap accesses are allowed to toggle the CS.
+                * Toggling the CS during a continuous read is forbidden.
+                */
+               if (nbytes && req->continuous)
+                       return -EIO;
        }
 
        if (req->datalen)
@@ -639,24 +669,20 @@ static int spinand_write_page(struct spinand_device *spinand,
        return spinand_ondie_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
 }
 
-static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
-                           struct mtd_oob_ops *ops)
+static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
+                                        struct mtd_oob_ops *ops,
+                                        unsigned int *max_bitflips)
 {
        struct spinand_device *spinand = mtd_to_spinand(mtd);
        struct nand_device *nand = mtd_to_nanddev(mtd);
-       unsigned int max_bitflips = 0;
        struct nand_io_iter iter;
        bool disable_ecc = false;
        bool ecc_failed = false;
-       int ret = 0;
+       int ret;
 
-       if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
+       if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
                disable_ecc = true;
 
-#ifndef __UBOOT__
-       mutex_lock(&spinand->lock);
-#endif
-
        nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
                schedule();
                if (disable_ecc)
@@ -673,18 +699,152 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
                if (ret == -EBADMSG)
                        ecc_failed = true;
                else
-                       max_bitflips = max_t(unsigned int, max_bitflips, ret);
+                       *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
 
                ret = 0;
                ops->retlen += iter.req.datalen;
                ops->oobretlen += iter.req.ooblen;
        }
 
+       if (ecc_failed && !ret)
+               ret = -EBADMSG;
+
+       return ret;
+}
+
+static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
+                                           struct mtd_oob_ops *ops,
+                                           unsigned int *max_bitflips)
+{
+       struct spinand_device *spinand = mtd_to_spinand(mtd);
+       struct nand_device *nand = mtd_to_nanddev(mtd);
+       struct nand_io_iter iter;
+       u8 status;
+       int ret;
+
+       ret = spinand_cont_read_enable(spinand, true);
+       if (ret)
+               return ret;
+
+       /*
+        * The cache is divided into two halves. While one half of the cache has
+        * the requested data, the other half is loaded with the next chunk of data.
+        * Therefore, the host can read out the data continuously from page to page.
+        * Each data read must be a multiple of 4-bytes and full pages should be read;
+        * otherwise, the data output might get out of sequence from one read command
+        * to another.
+        */
+       nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
+               schedule();
+               ret = spinand_select_target(spinand, iter.req.pos.target);
+               if (ret)
+                       goto end_cont_read;
+
+               ret = spinand_ondie_ecc_prepare_io_req(nand, &iter.req);
+               if (ret)
+                       goto end_cont_read;
+
+               ret = spinand_load_page_op(spinand, &iter.req);
+               if (ret)
+                       goto end_cont_read;
+
+               ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
+                                  SPINAND_READ_POLL_DELAY_US, NULL);
+               if (ret < 0)
+                       goto end_cont_read;
+
+               ret = spinand_read_from_cache_op(spinand, &iter.req);
+               if (ret)
+                       goto end_cont_read;
+
+               ops->retlen += iter.req.datalen;
+
+               ret = spinand_read_status(spinand, &status);
+               if (ret)
+                       goto end_cont_read;
+
+               spinand_ondie_ecc_save_status(nand, status);
+
+               ret = spinand_ondie_ecc_finish_io_req(nand, &iter.req);
+               if (ret < 0)
+                       goto end_cont_read;
+
+               *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+               ret = 0;
+       }
+
+end_cont_read:
+       /*
+        * Once all the data has been read out, the host can either pull CS#
+        * high and wait for tRST or manually clear the bit in the configuration
+        * register to terminate the continuous read operation. We have no
+        * guarantee the SPI controller drivers will effectively deassert the CS
+        * when we expect them to, so take the register based approach.
+        */
+       spinand_cont_read_enable(spinand, false);
+
+       return ret;
+}
+
+static void spinand_cont_read_init(struct spinand_device *spinand)
+{
+       /* OOBs cannot be retrieved so external/on-host ECC engine won't work */
+       if (spinand->set_cont_read) {
+               spinand->cont_read_possible = true;
+       }
+}
+
+static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
+                                 struct mtd_oob_ops *ops)
+{
+       struct nand_device *nand = mtd_to_nanddev(mtd);
+       struct spinand_device *spinand = nand_to_spinand(nand);
+       struct nand_pos start_pos, end_pos;
+
+       if (!spinand->cont_read_possible)
+               return false;
+
+       /* OOBs won't be retrieved */
+       if (ops->ooblen || ops->oobbuf)
+               return false;
+
+       nanddev_offs_to_pos(nand, from, &start_pos);
+       nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
+
+       /*
+        * Continuous reads never cross LUN boundaries. Some devices don't
+        * support crossing plane boundaries. Some devices don't even support
+        * crossing block boundaries. The common case being to read through UBI,
+        * we will very rarely read two consecutive blocks or more, so it is safer
+        * and easier (can be improved) to only enable continuous reads when
+        * reading within the same erase block.
+        */
+       if (start_pos.target != end_pos.target ||
+           start_pos.plane != end_pos.plane ||
+           start_pos.eraseblock != end_pos.eraseblock)
+               return false;
+
+       return start_pos.page < end_pos.page;
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+                           struct mtd_oob_ops *ops)
+{
+       unsigned int max_bitflips = 0;
+       int ret;
+
+#ifndef __UBOOT__
+       mutex_lock(&spinand->lock);
+#endif
+
+       if (spinand_use_cont_read(mtd, from, ops))
+               ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
+       else
+               ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+
 #ifndef __UBOOT__
        mutex_unlock(&spinand->lock);
 #endif
-       if (ecc_failed && !ret)
-               ret = -EBADMSG;
 
        return ret ? ret : max_bitflips;
 }
@@ -821,6 +981,7 @@ static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
 #ifndef __UBOOT__
        mutex_unlock(&spinand->lock);
 #endif
+
        return ret;
 }
 
@@ -846,6 +1007,7 @@ static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
                           SPINAND_ERASE_INITIAL_DELAY_US,
                           SPINAND_ERASE_POLL_DELAY_US,
                           &status);
+
        if (!ret && (status & STATUS_ERASE_FAILED))
                ret = -EIO;
 
@@ -902,6 +1064,9 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
        };
        struct spi_mem_dirmap_desc *desc;
 
+       if (spinand->cont_read_possible)
+               info.length = nanddev_eraseblock_size(nand);
+
        /* The plane number is passed in MSB just above the column address */
        info.offset = plane << fls(nand->memorg.pagesize);
 
@@ -1131,6 +1296,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
                spinand->flags = table[i].flags;
                spinand->id.len = 1 + table[i].devid.len;
                spinand->select_target = table[i].select_target;
+               spinand->set_cont_read = table[i].set_cont_read;
 
                op = spinand_select_op_variant(spinand,
                                               info->op_variants.read_cache);
@@ -1256,9 +1422,8 @@ static int spinand_init(struct spinand_device *spinand)
         * may use this buffer for DMA access.
         * Memory allocated by devm_ does not guarantee DMA-safe alignment.
         */
-       spinand->databuf = kzalloc(nanddev_page_size(nand) +
-                              nanddev_per_page_oobsize(nand),
-                              GFP_KERNEL);
+       spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
+                                  GFP_KERNEL);
        if (!spinand->databuf) {
                ret = -ENOMEM;
                goto err_free_bufs;
@@ -1283,6 +1448,12 @@ static int spinand_init(struct spinand_device *spinand)
        if (ret)
                goto err_cleanup_nanddev;
 
+       /*
+        * Continuous read can only be enabled with an on-die ECC engine, so the
+        * ECC initialization must have happened previously.
+        */
+       spinand_cont_read_init(spinand);
+
        mtd->_read_oob = spinand_mtd_read;
        mtd->_write_oob = spinand_mtd_write;
        mtd->_block_isbad = spinand_mtd_block_isbad;
@@ -1330,6 +1501,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
 {
        struct nand_device *nand = spinand_to_nand(spinand);
 
+       spinand_ondie_ecc_cleanup_ctx(nand);
        nanddev_cleanup(nand);
        spinand_manufacturer_cleanup(spinand);
        kfree(spinand->databuf);
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 910ef56ed0c..9285edd5c4b 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -101,6 +101,8 @@ enum nand_page_io_req_type {
  * @ooblen: the number of OOB bytes to read from/write to this page
  * @oobbuf: buffer to store OOB data in or get OOB data from
  * @mode: one of the %MTD_OPS_XXX mode
+ * @continuous: no need to start over the operation at the end of each page, the
+ * NAND device will automatically prepare the next one
  *
  * This object is used to pass per-page I/O requests to NAND sub-layers. This
  * way all useful information are already formatted in a useful way and
@@ -123,6 +125,7 @@ struct nand_page_io_req {
                void *in;
        } oobbuf;
        int mode;
+       bool continuous;
 };
 
 /**
@@ -638,19 +641,19 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
 }
 
 /**
- * nand_io_iter_init - Initialize a NAND I/O iterator
+ * nand_io_page_iter_init - Initialize a NAND I/O iterator
  * @nand: NAND device
  * @offs: absolute offset
  * @req: MTD request
  * @iter: NAND I/O iterator
  *
  * Initializes a NAND iterator based on the information passed by the MTD
- * layer.
+ * layer for page jumps.
  */
-static inline void nanddev_io_iter_init(struct nand_device *nand,
-                                       enum nand_page_io_req_type reqtype,
-                                       loff_t offs, struct mtd_oob_ops *req,
-                                       struct nand_io_iter *iter)
+static inline void nanddev_io_page_iter_init(struct nand_device *nand,
+                                            enum nand_page_io_req_type reqtype,
+                                            loff_t offs, struct mtd_oob_ops *req,
+                                            struct nand_io_iter *iter)
 {
        struct mtd_info *mtd = nanddev_to_mtd(nand);
 
@@ -669,6 +672,43 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
        iter->req.ooblen = min_t(unsigned int,
                                 iter->oobbytes_per_page - iter->req.ooboffs,
                                 iter->oobleft);
+       iter->req.continuous = false;
+}
+
+/**
+ * nand_io_block_iter_init - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer for block jumps (no OOB)
+ *
+ * In practice only reads may leverage this iterator.
+ */
+static inline void nanddev_io_block_iter_init(struct nand_device *nand,
+                                             enum nand_page_io_req_type reqtype,
+                                             loff_t offs, struct mtd_oob_ops *req,
+                                             struct nand_io_iter *iter)
+{
+       unsigned int offs_in_eb;
+
+       iter->req.type = reqtype;
+       iter->req.mode = req->mode;
+       iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+       iter->req.ooboffs = 0;
+       iter->oobbytes_per_page = 0;
+       iter->dataleft = req->len;
+       iter->oobleft = 0;
+       iter->req.databuf.in = req->datbuf;
+       offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
+       iter->req.datalen = min_t(unsigned int,
+                                 nanddev_eraseblock_size(nand) - offs_in_eb,
+                                 iter->dataleft);
+       iter->req.oobbuf.in = NULL;
+       iter->req.ooblen = 0;
+       iter->req.continuous = true;
 }
 
 /**
@@ -694,6 +734,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand,
                                 iter->oobleft);
 }
 
+/**
+ * nand_io_iter_next_block - Move to the next block
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next block.
+ * No OOB handling available.
+ */
+static inline void nanddev_io_iter_next_block(struct nand_device *nand,
+                                             struct nand_io_iter *iter)
+{
+       nanddev_pos_next_eraseblock(nand, &iter->req.pos);
+       iter->dataleft -= iter->req.datalen;
+       iter->req.databuf.in += iter->req.datalen;
+       iter->req.dataoffs = 0;
+       iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
+                                 iter->dataleft);
+}
+
 /**
  * nand_io_iter_end - Should end iteration or not
  * @nand: NAND device
@@ -722,13 +781,28 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
  * @req: MTD I/O request
  * @iter: NAND I/O iterator
  *
- * Should be used for iterate over pages that are contained in an MTD request.
+ * Should be used for iterating over pages that are contained in an MTD request.
  */
 #define nanddev_io_for_each_page(nand, type, start, req, iter)         \
-       for (nanddev_io_iter_init(nand, type, start, req, iter);        \
+       for (nanddev_io_page_iter_init(nand, type, start, req, iter);   \
             !nanddev_io_iter_end(nand, iter);                          \
             nanddev_io_iter_next_page(nand, iter))
 
+/**
+ * nand_io_for_each_block - Iterate over all NAND pages contained in an MTD I/O
+ *                         request, one block at a time
+ * @nand: NAND device
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used for iterating over blocks that are contained in an MTD request.
+ */
+#define nanddev_io_for_each_block(nand, type, start, req, iter)               \
+       for (nanddev_io_block_iter_init(nand, type, start, req, iter);  \
+            !nanddev_io_iter_end(nand, iter);                          \
+            nanddev_io_iter_next_block(nand, iter))
+
 bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
 bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
 int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 63626c9c27c..2f8212e4037 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -175,7 +175,7 @@
 struct spinand_op;
 struct spinand_device;
 
-#define SPINAND_MAX_ID_LEN     4
+#define SPINAND_MAX_ID_LEN     5
 /*
  * For erase, write and read operation, we got the following timings :
  * tBERS (erase) 1ms to 4ms
@@ -336,6 +336,7 @@ struct spinand_ecc_info {
  * @op_variants.update_cache: variants of the update-cache operation
  * @select_target: function used to select a target/die. Required only for
  *                multi-die chips
+ * @set_cont_read: enable/disable continuous cached reads
  *
  * Each SPI NAND manufacturer driver should have a spinand_info table
  * describing all the chips supported by the driver.
@@ -354,6 +355,8 @@ struct spinand_info {
        } op_variants;
        int (*select_target)(struct spinand_device *spinand,
                             unsigned int target);
+       int (*set_cont_read)(struct spinand_device *spinand,
+                            bool enable);
 };
 
 #define SPINAND_ID(__method, ...)                                      \
@@ -379,6 +382,9 @@ struct spinand_info {
 #define SPINAND_SELECT_TARGET(__func)                                  \
        .select_target = __func,
 
+#define SPINAND_CONT_READ(__set_cont_read)                             \
+       .set_cont_read = __set_cont_read,
+
 #define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
                     __flags, ...)                                      \
        {                                                               \
@@ -422,6 +428,12 @@ struct spinand_dirmap {
  *             passed in spi_mem_op be DMA-able, so we can't based the bufs on
  *             the stack
  * @manufacturer: SPI NAND manufacturer information
+ * @cont_read_possible: Field filled by the core once the whole system
+ *             configuration is known to tell whether continuous reads are
+ *             suitable to use or not in general with this chip/configuration.
+ *             A per-transfer check must of course be done to ensure it is
+ *             actually relevant to enable this feature.
+ * @set_cont_read: Enable/disable the continuous read feature
  * @priv: manufacturer private data
  * @last_wait_status: status of the last wait operation that will be used in case
  *                   ->get_status() is not populated by the spinand device.
@@ -457,7 +469,12 @@ struct spinand_device {
        u8 *scratchbuf;
        const struct spinand_manufacturer *manufacturer;
        void *priv;
+
        u8 last_wait_status;
+
+       bool cont_read_possible;
+       int (*set_cont_read)(struct spinand_device *spinand,
+                            bool enable);
 };
 
 /**
-- 
2.47.2
