author	Miquel Raynal <miquel.raynal@bootlin.com>	2024-08-26 12:14:06 +0200
committer	Miquel Raynal <miquel.raynal@bootlin.com>	2024-09-06 17:00:04 +0200
commit	79da17072e22a802a321ca44c9082ee2e855e72b (patch)
tree	cf27caf43f9b7b6261fe289984495e403f07e261
parent	8adf1ac24ba8ee34b8fd36ebf159004ec472fca0 (diff)
mtd: spi-nand: Isolate the MTD read logic in a helper
There is currently only a single path for performing page reads as
requested by the MTD layer. Soon there will be two:
- a "regular" page read
- a continuous page read

Let's extract the page read logic in a dedicated helper, so the
introduction of continuous page reads will be as easy as checking
whether continuous reads shall/can be used and calling one helper or
the other.

There is no behavioral change intended.

Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20240826101412.20644-4-miquel.raynal@bootlin.com
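For illustration only, here is a minimal sketch of the dispatch this refactor prepares for, once a continuous-read path exists. The spinand_use_cont_read() predicate and the spinand_mtd_continuous_page_read() helper are hypothetical names used purely for this sketch; only spinand_mtd_regular_page_read() is introduced by the patch below.

/*
 * Hypothetical sketch, not part of this patch: with a continuous-read
 * helper available, the MTD entry point only has to pick one of the two
 * page-read helpers, while locking and ECC statistics accounting stay
 * in the caller (see the second hunk of the diff below).
 */
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	unsigned int max_bitflips = 0;
	int ret;

	mutex_lock(&spinand->lock);

	/* Hypothetical predicate deciding whether a continuous read may be used */
	if (spinand_use_cont_read(mtd, from, ops))
		ret = spinand_mtd_continuous_page_read(mtd, from, ops,
						       &max_bitflips);
	else
		ret = spinand_mtd_regular_page_read(mtd, from, ops,
						    &max_bitflips);

	/* ops->stats accounting against mtd->ecc_stats, as in the diff below */

	mutex_unlock(&spinand->lock);

	return ret ? ret : max_bitflips;
}

Either helper would report the maximum number of corrected bitflips through *max_bitflips and return -EBADMSG on uncorrectable errors, which is the contract spinand_mtd_regular_page_read() adopts in this patch.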
-rw-r--r--	drivers/mtd/nand/spi/core.c	| 40
1 file changed, 26 insertions(+), 14 deletions(-)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 018c854d0619..1f468ed93c8e 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -630,25 +630,20 @@ static int spinand_write_page(struct spinand_device *spinand,
 	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
 }
 
-static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
-			    struct mtd_oob_ops *ops)
+static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
+					 struct mtd_oob_ops *ops,
+					 unsigned int *max_bitflips)
 {
 	struct spinand_device *spinand = mtd_to_spinand(mtd);
 	struct nand_device *nand = mtd_to_nanddev(mtd);
-	struct mtd_ecc_stats old_stats;
-	unsigned int max_bitflips = 0;
 	struct nand_io_iter iter;
 	bool disable_ecc = false;
 	bool ecc_failed = false;
-	int ret = 0;
+	int ret;
 
-	if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
+	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
 		disable_ecc = true;
 
-	mutex_lock(&spinand->lock);
-
-	old_stats = mtd->ecc_stats;
-
 	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
 		if (disable_ecc)
 			iter.req.mode = MTD_OPS_RAW;
@@ -664,13 +659,33 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
 		if (ret == -EBADMSG)
 			ecc_failed = true;
 		else
-			max_bitflips = max_t(unsigned int, max_bitflips, ret);
+			*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
 
 		ret = 0;
 		ops->retlen += iter.req.datalen;
 		ops->oobretlen += iter.req.ooblen;
 	}
 
+	if (ecc_failed && !ret)
+		ret = -EBADMSG;
+
+	return ret;
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+			    struct mtd_oob_ops *ops)
+{
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+	struct mtd_ecc_stats old_stats;
+	unsigned int max_bitflips = 0;
+	int ret;
+
+	mutex_lock(&spinand->lock);
+
+	old_stats = mtd->ecc_stats;
+
+	ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+
 	if (ops->stats) {
 		ops->stats->uncorrectable_errors +=
 			mtd->ecc_stats.failed - old_stats.failed;
@@ -680,9 +695,6 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
 
 	mutex_unlock(&spinand->lock);
 
-	if (ecc_failed && !ret)
-		ret = -EBADMSG;
-
 	return ret ? ret : max_bitflips;
 }