author     Miquel Raynal <miquel.raynal@bootlin.com>   2024-03-15 14:00:45 +0300
committer  Miquel Raynal <miquel.raynal@bootlin.com>   2024-03-15 14:00:45 +0300
commit     09888e973cc9d3615dbab5d178eecb58d8a0b7ab (patch)
tree       f2728f4e4fd45f7ab5d9fed920f87724598e812e /drivers/mtd/nand/raw/nand_base.c
parent     2842dc9bc1a53893eec62ec9e49beb3b501702d0 (diff)
parent     4120aa0e3961f68f1f8cfe6b4c3c809ffea31fdc (diff)
Merge tag 'nand/for-6.9' into mtd/next
Raw NAND:

The main series brought is an update of the Broadcom driver to support all BCMBCA SoCs and their specificities (ECC, write protection, configuration straps), plus a few misc fixes and changes in the main driver. Device tree updates are also part of this PR, initially because of a misunderstanding on my side.

The STM32_FMC2 controller driver is also upgraded to properly support MP1 and MP25 SoCs. A new compatible is added for an Atmel flavor.

Among all these feature changes, there is as well a load of continuous read related fixes, avoiding more corner conditions and clarifying the logic. Finally, a few miscellaneous fixes are made to the core, the lpc32xx_mlc, fsl_lbc, Meson and Atmel controller drivers, as well as a final one in the Hynix vendor driver.

SPI-NAND:

The ESMT support has been extended to match 5-byte IDs to avoid collisions. The Winbond driver, on its side, receives support for W25N04KV chips.
Diffstat (limited to 'drivers/mtd/nand/raw/nand_base.c')
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c | 88
1 file changed, 59 insertions(+), 29 deletions(-)
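
For readers following the continuous read rework mentioned in the merge message, the LUN-boundary capping added below in rawnand_cap_cont_reads() can be pictured in isolation. The following is a minimal standalone sketch in plain C, not the kernel code itself: struct cont_read_demo, cap_cont_read(), last_page_of_lun(), min_u() and the geometry numbers are illustrative stand-ins for chip->cont_read and the memorg-derived pages-per-LUN value.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the relevant fields of chip->cont_read. */
struct cont_read_demo {
	unsigned int first_page;
	unsigned int last_page;
	unsigned int pause_page;
	bool ongoing;
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Index of the last page belonging to a given LUN. */
static unsigned int last_page_of_lun(unsigned int pages_per_lun, unsigned int lun)
{
	return (lun * pages_per_lun) + pages_per_lun - 1;
}

/* Pause a sequential cache read at the first LUN boundary it would cross. */
static void cap_cont_read(struct cont_read_demo *cr, unsigned int pages_per_lun)
{
	unsigned int first_lun = cr->first_page / pages_per_lun;
	unsigned int last_lun = cr->last_page / pages_per_lun;

	if (first_lun != last_lun)
		cr->pause_page = last_page_of_lun(pages_per_lun, first_lun);
	else
		cr->pause_page = cr->last_page;

	/* A one-page run is not worth pausing for: step past it and re-cap on the next LUN. */
	if (cr->first_page == cr->pause_page) {
		cr->first_page++;
		cr->pause_page = min_u(cr->last_page,
				       last_page_of_lun(pages_per_lun, first_lun + 1));
	}

	/* Nothing left to read continuously. */
	if (cr->first_page >= cr->last_page)
		cr->ongoing = false;
}

int main(void)
{
	/* Hypothetical geometry: 1024 pages per LUN, a read spanning two LUNs. */
	struct cont_read_demo cr = { .first_page = 1000, .last_page = 1100,
				     .pause_page = 0, .ongoing = true };

	cap_cont_read(&cr, 1024);
	printf("first=%u pause=%u last=%u ongoing=%d\n",
	       cr.first_page, cr.pause_page, cr.last_page, cr.ongoing);
	return 0;
}

With these hypothetical values the program prints first=1000 pause=1023 last=1100 ongoing=1: the run pauses at page 1023, the last page of LUN 0, matching the "Prevent sequential cache reads across LUN boundaries" comment, and the nand_lp_exec_cont_read_page_op() hunk below shows how the core then bumps first_page and re-caps for the next LUN.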
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 3b3ce2926f5d..d7dbbd469b89 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1211,21 +1211,36 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
return nand_exec_op(chip, &op);
}
+static unsigned int rawnand_last_page_of_lun(unsigned int pages_per_lun, unsigned int lun)
+{
+ /* lun is expected to be very small */
+ return (lun * pages_per_lun) + pages_per_lun - 1;
+}
+
static void rawnand_cap_cont_reads(struct nand_chip *chip)
{
struct nand_memory_organization *memorg;
- unsigned int pages_per_lun, first_lun, last_lun;
+ unsigned int ppl, first_lun, last_lun;
memorg = nanddev_get_memorg(&chip->base);
- pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
- first_lun = chip->cont_read.first_page / pages_per_lun;
- last_lun = chip->cont_read.last_page / pages_per_lun;
+ ppl = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
+ first_lun = chip->cont_read.first_page / ppl;
+ last_lun = chip->cont_read.last_page / ppl;
/* Prevent sequential cache reads across LUN boundaries */
if (first_lun != last_lun)
- chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
+ chip->cont_read.pause_page = rawnand_last_page_of_lun(ppl, first_lun);
else
chip->cont_read.pause_page = chip->cont_read.last_page;
+
+ if (chip->cont_read.first_page == chip->cont_read.pause_page) {
+ chip->cont_read.first_page++;
+ chip->cont_read.pause_page = min(chip->cont_read.last_page,
+ rawnand_last_page_of_lun(ppl, first_lun + 1));
+ }
+
+ if (chip->cont_read.first_page >= chip->cont_read.last_page)
+ chip->cont_read.ongoing = false;
}
static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
@@ -1292,12 +1307,11 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
if (!chip->cont_read.ongoing)
return 0;
- if (page == chip->cont_read.pause_page &&
- page != chip->cont_read.last_page) {
- chip->cont_read.first_page = chip->cont_read.pause_page + 1;
- rawnand_cap_cont_reads(chip);
- } else if (page == chip->cont_read.last_page) {
+ if (page == chip->cont_read.last_page) {
chip->cont_read.ongoing = false;
+ } else if (page == chip->cont_read.pause_page) {
+ chip->cont_read.first_page++;
+ rawnand_cap_cont_reads(chip);
}
return 0;
@@ -3466,30 +3480,36 @@ static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
u32 readlen, int col)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- unsigned int end_page, end_col;
+ unsigned int first_page, last_page;
chip->cont_read.ongoing = false;
if (!chip->controller->supported_op.cont_read)
return;
- end_page = DIV_ROUND_UP(col + readlen, mtd->writesize);
- end_col = (col + readlen) % mtd->writesize;
+ /*
+ * Don't bother making any calculations if the length is too small.
+ * Side effect: avoids possible integer underflows below.
+ */
+ if (readlen < (2 * mtd->writesize))
+ return;
+ /* Derive the page where continuous read should start (the first full page read) */
+ first_page = page;
if (col)
- page++;
+ first_page++;
- if (end_col && end_page)
- end_page--;
+ /* Derive the page where continuous read should stop (the last full page read) */
+ last_page = page + ((col + readlen) / mtd->writesize) - 1;
- if (page + 1 > end_page)
- return;
-
- chip->cont_read.first_page = page;
- chip->cont_read.last_page = end_page;
- chip->cont_read.ongoing = true;
-
- rawnand_cap_cont_reads(chip);
+ /* Configure and enable continuous read when suitable */
+ if (first_page < last_page) {
+ chip->cont_read.first_page = first_page;
+ chip->cont_read.last_page = last_page;
+ chip->cont_read.ongoing = true;
+ /* May reset the ongoing flag */
+ rawnand_cap_cont_reads(chip);
+ }
}
static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
@@ -3498,10 +3518,7 @@ static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned i
return;
chip->cont_read.first_page++;
- if (chip->cont_read.first_page == chip->cont_read.pause_page)
- chip->cont_read.first_page++;
- if (chip->cont_read.first_page >= chip->cont_read.last_page)
- chip->cont_read.ongoing = false;
+ rawnand_cap_cont_reads(chip);
}
/**
@@ -3577,7 +3594,8 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
oob = ops->oobbuf;
oob_required = oob ? 1 : 0;
- rawnand_enable_cont_reads(chip, page, readlen, col);
+ if (likely(ops->mode != MTD_OPS_RAW))
+ rawnand_enable_cont_reads(chip, page, readlen, col);
while (1) {
struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
@@ -3710,6 +3728,9 @@ read_retry:
}
nand_deselect_target(chip);
+ if (WARN_ON_ONCE(chip->cont_read.ongoing))
+ chip->cont_read.ongoing = false;
+
ops->retlen = ops->len - (size_t) readlen;
if (oob)
ops->oobretlen = ops->ooblen - oobreadlen;
@@ -5195,6 +5216,15 @@ static void rawnand_late_check_supported_ops(struct nand_chip *chip)
if (!nand_has_exec_op(chip))
return;
+ /*
+ * For now, continuous reads can only be used with the core page helpers.
+ * This can be extended later.
+ */
+ if (!(chip->ecc.read_page == nand_read_page_hwecc ||
+ chip->ecc.read_page == nand_read_page_syndrome ||
+ chip->ecc.read_page == nand_read_page_swecc))
+ return;
+
rawnand_check_cont_read_support(chip);
}