author     Miquel Raynal <miquel.raynal@bootlin.com>   2023-08-28 16:37:38 +0200
committer  Miquel Raynal <miquel.raynal@bootlin.com>   2023-08-28 16:37:38 +0200
commit     54a3f6e89fbbf12e1e3539a4f977a7536029d780 (patch)
tree       6dbb5024d574a83c23fc14eaf931d01a8bcf4ebd /drivers/mtd
parent     000412e4bb7e32b787fba1bf69a53bc4424476bf (diff)
parent     b9283ac01a277e73111dbd06d1974ace30e40f16 (diff)
Merge tag 'nand/for-6.6' into mtd/next
Raw NAND core changes:
* Fix -Wvoid-pointer-to-enum-cast warning
* Export 'nand_exit_status_op()'
* dt-bindings: Fix nand-controller.yaml license
Raw NAND controller driver changes:
* Omap, Omap2, Samsung, Atmel, fsl_upm, lpc32xx_slc, lpc32xx_mlc, STM32_FMC2,
  sh_flctl, MXC, Sunxi:
- Use devm_platform_get_and_ioremap_resource()
* Orion, vf610_nfc, Sunxi, STM32_FMC2, MTK, mpc5121, lpc32xx_slc, Intel,
FSMC, Arasan:
- Use helper function devm_clk_get_optional_enabled()
* Brcmnand:
- Use devm_platform_ioremap_resource_byname()
- Propagate init error -EPROBE_DEFER up
- Propagate error and simplify ternary operators
- Fix mtd oobsize
- Fix potential out-of-bounds access in oob write
- Fix crash during the panic_write
- Fix potential false time out warning
- Fix ECC level field setting for v7.2 controller
* fsmc: Handle clk prepare error in fsmc_nand_resume()
* Marvell: Add support for AC5 SoC
* Meson:
- Support for 512B ECC step size
- Fix build error
- Use NAND core API to check status
- dt-bindings:
* Make ECC properties dependent
* Support for 512B ECC step size
* Drop unneeded quotes
* Oxnas: Remove driver and bindings
* Qcom:
- Conversion to ->exec_op()
- Removal of the legacy interface
- Two full series of improvements/misc fixes
* Use the BIT() macro
* Use u8 instead of uint8_t
* Fix alignment with open parenthesis
* Fix the spacing
* Fix wrong indentation
* Fix a typo
* Early structure initialization
* Fix address parsing within ->exec_op()
* Remove superfluous initialization of "ret"
* Rename variables in qcom_op_cmd_mapping()
* Handle unsupported opcode in qcom_op_cmd_mapping()
* Fix the opcode check in qcom_check_op()
* Use EOPNOTSUPP instead of ENOTSUPP
* Wrap qcom_nand_exec_op() to 80 columns
* Unmap sg_list and free desc within submit_descs()
* Simplify the call to nand_prog_page_end_op()
* Do not override the error no of submit_descs()
* Sort includes alphabetically
* Clear buf_count and buf_start in raw read
* Add read/read_start ops in exec_op path
* vf610_nfc: Do not check 0 for platform_get_irq()
SPI-NAND changes:
* gigadevice: Add support for GD5F1GQ{4,5}RExxH
* esmt: Add support for F50D2G41KA
* toshiba: Add support for T{C,H}58NYG{0,2}S3HBAI4 and TH58NYG3S0HBAI6
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
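Most of the controller-driver churn listed above is the same mechanical conversion: the platform_get_resource() + devm_ioremap_resource() pair becomes devm_platform_get_and_ioremap_resource() (or devm_platform_ioremap_resource_byname()), and devm_clk_get() + clk_prepare_enable() becomes devm_clk_get_enabled() / devm_clk_get_optional_enabled(), which also lets the manual clk_disable_unprepare() unwinding in the error and remove paths go away. A minimal probe sketch of the resulting pattern follows; the foo_nfc driver and its private struct are made up for illustration, only the devm helpers are the real kernel APIs.

    #include <linux/clk.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    struct foo_nfc {                        /* hypothetical driver state */
            void __iomem *regs;
            struct clk *clk;
    };

    static int foo_nfc_probe(struct platform_device *pdev)
    {
            struct resource *res;
            struct foo_nfc *nfc;

            nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
            if (!nfc)
                    return -ENOMEM;

            /* look up and ioremap the first MEM resource in one call */
            nfc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
            if (IS_ERR(nfc->regs))
                    return PTR_ERR(nfc->regs);

            /*
             * Get and prepare_enable the (optional) clock; it is disabled,
             * unprepared and put automatically when the device detaches,
             * so no clk_disable_unprepare() in error or remove paths.
             */
            nfc->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
            if (IS_ERR(nfc->clk))
                    return PTR_ERR(nfc->clk);

            platform_set_drvdata(pdev, nfc);
            return 0;
    }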
Diffstat (limited to 'drivers/mtd')
31 files changed, 871 insertions, 946 deletions
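A note on the brcmnand "false time out" fix above: it adds one final status read after the jiffies-based polling loop expires, because a busy or preempted CPU may not have had a chance to observe the ready bit before the deadline. A generic sketch of that idiom (a hypothetical readl()-based register poll, not the driver's actual helper):

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/jiffies.h>
    #include <linux/processor.h>
    #include <linux/types.h>

    /* Poll @reg until (readl() & mask) == expected or @timeout_ms elapses. */
    static int poll_status(void __iomem *reg, u32 mask, u32 expected,
                           unsigned int timeout_ms)
    {
            unsigned long limit = jiffies + msecs_to_jiffies(timeout_ms);
            u32 val;

            do {
                    val = readl(reg);
                    if ((val & mask) == expected)
                            return 0;
                    cpu_relax();
            } while (time_after(limit, jiffies));

            /*
             * Sample once more after the deadline: the condition may have
             * become true while this thread was not running, and reporting
             * a timeout in that case would be a false alarm.
             */
            val = readl(reg);
            if ((val & mask) == expected)
                    return 0;

            return -ETIMEDOUT;
    }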
diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c index 7db53b59605d..a12f8f3efd07 100644 --- a/drivers/mtd/nand/onenand/onenand_omap2.c +++ b/drivers/mtd/nand/onenand/onenand_omap2.c @@ -467,12 +467,6 @@ static int omap2_onenand_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "error getting memory resource\n"); - return -EINVAL; - } - r = of_property_read_u32(np, "reg", &val); if (r) { dev_err(dev, "reg not found in DT\n"); @@ -486,11 +480,11 @@ static int omap2_onenand_probe(struct platform_device *pdev) init_completion(&c->irq_done); init_completion(&c->dma_done); c->gpmc_cs = val; - c->phys_base = res->start; - c->onenand.base = devm_ioremap_resource(dev, res); + c->onenand.base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(c->onenand.base)) return PTR_ERR(c->onenand.base); + c->phys_base = res->start; c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN); if (IS_ERR(c->int_gpiod)) { diff --git a/drivers/mtd/nand/onenand/onenand_samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c index 92151aa52964..fd6890a03d55 100644 --- a/drivers/mtd/nand/onenand/onenand_samsung.c +++ b/drivers/mtd/nand/onenand/onenand_samsung.c @@ -860,8 +860,7 @@ static int s3c_onenand_probe(struct platform_device *pdev) s3c_onenand_setup(mtd); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - onenand->base = devm_ioremap_resource(&pdev->dev, r); + onenand->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r); if (IS_ERR(onenand->base)) return PTR_ERR(onenand->base); @@ -874,8 +873,7 @@ static int s3c_onenand_probe(struct platform_device *pdev) this->options |= ONENAND_SKIP_UNLOCK_CHECK; if (onenand->type != TYPE_S5PC110) { - r = platform_get_resource(pdev, IORESOURCE_MEM, 1); - onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r); + onenand->ahb_addr = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(onenand->ahb_addr)) return PTR_ERR(onenand->ahb_addr); @@ -895,8 +893,7 @@ static int s3c_onenand_probe(struct platform_device *pdev) this->subpagesize = mtd->writesize; } else { /* S5PC110 */ - r = platform_get_resource(pdev, IORESOURCE_MEM, 1); - onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r); + onenand->dma_addr = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(onenand->dma_addr)) return PTR_ERR(onenand->dma_addr); diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index b523354dfb00..cbf8ae85e1ae 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -160,7 +160,7 @@ config MTD_NAND_MARVELL including: - PXA3xx processors (NFCv1) - 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2) - - 64-bit Aramda platforms (7k, 8k) (NFCv2) + - 64-bit Aramda platforms (7k, 8k, ac5) (NFCv2) config MTD_NAND_SLC_LPC32XX tristate "NXP LPC32xx SLC NAND controller" @@ -204,13 +204,6 @@ config MTD_NAND_BCM47XXNFLASH registered by bcma as platform devices. This enables driver for NAND flash memories. For now only BCM4706 is supported. -config MTD_NAND_OXNAS - tristate "Oxford Semiconductor NAND controller" - depends on ARCH_OXNAS || COMPILE_TEST - depends on HAS_IOMEM - help - This enables the NAND flash controller on Oxford Semiconductor SoCs. 
- config MTD_NAND_MPC5121_NFC tristate "MPC5121 NAND controller" depends on PPC_MPC512x diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index d93e861d8ba7..25120a4afada 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -26,7 +26,6 @@ obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o -obj-$(CONFIG_MTD_NAND_OXNAS) += oxnas_nand.o obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 906eef70cb6d..4621ec549cc7 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -1440,45 +1440,29 @@ static int anfc_probe(struct platform_device *pdev) anfc_reset(nfc); - nfc->controller_clk = devm_clk_get(&pdev->dev, "controller"); + nfc->controller_clk = devm_clk_get_enabled(&pdev->dev, "controller"); if (IS_ERR(nfc->controller_clk)) return PTR_ERR(nfc->controller_clk); - nfc->bus_clk = devm_clk_get(&pdev->dev, "bus"); + nfc->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus"); if (IS_ERR(nfc->bus_clk)) return PTR_ERR(nfc->bus_clk); - ret = clk_prepare_enable(nfc->controller_clk); - if (ret) - return ret; - - ret = clk_prepare_enable(nfc->bus_clk); - if (ret) - goto disable_controller_clk; - ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (ret) - goto disable_bus_clk; + return ret; ret = anfc_parse_cs(nfc); if (ret) - goto disable_bus_clk; + return ret; ret = anfc_chips_init(nfc); if (ret) - goto disable_bus_clk; + return ret; platform_set_drvdata(pdev, nfc); return 0; - -disable_bus_clk: - clk_disable_unprepare(nfc->bus_clk); - -disable_controller_clk: - clk_disable_unprepare(nfc->controller_clk); - - return ret; } static void anfc_remove(struct platform_device *pdev) @@ -1486,9 +1470,6 @@ static void anfc_remove(struct platform_device *pdev) struct arasan_nfc *nfc = platform_get_drvdata(pdev); anfc_chips_cleanup(nfc); - - clk_disable_unprepare(nfc->bus_clk); - clk_disable_unprepare(nfc->controller_clk); } static const struct of_device_id anfc_ids[] = { diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 81e3d682a8cd..3f494f7c7ecb 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -1791,8 +1791,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc) nand->numcs = 1; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - nand->cs[0].io.virt = devm_ioremap_resource(dev, res); + nand->cs[0].io.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(nand->cs[0].io.virt)) return PTR_ERR(nand->cs[0].io.virt); diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c index 71ddcc611f6e..9596629000f4 100644 --- a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c @@ -61,15 +61,13 @@ static int bcm63138_nand_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct bcm63138_nand_soc *priv; struct brcmnand_soc *soc; - struct resource *res; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; soc = &priv->soc; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 
"nand-int-base"); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base"); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 2e9c2e2d9c9f..440bef477930 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -272,6 +272,7 @@ struct brcmnand_controller { const unsigned int *page_sizes; unsigned int page_size_shift; unsigned int max_oob; + u32 ecc_level_shift; u32 features; /* for low-power standby/resume only */ @@ -596,6 +597,34 @@ enum { INTFC_CTLR_READY = BIT(31), }; +/*********************************************************************** + * NAND ACC CONTROL bitfield + * + * Some bits have remained constant throughout hardware revision, while + * others have shifted around. + ***********************************************************************/ + +/* Constant for all versions (where supported) */ +enum { + /* See BRCMNAND_HAS_CACHE_MODE */ + ACC_CONTROL_CACHE_MODE = BIT(22), + + /* See BRCMNAND_HAS_PREFETCH */ + ACC_CONTROL_PREFETCH = BIT(23), + + ACC_CONTROL_PAGE_HIT = BIT(24), + ACC_CONTROL_WR_PREEMPT = BIT(25), + ACC_CONTROL_PARTIAL_PAGE = BIT(26), + ACC_CONTROL_RD_ERASED = BIT(27), + ACC_CONTROL_FAST_PGM_RDIN = BIT(28), + ACC_CONTROL_WR_ECC = BIT(30), + ACC_CONTROL_RD_ECC = BIT(31), +}; + +#define ACC_CONTROL_ECC_SHIFT 16 +/* Only for v7.2 */ +#define ACC_CONTROL_ECC_EXT_SHIFT 13 + static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl) { #if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA) @@ -737,6 +766,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp")) ctrl->features |= BRCMNAND_HAS_WP; + /* v7.2 has different ecc level shift in the acc register */ + if (ctrl->nand_version == 0x0702) + ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT; + else + ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT; + return 0; } @@ -931,30 +966,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl) return 0; } -/*********************************************************************** - * NAND ACC CONTROL bitfield - * - * Some bits have remained constant throughout hardware revision, while - * others have shifted around. - ***********************************************************************/ - -/* Constant for all versions (where supported) */ -enum { - /* See BRCMNAND_HAS_CACHE_MODE */ - ACC_CONTROL_CACHE_MODE = BIT(22), - - /* See BRCMNAND_HAS_PREFETCH */ - ACC_CONTROL_PREFETCH = BIT(23), - - ACC_CONTROL_PAGE_HIT = BIT(24), - ACC_CONTROL_WR_PREEMPT = BIT(25), - ACC_CONTROL_PARTIAL_PAGE = BIT(26), - ACC_CONTROL_RD_ERASED = BIT(27), - ACC_CONTROL_FAST_PGM_RDIN = BIT(28), - ACC_CONTROL_WR_ECC = BIT(30), - ACC_CONTROL_RD_ECC = BIT(31), -}; - static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) { if (ctrl->nand_version == 0x0702) @@ -967,18 +978,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) return GENMASK(4, 0); } -#define NAND_ACC_CONTROL_ECC_SHIFT 16 -#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13 - static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) { u32 mask = (ctrl->nand_version >= 0x0600) ? 
0x1f : 0x0f; - mask <<= NAND_ACC_CONTROL_ECC_SHIFT; + mask <<= ACC_CONTROL_ECC_SHIFT; /* v7.2 includes additional ECC levels */ - if (ctrl->nand_version >= 0x0702) - mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT; + if (ctrl->nand_version == 0x0702) + mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT; return mask; } @@ -992,8 +1000,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) if (en) { acc_control |= ecc_flags; /* enable RD/WR ECC */ - acc_control |= host->hwcfg.ecc_level - << NAND_ACC_CONTROL_ECC_SHIFT; + acc_control &= ~brcmnand_ecc_level_mask(ctrl); + acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift; } else { acc_control &= ~ecc_flags; /* disable RD/WR ECC */ acc_control &= ~brcmnand_ecc_level_mask(ctrl); @@ -1072,6 +1080,14 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, cpu_relax(); } while (time_after(limit, jiffies)); + /* + * do a final check after time out in case the CPU was busy and the driver + * did not get enough time to perform the polling to avoid false alarms + */ + val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); + if ((val & mask) == expected_val) + return 0; + dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n", expected_val, val & mask); @@ -1461,19 +1477,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i, const u8 *oob, int sas, int sector_1k) { int tbytes = sas << sector_1k; - int j; + int j, k = 0; + u32 last = 0xffffffff; + u8 *plast = (u8 *)&last; /* Adjust OOB values for 1K sector size */ if (sector_1k && (i & 0x01)) tbytes = max(0, tbytes - (int)ctrl->max_oob); tbytes = min_t(int, tbytes, ctrl->max_oob); - for (j = 0; j < tbytes; j += 4) + /* + * tbytes may not be multiple of words. Make sure we don't read out of + * the boundary and stop at last word. + */ + for (j = 0; (j + 3) < tbytes; j += 4) oob_reg_write(ctrl, j, (oob[j + 0] << 24) | (oob[j + 1] << 16) | (oob[j + 2] << 8) | (oob[j + 3] << 0)); + + /* handle the remaing bytes */ + while (j < tbytes) + plast[k++] = oob[j++]; + + if (tbytes & 0x3) + oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last)); + return tbytes; } @@ -1592,7 +1622,17 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr); - BUG_ON(ctrl->cmd_pending != 0); + /* + * If we came here through _panic_write and there is a pending + * command, try to wait for it. If it times out, rather than + * hitting BUG_ON, just return so we don't crash while crashing. + */ + if (oops_in_progress) { + if (ctrl->cmd_pending && + bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0)) + return; + } else + BUG_ON(ctrl->cmd_pending != 0); ctrl->cmd_pending = cmd; ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); @@ -1626,13 +1666,13 @@ static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip) disable_ctrl_irqs(ctrl); sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); - err = (sts < 0) ? true : false; + err = sts < 0; } else { unsigned long timeo = msecs_to_jiffies( NAND_POLL_STATUS_TIMEOUT_MS); /* wait for completion interrupt */ sts = wait_for_completion_timeout(&ctrl->done, timeo); - err = (sts <= 0) ? 
true : false; + err = !sts; } return err; @@ -1648,6 +1688,7 @@ static int brcmnand_waitfunc(struct nand_chip *chip) if (ctrl->cmd_pending) err = brcmstb_nand_wait_for_completion(chip); + ctrl->cmd_pending = 0; if (err) { u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START) >> brcmnand_cmd_shift(ctrl); @@ -1656,8 +1697,8 @@ static int brcmnand_waitfunc(struct nand_chip *chip) "timeout waiting for command %#02x\n", cmd); dev_err_ratelimited(ctrl->dev, "intfc status %08x\n", brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS)); + return -ETIMEDOUT; } - ctrl->cmd_pending = 0; return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) & INTFC_FLASH_STATUS; } @@ -2561,7 +2602,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host, tmp &= ~brcmnand_ecc_level_mask(ctrl); tmp &= ~brcmnand_spare_area_mask(ctrl); if (ctrl->nand_version >= 0x0302) { - tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; + tmp |= cfg->ecc_level << ctrl->ecc_level_shift; tmp |= cfg->spare_area_size; } nand_writereg(ctrl, acc_control_offs, tmp); @@ -2612,6 +2653,8 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) struct nand_chip *chip = &host->chip; const struct nand_ecc_props *requirements = nanddev_get_ecc_requirements(&chip->base); + struct nand_memory_organization *memorg = + nanddev_get_memorg(&chip->base); struct brcmnand_controller *ctrl = host->ctrl; struct brcmnand_cfg *cfg = &host->hwcfg; char msg[128]; @@ -2633,10 +2676,11 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) if (cfg->spare_area_size > ctrl->max_oob) cfg->spare_area_size = ctrl->max_oob; /* - * Set oobsize to be consistent with controller's spare_area_size, as - * the rest is inaccessible. + * Set mtd and memorg oobsize to be consistent with controller's + * spare_area_size, as the rest is inaccessible. 
*/ mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT); + memorg->oobsize = mtd->oobsize; cfg->device_size = mtd->size; cfg->block_size = mtd->erasesize; @@ -3202,6 +3246,10 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc) ret = brcmnand_init_cs(host, NULL); if (ret) { + if (ret == -EPROBE_DEFER) { + of_node_put(child); + goto err; + } devm_kfree(dev, host); continue; /* Try all chip-selects */ } diff --git a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c index d32950847a62..089c70fc6edf 100644 --- a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c @@ -103,7 +103,6 @@ static int iproc_nand_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct iproc_nand_soc *priv; struct brcmnand_soc *soc; - struct resource *res; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -112,13 +111,11 @@ static int iproc_nand_probe(struct platform_device *pdev) spin_lock_init(&priv->idm_lock); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-idm"); - priv->idm_base = devm_ioremap_resource(dev, res); + priv->idm_base = devm_platform_ioremap_resource_byname(pdev, "iproc-idm"); if (IS_ERR(priv->idm_base)) return PTR_ERR(priv->idm_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-ext"); - priv->ext_base = devm_ioremap_resource(dev, res); + priv->ext_base = devm_platform_ioremap_resource_byname(pdev, "iproc-ext"); if (IS_ERR(priv->ext_base)) return PTR_ERR(priv->ext_base); diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index 5bf73efe42e7..797fda30c51e 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -173,8 +173,7 @@ static int fun_probe(struct platform_device *ofdev) if (!fun) return -ENOMEM; - io_res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); - fun->io_base = devm_ioremap_resource(&ofdev->dev, io_res); + fun->io_base = devm_platform_get_and_ioremap_resource(ofdev, 0, &io_res); if (IS_ERR(fun->io_base)) return PTR_ERR(fun->io_base); diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 7b4742420dfc..811982da3557 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -1066,16 +1066,12 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) host->regs_va = base + FSMC_NOR_REG_SIZE + (host->bank * FSMC_NAND_BANK_SZ); - host->clk = devm_clk_get(&pdev->dev, NULL); + host->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(host->clk)) { dev_err(&pdev->dev, "failed to fetch block clock\n"); return PTR_ERR(host->clk); } - ret = clk_prepare_enable(host->clk); - if (ret) - return ret; - /* * This device ID is actually a common AMBA ID as used on the * AMBA PrimeCell bus. However it is not a PrimeCell. 
@@ -1111,7 +1107,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) if (!host->read_dma_chan) { dev_err(&pdev->dev, "Unable to get read dma channel\n"); ret = -ENODEV; - goto disable_clk; + goto disable_fsmc; } host->write_dma_chan = dma_request_channel(mask, filter, NULL); if (!host->write_dma_chan) { @@ -1155,9 +1151,8 @@ release_dma_write_chan: release_dma_read_chan: if (host->mode == USE_DMA_ACCESS) dma_release_channel(host->read_dma_chan); -disable_clk: +disable_fsmc: fsmc_nand_disable(host); - clk_disable_unprepare(host->clk); return ret; } @@ -1182,7 +1177,6 @@ static void fsmc_nand_remove(struct platform_device *pdev) dma_release_channel(host->write_dma_chan); dma_release_channel(host->read_dma_chan); } - clk_disable_unprepare(host->clk); } } @@ -1200,9 +1194,14 @@ static int fsmc_nand_suspend(struct device *dev) static int fsmc_nand_resume(struct device *dev) { struct fsmc_nand_data *host = dev_get_drvdata(dev); + int ret; if (host) { - clk_prepare_enable(host->clk); + ret = clk_prepare_enable(host->clk); + if (ret) { + dev_err(dev, "failed to enable clk\n"); + return ret; + } if (host->dev_timings) fsmc_nand_setup(host, host->dev_timings); nand_reset(&host->nand, 0); diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c index a9909eb08124..cb5d88f42297 100644 --- a/drivers/mtd/nand/raw/intel-nand-controller.c +++ b/drivers/mtd/nand/raw/intel-nand-controller.c @@ -626,16 +626,10 @@ static int ebu_nand_probe(struct platform_device *pdev) goto err_of_node_put; } - ebu_host->clk = devm_clk_get(dev, NULL); + ebu_host->clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(ebu_host->clk)) { ret = dev_err_probe(dev, PTR_ERR(ebu_host->clk), - "failed to get clock\n"); - goto err_of_node_put; - } - - ret = clk_prepare_enable(ebu_host->clk); - if (ret) { - dev_err(dev, "failed to enable clock: %d\n", ret); + "failed to get and enable clock\n"); goto err_of_node_put; } @@ -643,7 +637,7 @@ static int ebu_nand_probe(struct platform_device *pdev) if (IS_ERR(ebu_host->dma_tx)) { ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx), "failed to request DMA tx chan!.\n"); - goto err_disable_unprepare_clk; + goto err_of_node_put; } ebu_host->dma_rx = dma_request_chan(dev, "rx"); @@ -698,8 +692,6 @@ err_clean_nand: nand_cleanup(&ebu_host->chip); err_cleanup_dma: ebu_dma_cleanup(ebu_host); -err_disable_unprepare_clk: - clk_disable_unprepare(ebu_host->clk); err_of_node_put: of_node_put(chip_np); @@ -716,7 +708,6 @@ static void ebu_nand_remove(struct platform_device *pdev) nand_cleanup(&ebu_host->chip); ebu_nand_disable(&ebu_host->chip); ebu_dma_cleanup(ebu_host); - clk_disable_unprepare(ebu_host->clk); } static const struct of_device_id ebu_nand_match[] = { diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c index b3136ae6f4e9..488fd452611a 100644 --- a/drivers/mtd/nand/raw/lpc32xx_mlc.c +++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c @@ -695,8 +695,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) host->pdev = pdev; - rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->io_base = devm_ioremap_resource(&pdev->dev, rc); + host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc); if (IS_ERR(host->io_base)) return PTR_ERR(host->io_base); diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c index 3139b6107660..1c5fa855b9f2 100644 --- a/drivers/mtd/nand/raw/lpc32xx_slc.c +++ b/drivers/mtd/nand/raw/lpc32xx_slc.c @@ -836,8 +836,7 @@ static int 
lpc32xx_nand_probe(struct platform_device *pdev) if (!host) return -ENOMEM; - rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->io_base = devm_ioremap_resource(&pdev->dev, rc); + host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc); if (IS_ERR(host->io_base)) return PTR_ERR(host->io_base); @@ -872,15 +871,12 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) mtd->dev.parent = &pdev->dev; /* Get NAND clock */ - host->clk = devm_clk_get(&pdev->dev, NULL); + host->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(host->clk)) { dev_err(&pdev->dev, "Clock failure\n"); res = -ENOENT; goto enable_wp; } - res = clk_prepare_enable(host->clk); - if (res) - goto enable_wp; /* Set NAND IO addresses and command/ready functions */ chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base); @@ -908,13 +904,13 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) GFP_KERNEL); if (host->data_buf == NULL) { res = -ENOMEM; - goto unprepare_clk; + goto enable_wp; } res = lpc32xx_nand_dma_setup(host); if (res) { res = -EIO; - goto unprepare_clk; + goto enable_wp; } /* Find NAND device */ @@ -935,8 +931,6 @@ cleanup_nand: nand_cleanup(chip); release_dma: dma_release_channel(host->dma_chan); -unprepare_clk: - clk_disable_unprepare(host->clk); enable_wp: lpc32xx_wp_enable(host); @@ -963,7 +957,6 @@ static void lpc32xx_nand_remove(struct platform_device *pdev) tmp &= ~SLCCFG_CE_LOW; writel(tmp, SLC_CTRL(host->io_base)); - clk_disable_unprepare(host->clk); lpc32xx_wp_enable(host); } diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 7d0204ea305b..2c94da7a3b3a 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -376,6 +376,7 @@ static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip * BCH error detection and correction algorithm, * NDCB3 register has been added * @use_dma: Use dma for data transfers + * @max_mode_number: Maximum timing mode supported by the controller */ struct marvell_nfc_caps { unsigned int max_cs_nb; @@ -384,6 +385,7 @@ struct marvell_nfc_caps { bool legacy_of_bindings; bool is_nfcv2; bool use_dma; + unsigned int max_mode_number; }; /** @@ -2377,6 +2379,9 @@ static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr, if (IS_ERR(sdr)) return PTR_ERR(sdr); + if (nfc->caps->max_mode_number && nfc->caps->max_mode_number < conf->timings.mode) + return -EOPNOTSUPP; + /* * SDR timings are given in pico-seconds while NFC timings must be * expressed in NAND controller clock cycles, which is half of the @@ -3074,6 +3079,13 @@ static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = { .is_nfcv2 = true, }; +static const struct marvell_nfc_caps marvell_ac5_caps = { + .max_cs_nb = 2, + .max_rb_nb = 1, + .is_nfcv2 = true, + .max_mode_number = 3, +}; + static const struct marvell_nfc_caps marvell_armada370_nfc_caps = { .max_cs_nb = 4, .max_rb_nb = 2, @@ -3123,6 +3135,10 @@ static const struct of_device_id marvell_nfc_of_ids[] = { .data = &marvell_armada_8k_nfc_caps, }, { + .compatible = "marvell,ac5-nand-controller", + .data = &marvell_ac5_caps, + }, + { .compatible = "marvell,armada370-nand-controller", .data = &marvell_armada370_nfc_caps, }, diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index ef821b43b3d4..de2e4ff0ce5f 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -134,6 +134,7 @@ struct meson_nfc_nand_chip { struct meson_nand_ecc { u32 bch; u32 strength; 
+ u32 size; }; struct meson_nfc_data { @@ -189,7 +190,8 @@ struct meson_nfc { }; enum { - NFC_ECC_BCH8_1K = 2, + NFC_ECC_BCH8_512 = 1, + NFC_ECC_BCH8_1K, NFC_ECC_BCH24_1K, NFC_ECC_BCH30_1K, NFC_ECC_BCH40_1K, @@ -197,15 +199,16 @@ enum { NFC_ECC_BCH60_1K, }; -#define MESON_ECC_DATA(b, s) { .bch = (b), .strength = (s)} +#define MESON_ECC_DATA(b, s, sz) { .bch = (b), .strength = (s), .size = (sz) } static struct meson_nand_ecc meson_ecc[] = { - MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8), - MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24), - MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30), - MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40), - MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50), - MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60), + MESON_ECC_DATA(NFC_ECC_BCH8_512, 8, 512), + MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8, 1024), + MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24, 1024), + MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30, 1024), + MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40, 1024), + MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50, 1024), + MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60, 1024), }; static int meson_nand_calc_ecc_bytes(int step_size, int strength) @@ -223,8 +226,27 @@ static int meson_nand_calc_ecc_bytes(int step_size, int strength) NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps, meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60); -NAND_ECC_CAPS_SINGLE(meson_axg_ecc_caps, - meson_nand_calc_ecc_bytes, 1024, 8); + +static const int axg_stepinfo_strengths[] = { 8 }; + +static const struct nand_ecc_step_info axg_stepinfo[] = { + { + .stepsize = 1024, + .strengths = axg_stepinfo_strengths, + .nstrengths = ARRAY_SIZE(axg_stepinfo_strengths) + }, + { + .stepsize = 512, + .strengths = axg_stepinfo_strengths, + .nstrengths = ARRAY_SIZE(axg_stepinfo_strengths) + }, +}; + +static const struct nand_ecc_caps meson_axg_ecc_caps = { + .stepinfos = axg_stepinfo, + .nstepinfos = ARRAY_SIZE(axg_stepinfo), + .calc_ecc_bytes = meson_nand_calc_ecc_bytes, +}; static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand) { @@ -399,9 +421,10 @@ static void meson_nfc_set_data_oob(struct nand_chip *nand, } } -static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms, +static int meson_nfc_wait_no_rb_pin(struct nand_chip *nand, int timeout_ms, bool need_cmd_read0) { + struct meson_nfc *nfc = nand_get_controller_data(nand); u32 cmd, cfg; meson_nfc_cmd_idle(nfc, nfc->timing.twb); @@ -413,8 +436,7 @@ static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms, writel(cfg, nfc->reg_base + NFC_REG_CFG); reinit_completion(&nfc->completion); - cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_STATUS; - writel(cmd, nfc->reg_base + NFC_REG_CMD); + nand_status_op(nand, NULL); /* use the max erase time as the maximum clock for waiting R/B */ cmd = NFC_CMD_RB | NFC_CMD_RB_INT_NO_PIN | nfc->timing.tbers_max; @@ -424,12 +446,8 @@ static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms, msecs_to_jiffies(timeout_ms))) return -ETIMEDOUT; - if (need_cmd_read0) { - cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_READ0; - writel(cmd, nfc->reg_base + NFC_REG_CMD); - meson_nfc_drain_cmd(nfc); - meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT); - } + if (need_cmd_read0) + nand_exit_status_op(nand); return 0; } @@ -462,9 +480,11 @@ static int meson_nfc_wait_rb_pin(struct meson_nfc *nfc, int timeout_ms) return ret; } -static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms, +static int meson_nfc_queue_rb(struct nand_chip *nand, int timeout_ms, bool need_cmd_read0) { + struct meson_nfc *nfc = nand_get_controller_data(nand); + if (nfc->no_rb_pin) { /* 
This mode is used when there is no wired R/B pin. * It works like 'nand_soft_waitrdy()', but instead of @@ -476,7 +496,7 @@ static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms, * needed (for all cases except page programming - this * is reason of 'need_cmd_read0' flag). */ - return meson_nfc_wait_no_rb_pin(nfc, timeout_ms, + return meson_nfc_wait_no_rb_pin(nand, timeout_ms, need_cmd_read0); } else { return meson_nfc_wait_rb_pin(nfc, timeout_ms); @@ -686,7 +706,7 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand, if (in) { nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART; writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD); - meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tR_max), true); + meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tR_max), true); } else { meson_nfc_cmd_idle(nfc, nfc->timing.tadl); } @@ -732,7 +752,7 @@ static int meson_nfc_write_page_sub(struct nand_chip *nand, cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG; writel(cmd, nfc->reg_base + NFC_REG_CMD); - meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tPROG_max), false); + meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tPROG_max), false); meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE); @@ -1048,7 +1068,7 @@ static int meson_nfc_exec_op(struct nand_chip *nand, break; case NAND_OP_WAITRDY_INSTR: - meson_nfc_queue_rb(nfc, instr->ctx.waitrdy.timeout_ms, + meson_nfc_queue_rb(nand, instr->ctx.waitrdy.timeout_ms, true); if (instr->delay_ns) meson_nfc_cmd_idle(nfc, delay_idle); @@ -1258,7 +1278,8 @@ static int meson_nand_bch_mode(struct nand_chip *nand) return -EINVAL; for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) { - if (meson_ecc[i].strength == nand->ecc.strength) { + if (meson_ecc[i].strength == nand->ecc.strength && + meson_ecc[i].size == nand->ecc.size) { meson_chip->bch_mode = meson_ecc[i].bch; return 0; } diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c index 6e8e790f84e7..215610f808f1 100644 --- a/drivers/mtd/nand/raw/mpc5121_nfc.c +++ b/drivers/mtd/nand/raw/mpc5121_nfc.c @@ -595,8 +595,6 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) struct nand_chip *chip = mtd_to_nand(mtd); struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip); - clk_disable_unprepare(prv->clk); - if (prv->csreg) iounmap(prv->csreg); } @@ -717,17 +715,12 @@ static int mpc5121_nfc_probe(struct platform_device *op) } /* Enable NFC clock */ - clk = devm_clk_get(dev, "ipg"); + clk = devm_clk_get_enabled(dev, "ipg"); if (IS_ERR(clk)) { - dev_err(dev, "Unable to acquire NFC clock!\n"); + dev_err(dev, "Unable to acquire and enable NFC clock!\n"); retval = PTR_ERR(clk); goto error; } - retval = clk_prepare_enable(clk); - if (retval) { - dev_err(dev, "Unable to enable NFC clock!\n"); - goto error; - } prv->clk = clk; /* Reset NAND Flash controller */ diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index b6eb8cb6b5e9..29c8bddde67f 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1118,32 +1118,6 @@ static irqreturn_t mtk_nfc_irq(int irq, void *id) return IRQ_HANDLED; } -static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk) -{ - int ret; - - ret = clk_prepare_enable(clk->nfi_clk); - if (ret) { - dev_err(dev, "failed to enable nfi clk\n"); - return ret; - } - - ret = clk_prepare_enable(clk->pad_clk); - if (ret) { - dev_err(dev, "failed to enable pad clk\n"); - clk_disable_unprepare(clk->nfi_clk); - return ret; - } - - return 0; -} - 
-static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk) -{ - clk_disable_unprepare(clk->nfi_clk); - clk_disable_unprepare(clk->pad_clk); -} - static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *oob_region) { @@ -1545,40 +1519,36 @@ static int mtk_nfc_probe(struct platform_device *pdev) goto release_ecc; } - nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk"); + nfc->clk.nfi_clk = devm_clk_get_enabled(dev, "nfi_clk"); if (IS_ERR(nfc->clk.nfi_clk)) { dev_err(dev, "no clk\n"); ret = PTR_ERR(nfc->clk.nfi_clk); goto release_ecc; } - nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk"); + nfc->clk.pad_clk = devm_clk_get_enabled(dev, "pad_clk"); if (IS_ERR(nfc->clk.pad_clk)) { dev_err(dev, "no pad clk\n"); ret = PTR_ERR(nfc->clk.pad_clk); goto release_ecc; } - ret = mtk_nfc_enable_clk(dev, &nfc->clk); - if (ret) - goto release_ecc; - irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -EINVAL; - goto clk_disable; + goto release_ecc; } ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc); if (ret) { dev_err(dev, "failed to request nfi irq\n"); - goto clk_disable; + goto release_ecc; } ret = dma_set_mask(dev, DMA_BIT_MASK(32)); if (ret) { dev_err(dev, "failed to set dma mask\n"); - goto clk_disable; + goto release_ecc; } platform_set_drvdata(pdev, nfc); @@ -1586,14 +1556,11 @@ static int mtk_nfc_probe(struct platform_device *pdev) ret = mtk_nfc_nand_chips_init(dev, nfc); if (ret) { dev_err(dev, "failed to init nand chips\n"); - goto clk_disable; + goto release_ecc; } return 0; -clk_disable: - mtk_nfc_disable_clk(&nfc->clk); - release_ecc: mtk_ecc_release(nfc->ecc); @@ -1618,7 +1585,6 @@ static void mtk_nfc_remove(struct platform_device *pdev) } mtk_ecc_release(nfc->ecc); - mtk_nfc_disable_clk(&nfc->clk); } #ifdef CONFIG_PM_SLEEP @@ -1626,7 +1592,8 @@ static int mtk_nfc_suspend(struct device *dev) { struct mtk_nfc *nfc = dev_get_drvdata(dev); - mtk_nfc_disable_clk(&nfc->clk); + clk_disable_unprepare(nfc->clk.nfi_clk); + clk_disable_unprepare(nfc->clk.pad_clk); return 0; } @@ -1641,9 +1608,18 @@ static int mtk_nfc_resume(struct device *dev) udelay(200); - ret = mtk_nfc_enable_clk(dev, &nfc->clk); - if (ret) + ret = clk_prepare_enable(nfc->clk.nfi_clk); + if (ret) { + dev_err(dev, "failed to enable nfi clk\n"); return ret; + } + + ret = clk_prepare_enable(nfc->clk.pad_clk); + if (ret) { + dev_err(dev, "failed to enable pad clk\n"); + clk_disable_unprepare(nfc->clk.nfi_clk); + return ret; + } /* reset NAND chip if VCC was powered off */ list_for_each_entry(chip, &nfc->chips, node) { diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 0cd98d5714fc..003008355b3c 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -1695,7 +1695,6 @@ static int mxcnd_probe(struct platform_device *pdev) struct nand_chip *this; struct mtd_info *mtd; struct mxc_nand_host *host; - struct resource *res; int err = 0; /* Allocate memory for MTD device structure and private data */ @@ -1739,17 +1738,15 @@ static int mxcnd_probe(struct platform_device *pdev) this->options |= NAND_KEEP_TIMINGS; if (host->devtype_data->needs_ip) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->regs_ip = devm_ioremap_resource(&pdev->dev, res); + host->regs_ip = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->regs_ip)) return PTR_ERR(host->regs_ip); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + host->base = devm_platform_ioremap_resource(pdev, 1); } else { - res = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); + host->base = devm_platform_ioremap_resource(pdev, 0); } - host->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(host->base)) return PTR_ERR(host->base); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index a6af521832aa..d4b55155aeae 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -1885,6 +1885,7 @@ int nand_exit_status_op(struct nand_chip *chip) return 0; } +EXPORT_SYMBOL_GPL(nand_exit_status_op); /** * nand_erase_op - Do an erase operation diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index e057c03a197d..c45bef6158e7 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -2219,8 +2219,7 @@ static int omap_nand_probe(struct platform_device *pdev) } } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - vaddr = devm_ioremap_resource(&pdev->dev, res); + vaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c index 7e0313889b50..2951d81614fd 100644 --- a/drivers/mtd/nand/raw/orion_nand.c +++ b/drivers/mtd/nand/raw/orion_nand.c @@ -169,16 +169,10 @@ static int __init orion_nand_probe(struct platform_device *pdev) platform_set_drvdata(pdev, info); /* Not all platforms can gate the clock, so it is optional. */ - info->clk = devm_clk_get_optional(&pdev->dev, NULL); + info->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); if (IS_ERR(info->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(info->clk), - "failed to get clock!\n"); - - ret = clk_prepare_enable(info->clk); - if (ret) { - dev_err(&pdev->dev, "failed to prepare clock!\n"); - return ret; - } + "failed to get and enable clock!\n"); /* * This driver assumes that the default ECC engine should be TYPE_SOFT. 
@@ -189,19 +183,13 @@ static int __init orion_nand_probe(struct platform_device *pdev) ret = nand_scan(nc, 1); if (ret) - goto no_dev; + return ret; mtd->name = "orion_nand"; ret = mtd_device_register(mtd, board->parts, board->nr_parts); - if (ret) { + if (ret) nand_cleanup(nc); - goto no_dev; - } - - return 0; -no_dev: - clk_disable_unprepare(info->clk); return ret; } @@ -215,8 +203,6 @@ static void orion_nand_remove(struct platform_device *pdev) WARN_ON(ret); nand_cleanup(chip); - - clk_disable_unprepare(info->clk); } #ifdef CONFIG_OF diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c deleted file mode 100644 index e3c9807df1cd..000000000000 --- a/drivers/mtd/nand/raw/oxnas_nand.c +++ /dev/null @@ -1,209 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Oxford Semiconductor OXNAS NAND driver - - * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com> - * Heavily based on plat_nand.c : - * Author: Vitaly Wool <vitalywool@gmail.com> - * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com> - * Copyright (C) 2012 John Crispin <blogic@openwrt.org> - */ - -#include <linux/err.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/clk.h> -#include <linux/reset.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/rawnand.h> -#include <linux/mtd/partitions.h> -#include <linux/of.h> - -/* Nand commands */ -#define OXNAS_NAND_CMD_ALE BIT(18) -#define OXNAS_NAND_CMD_CLE BIT(19) - -#define OXNAS_NAND_MAX_CHIPS 1 - -struct oxnas_nand_ctrl { - struct nand_controller base; - void __iomem *io_base; - struct clk *clk; - struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS]; - unsigned int nchips; -}; - -static uint8_t oxnas_nand_read_byte(struct nand_chip *chip) -{ - struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip); - - return readb(oxnas->io_base); -} - -static void oxnas_nand_read_buf(struct nand_chip *chip, u8 *buf, int len) -{ - struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip); - - ioread8_rep(oxnas->io_base, buf, len); -} - -static void oxnas_nand_write_buf(struct nand_chip *chip, const u8 *buf, - int len) -{ - struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip); - - iowrite8_rep(oxnas->io_base, buf, len); -} - -/* Single CS command control */ -static void oxnas_nand_cmd_ctrl(struct nand_chip *chip, int cmd, - unsigned int ctrl) -{ - struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip); - - if (ctrl & NAND_CLE) - writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_CLE); - else if (ctrl & NAND_ALE) - writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_ALE); -} - -/* - * Probe for the NAND device. 
- */ -static int oxnas_nand_probe(struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - struct device_node *nand_np; - struct oxnas_nand_ctrl *oxnas; - struct nand_chip *chip; - struct mtd_info *mtd; - int count = 0; - int err = 0; - int i; - - /* Allocate memory for the device structure (and zero it) */ - oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas), - GFP_KERNEL); - if (!oxnas) - return -ENOMEM; - - nand_controller_init(&oxnas->base); - - oxnas->io_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(oxnas->io_base)) - return PTR_ERR(oxnas->io_base); - - oxnas->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(oxnas->clk)) - oxnas->clk = NULL; - - /* Only a single chip node is supported */ - count = of_get_child_count(np); - if (count > 1) - return -EINVAL; - - err = clk_prepare_enable(oxnas->clk); - if (err) - return err; - - device_reset_optional(&pdev->dev); - - for_each_child_of_node(np, nand_np) { - chip = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip), - GFP_KERNEL); - if (!chip) { - err = -ENOMEM; - goto err_release_child; - } - - chip->controller = &oxnas->base; - - nand_set_flash_node(chip, nand_np); - nand_set_controller_data(chip, oxnas); - - mtd = nand_to_mtd(chip); - mtd->dev.parent = &pdev->dev; - mtd->priv = chip; - - chip->legacy.cmd_ctrl = oxnas_nand_cmd_ctrl; - chip->legacy.read_buf = oxnas_nand_read_buf; - chip->legacy.read_byte = oxnas_nand_read_byte; - chip->legacy.write_buf = oxnas_nand_write_buf; - chip->legacy.chip_delay = 30; - - /* Scan to find existence of the device */ - err = nand_scan(chip, 1); - if (err) - goto err_release_child; - - err = mtd_device_register(mtd, NULL, 0); - if (err) - goto err_cleanup_nand; - - oxnas->chips[oxnas->nchips++] = chip; - } - - /* Exit if no chips found */ - if (!oxnas->nchips) { - err = -ENODEV; - goto err_clk_unprepare; - } - - platform_set_drvdata(pdev, oxnas); - - return 0; - -err_cleanup_nand: - nand_cleanup(chip); -err_release_child: - of_node_put(nand_np); - - for (i = 0; i < oxnas->nchips; i++) { - chip = oxnas->chips[i]; - WARN_ON(mtd_device_unregister(nand_to_mtd(chip))); - nand_cleanup(chip); - } - -err_clk_unprepare: - clk_disable_unprepare(oxnas->clk); - return err; -} - -static void oxnas_nand_remove(struct platform_device *pdev) -{ - struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev); - struct nand_chip *chip; - int i; - - for (i = 0; i < oxnas->nchips; i++) { - chip = oxnas->chips[i]; - WARN_ON(mtd_device_unregister(nand_to_mtd(chip))); - nand_cleanup(chip); - } - - clk_disable_unprepare(oxnas->clk); -} - -static const struct of_device_id oxnas_nand_match[] = { - { .compatible = "oxsemi,ox820-nand" }, - {}, -}; -MODULE_DEVICE_TABLE(of, oxnas_nand_match); - -static struct platform_driver oxnas_nand_driver = { - .probe = oxnas_nand_probe, - .remove_new = oxnas_nand_remove, - .driver = { - .name = "oxnas_nand", - .of_match_table = oxnas_nand_match, - }, -}; - -module_platform_driver(oxnas_nand_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); -MODULE_DESCRIPTION("Oxnas NAND driver"); -MODULE_ALIAS("platform:oxnas_nand"); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index df245353b12b..64499c1b3603 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2,19 +2,19 @@ /* * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
*/ -#include <linux/clk.h> -#include <linux/platform_device.h> -#include <linux/slab.h> #include <linux/bitops.h> -#include <linux/dma/qcom_adm.h> -#include <linux/dma-mapping.h> +#include <linux/clk.h> +#include <linux/delay.h> #include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dma/qcom_adm.h> +#include <linux/dma/qcom_bam_dma.h> #include <linux/module.h> -#include <linux/mtd/rawnand.h> #include <linux/mtd/partitions.h> +#include <linux/mtd/rawnand.h> #include <linux/of.h> -#include <linux/delay.h> -#include <linux/dma/qcom_bam_dma.h> +#include <linux/platform_device.h> +#include <linux/slab.h> /* NANDc reg offsets */ #define NAND_FLASH_CMD 0x00 @@ -123,8 +123,8 @@ /* NAND_ERASED_CW_DETECT_CFG bits */ #define ERASED_CW_ECC_MASK 1 #define AUTO_DETECT_RES 0 -#define MASK_ECC (1 << ERASED_CW_ECC_MASK) -#define RESET_ERASED_DET (1 << AUTO_DETECT_RES) +#define MASK_ECC BIT(ERASED_CW_ECC_MASK) +#define RESET_ERASED_DET BIT(AUTO_DETECT_RES) #define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES) #define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC) #define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC) @@ -157,6 +157,7 @@ #define OP_PAGE_PROGRAM_WITH_ECC 0x7 #define OP_PROGRAM_PAGE_SPARE 0x9 #define OP_BLOCK_ERASE 0xa +#define OP_CHECK_STATUS 0xc #define OP_FETCH_ID 0xb #define OP_RESET_DEVICE 0xd @@ -211,7 +212,7 @@ nandc_set_reg(chip, reg, \ /* Returns the dma address for reg read buffer */ #define reg_buf_dma_addr(chip, vaddr) \ ((chip)->reg_read_dma + \ - ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf)) + ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf)) #define QPIC_PER_CW_CMD_ELEMENTS 32 #define QPIC_PER_CW_CMD_SGL 32 @@ -235,6 +236,8 @@ nandc_set_reg(chip, reg, \ */ #define NAND_ERASED_CW_SET BIT(4) +#define MAX_ADDRESS_CYCLE 5 + /* * This data type corresponds to the BAM transaction which will be used for all * NAND transfers. 
@@ -382,6 +385,9 @@ struct nandc_regs { * @reg_read_pos: marker for data read in reg_read_buf * * @cmd1/vld: some fixed controller register values + * + * @exec_opwrite: flag to select correct number of code word + * while reading status */ struct qcom_nand_controller { struct device *dev; @@ -432,6 +438,7 @@ struct qcom_nand_controller { int reg_read_pos; u32 cmd1, vld; + bool exec_opwrite; }; /* @@ -448,6 +455,29 @@ struct qcom_nand_boot_partition { }; /* + * Qcom op for each exec_op transfer + * + * @data_instr: data instruction pointer + * @data_instr_idx: data instruction index + * @rdy_timeout_ms: wait ready timeout in ms + * @rdy_delay_ns: Additional delay in ns + * @addr1_reg: Address1 register value + * @addr2_reg: Address2 register value + * @cmd_reg: CMD register value + * @flag: flag for misc instruction + */ +struct qcom_op { + const struct nand_op_instr *data_instr; + unsigned int data_instr_idx; + unsigned int rdy_timeout_ms; + unsigned int rdy_delay_ns; + u32 addr1_reg; + u32 addr2_reg; + u32 cmd_reg; + u8 flag; +}; + +/* * NAND chip structure * * @boot_partitions: array of boot partitions where offset and size of the @@ -1273,182 +1303,33 @@ static void config_nand_cw_write(struct nand_chip *chip) write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL); } -/* - * the following functions are used within chip->legacy.cmdfunc() to - * perform different NAND_CMD_* commands - */ - -/* sets up descriptors for NAND_CMD_PARAM */ -static int nandc_param(struct qcom_nand_host *host) -{ - struct nand_chip *chip = &host->chip; - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - - /* - * NAND_CMD_PARAM is called before we know much about the FLASH chip - * in use. we configure the controller to perform a raw read of 512 - * bytes to read onfi params - */ - if (nandc->props->qpic_v2) - nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ | - PAGE_ACC | LAST_PAGE); - else - nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ | - PAGE_ACC | LAST_PAGE); - - nandc_set_reg(chip, NAND_ADDR0, 0); - nandc_set_reg(chip, NAND_ADDR1, 0); - nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE - | 512 << UD_SIZE_BYTES - | 5 << NUM_ADDR_CYCLES - | 0 << SPARE_SIZE_BYTES); - nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES - | 0 << CS_ACTIVE_BSY - | 17 << BAD_BLOCK_BYTE_NUM - | 1 << BAD_BLOCK_IN_SPARE_AREA - | 2 << WR_RD_BSY_GAP - | 0 << WIDE_FLASH - | 1 << DEV0_CFG1_ECC_DISABLE); - if (!nandc->props->qpic_v2) - nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE); - - /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */ - if (!nandc->props->qpic_v2) { - nandc_set_reg(chip, NAND_DEV_CMD_VLD, - (nandc->vld & ~READ_START_VLD)); - nandc_set_reg(chip, NAND_DEV_CMD1, - (nandc->cmd1 & ~(0xFF << READ_ADDR)) - | NAND_CMD_PARAM << READ_ADDR); - } - - nandc_set_reg(chip, NAND_EXEC_CMD, 1); - - if (!nandc->props->qpic_v2) { - nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1); - nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); - } - - nandc_set_read_loc(chip, 0, 0, 0, 512, 1); - - if (!nandc->props->qpic_v2) { - write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); - write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); - } - - nandc->buf_count = 512; - memset(nandc->data_buffer, 0xff, nandc->buf_count); - - config_nand_single_cw_page_read(chip, false, 0); - - read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, - nandc->buf_count, 0); - - /* restore CMD1 and VLD regs */ - if (!nandc->props->qpic_v2) { - write_reg_dma(nandc, 
NAND_DEV_CMD1_RESTORE, 1, 0); - write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL); - } - - return 0; -} - -/* sets up descriptors for NAND_CMD_ERASE1 */ -static int erase_block(struct qcom_nand_host *host, int page_addr) -{ - struct nand_chip *chip = &host->chip; - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - - nandc_set_reg(chip, NAND_FLASH_CMD, - OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE); - nandc_set_reg(chip, NAND_ADDR0, page_addr); - nandc_set_reg(chip, NAND_ADDR1, 0); - nandc_set_reg(chip, NAND_DEV0_CFG0, - host->cfg0_raw & ~(7 << CW_PER_PAGE)); - nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw); - nandc_set_reg(chip, NAND_EXEC_CMD, 1); - nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus); - nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus); - - write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); - write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); - write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); - - read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); - - write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0); - write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL); - - return 0; -} - -/* sets up descriptors for NAND_CMD_READID */ -static int read_id(struct qcom_nand_host *host, int column) -{ - struct nand_chip *chip = &host->chip; - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - - if (column == -1) - return 0; - - nandc_set_reg(chip, NAND_FLASH_CMD, OP_FETCH_ID); - nandc_set_reg(chip, NAND_ADDR0, column); - nandc_set_reg(chip, NAND_ADDR1, 0); - nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT, - nandc->props->is_bam ? 0 : DM_EN); - nandc_set_reg(chip, NAND_EXEC_CMD, 1); - - write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL); - write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); - - read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); - - return 0; -} - -/* sets up descriptors for NAND_CMD_RESET */ -static int reset(struct qcom_nand_host *host) -{ - struct nand_chip *chip = &host->chip; - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - - nandc_set_reg(chip, NAND_FLASH_CMD, OP_RESET_DEVICE); - nandc_set_reg(chip, NAND_EXEC_CMD, 1); - - write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); - write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); - - read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); - - return 0; -} - /* helpers to submit/free our list of dma descriptors */ static int submit_descs(struct qcom_nand_controller *nandc) { - struct desc_info *desc; + struct desc_info *desc, *n; dma_cookie_t cookie = 0; struct bam_transaction *bam_txn = nandc->bam_txn; - int r; + int ret = 0; if (nandc->props->is_bam) { if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) { - r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0); - if (r) - return r; + ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0); + if (ret) + goto err_unmap_free_desc; } if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) { - r = prepare_bam_async_desc(nandc, nandc->tx_chan, + ret = prepare_bam_async_desc(nandc, nandc->tx_chan, DMA_PREP_INTERRUPT); - if (r) - return r; + if (ret) + goto err_unmap_free_desc; } if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { - r = prepare_bam_async_desc(nandc, nandc->cmd_chan, + ret = prepare_bam_async_desc(nandc, nandc->cmd_chan, DMA_PREP_CMD); - if (r) - return r; + if (ret) + goto err_unmap_free_desc; } } @@ -1470,19 +1351,17 @@ static int submit_descs(struct qcom_nand_controller *nandc) if 
(!wait_for_completion_timeout(&bam_txn->txn_done, QPIC_NAND_COMPLETION_TIMEOUT)) - return -ETIMEDOUT; + ret = -ETIMEDOUT; } else { if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE) - return -ETIMEDOUT; + ret = -ETIMEDOUT; } - return 0; -} - -static void free_descs(struct qcom_nand_controller *nandc) -{ - struct desc_info *desc, *n; - +err_unmap_free_desc: + /* + * Unmap the dma sg_list and free the desc allocated by both + * prepare_bam_async_desc() and prep_adm_dma_desc() functions. + */ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) { list_del(&desc->node); @@ -1495,6 +1374,8 @@ static void free_descs(struct qcom_nand_controller *nandc) kfree(desc); } + + return ret; } /* reset the register read buffer for next NAND operation */ @@ -1504,152 +1385,6 @@ static void clear_read_regs(struct qcom_nand_controller *nandc) nandc_read_buffer_sync(nandc, false); } -static void pre_command(struct qcom_nand_host *host, int command) -{ - struct nand_chip *chip = &host->chip; - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - - nandc->buf_count = 0; - nandc->buf_start = 0; - host->use_ecc = false; - host->last_command = command; - - clear_read_regs(nandc); - - if (command == NAND_CMD_RESET || command == NAND_CMD_READID || - command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1) - clear_bam_transaction(nandc); -} - -/* - * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our - * privately maintained status byte, this status byte can be read after - * NAND_CMD_STATUS is called - */ -static void parse_erase_write_errors(struct qcom_nand_host *host, int command) -{ - struct nand_chip *chip = &host->chip; - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - struct nand_ecc_ctrl *ecc = &chip->ecc; - int num_cw; - int i; - - num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1; - nandc_read_buffer_sync(nandc, true); - - for (i = 0; i < num_cw; i++) { - u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]); - - if (flash_status & FS_MPU_ERR) - host->status &= ~NAND_STATUS_WP; - - if (flash_status & FS_OP_ERR || (i == (num_cw - 1) && - (flash_status & - FS_DEVICE_STS_ERR))) - host->status |= NAND_STATUS_FAIL; - } -} - -static void post_command(struct qcom_nand_host *host, int command) -{ - struct nand_chip *chip = &host->chip; - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - - switch (command) { - case NAND_CMD_READID: - nandc_read_buffer_sync(nandc, true); - memcpy(nandc->data_buffer, nandc->reg_read_buf, - nandc->buf_count); - break; - case NAND_CMD_PAGEPROG: - case NAND_CMD_ERASE1: - parse_erase_write_errors(host, command); - break; - default: - break; - } -} - -/* - * Implements chip->legacy.cmdfunc. It's only used for a limited set of - * commands. The rest of the commands wouldn't be called by upper layers. - * For example, NAND_CMD_READOOB would never be called because we have our own - * versions of read_oob ops for nand_ecc_ctrl. 
- */ -static void qcom_nandc_command(struct nand_chip *chip, unsigned int command, - int column, int page_addr) -{ - struct qcom_nand_host *host = to_qcom_nand_host(chip); - struct nand_ecc_ctrl *ecc = &chip->ecc; - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - bool wait = false; - int ret = 0; - - pre_command(host, command); - - switch (command) { - case NAND_CMD_RESET: - ret = reset(host); - wait = true; - break; - - case NAND_CMD_READID: - nandc->buf_count = 4; - ret = read_id(host, column); - wait = true; - break; - - case NAND_CMD_PARAM: - ret = nandc_param(host); - wait = true; - break; - - case NAND_CMD_ERASE1: - ret = erase_block(host, page_addr); - wait = true; - break; - - case NAND_CMD_READ0: - /* we read the entire page for now */ - WARN_ON(column != 0); - - host->use_ecc = true; - set_address(host, 0, page_addr); - update_rw_regs(host, ecc->steps, true, 0); - break; - - case NAND_CMD_SEQIN: - WARN_ON(column != 0); - set_address(host, 0, page_addr); - break; - - case NAND_CMD_PAGEPROG: - case NAND_CMD_STATUS: - case NAND_CMD_NONE: - default: - break; - } - - if (ret) { - dev_err(nandc->dev, "failure executing command %d\n", - command); - free_descs(nandc); - return; - } - - if (wait) { - ret = submit_descs(nandc); - if (ret) - dev_err(nandc->dev, - "failure submitting descs for command %d\n", - command); - } - - free_descs(nandc); - - post_command(host, command); -} - /* * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS. @@ -1736,6 +1471,9 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip, int raw_cw = cw; nand_read_page_op(chip, page, 0, NULL, 0); + nandc->buf_count = 0; + nandc->buf_start = 0; + clear_read_regs(nandc); host->use_ecc = false; if (nandc->props->qpic_v2) @@ -1786,7 +1524,6 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip, read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0); ret = submit_descs(nandc); - free_descs(nandc); if (ret) { dev_err(nandc->dev, "failure to read raw cw %d\n", cw); return ret; @@ -1819,7 +1556,7 @@ check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf, struct mtd_info *mtd = nand_to_mtd(chip); struct nand_ecc_ctrl *ecc = &chip->ecc; u8 *cw_data_buf, *cw_oob_buf; - int cw, data_size, oob_size, ret = 0; + int cw, data_size, oob_size, ret; if (!data_buf) data_buf = nand_get_data_buf(chip); @@ -2040,8 +1777,6 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf, } ret = submit_descs(nandc); - free_descs(nandc); - if (ret) { dev_err(nandc->dev, "failure to read page/oob\n"); return ret; @@ -2080,8 +1815,6 @@ static int copy_last_cw(struct qcom_nand_host *host, int page) if (ret) dev_err(nandc->dev, "failed to copy last codeword\n"); - free_descs(nandc); - return ret; } @@ -2149,17 +1882,25 @@ static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page) } /* implements ecc->read_page() */ -static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf, +static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf, int oob_required, int page) { struct qcom_nand_host *host = to_qcom_nand_host(chip); struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; u8 *data_buf, *oob_buf = NULL; if (host->nr_boot_partitions) qcom_nandc_codeword_fixup(host, page); nand_read_page_op(chip, page, 0, NULL, 0); + nandc->buf_count = 0; + nandc->buf_start = 0; + host->use_ecc = true; + 
clear_read_regs(nandc); + set_address(host, 0, page); + update_rw_regs(host, ecc->steps, true, 0); + data_buf = buf; oob_buf = oob_required ? chip->oob_poi : NULL; @@ -2169,7 +1910,7 @@ static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf, } /* implements ecc->read_page_raw() */ -static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf, +static int qcom_nandc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); @@ -2215,7 +1956,7 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page) } /* implements ecc->write_page() */ -static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf, +static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf, int oob_required, int page) { struct qcom_nand_host *host = to_qcom_nand_host(chip); @@ -2229,6 +1970,9 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf, nand_prog_page_begin_op(chip, page, 0, NULL, 0); + set_address(host, 0, page); + nandc->buf_count = 0; + nandc->buf_start = 0; clear_read_regs(nandc); clear_bam_transaction(nandc); @@ -2251,7 +1995,6 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf, oob_size = ecc->bytes; } - write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size, i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0); @@ -2276,20 +2019,17 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf, } ret = submit_descs(nandc); - if (ret) + if (ret) { dev_err(nandc->dev, "failure to write page\n"); + return ret; + } - free_descs(nandc); - - if (!ret) - ret = nand_prog_page_end_op(chip); - - return ret; + return nand_prog_page_end_op(chip); } /* implements ecc->write_page_raw() */ static int qcom_nandc_write_page_raw(struct nand_chip *chip, - const uint8_t *buf, int oob_required, + const u8 *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); @@ -2352,15 +2092,12 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip, } ret = submit_descs(nandc); - if (ret) + if (ret) { dev_err(nandc->dev, "failure to write raw page\n"); + return ret; + } - free_descs(nandc); - - if (!ret) - ret = nand_prog_page_end_op(chip); - - return ret; + return nand_prog_page_end_op(chip); } /* @@ -2404,12 +2141,9 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page) config_nand_cw_write(chip); ret = submit_descs(nandc); - - free_descs(nandc); - if (ret) { dev_err(nandc->dev, "failure to write oob\n"); - return -EIO; + return ret; } return nand_prog_page_end_op(chip); @@ -2483,73 +2217,12 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs) config_nand_cw_write(chip); ret = submit_descs(nandc); - - free_descs(nandc); - if (ret) { dev_err(nandc->dev, "failure to update BBM\n"); - return -EIO; - } - - return nand_prog_page_end_op(chip); -} - -/* - * the three functions below implement chip->legacy.read_byte(), - * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. 
these - * aren't used for reading/writing page data, they are used for smaller data - * like reading id, status etc - */ -static uint8_t qcom_nandc_read_byte(struct nand_chip *chip) -{ - struct qcom_nand_host *host = to_qcom_nand_host(chip); - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - u8 *buf = nandc->data_buffer; - u8 ret = 0x0; - - if (host->last_command == NAND_CMD_STATUS) { - ret = host->status; - - host->status = NAND_STATUS_READY | NAND_STATUS_WP; - return ret; } - if (nandc->buf_start < nandc->buf_count) - ret = buf[nandc->buf_start++]; - - return ret; -} - -static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len) -{ - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start); - - memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len); - nandc->buf_start += real_len; -} - -static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf, - int len) -{ - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start); - - memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len); - - nandc->buf_start += real_len; -} - -/* we support only one external chip for now */ -static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr) -{ - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - - if (chipnr <= 0) - return; - - dev_warn(nandc->dev, "invalid chip select\n"); + return nand_prog_page_end_op(chip); } /* @@ -2660,7 +2333,7 @@ static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section, } static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) + struct mtd_oob_region *oobregion) { struct nand_chip *chip = mtd_to_nand(mtd); struct qcom_nand_host *host = to_qcom_nand_host(chip); @@ -2685,6 +2358,7 @@ qcom_nandc_calc_ecc_bytes(int step_size, int strength) { return strength == 4 ? 
12 : 16; } + NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes, NANDC_STEP_SIZE, 4, 8); @@ -2867,8 +2541,479 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) return 0; } +static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode, + struct qcom_op *q_op) +{ + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct qcom_nand_host *host = to_qcom_nand_host(chip); + int cmd; + + switch (opcode) { + case NAND_CMD_RESET: + cmd = OP_RESET_DEVICE; + break; + case NAND_CMD_READID: + cmd = OP_FETCH_ID; + break; + case NAND_CMD_PARAM: + if (nandc->props->qpic_v2) + cmd = OP_PAGE_READ_ONFI_READ; + else + cmd = OP_PAGE_READ; + break; + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: + cmd = OP_BLOCK_ERASE; + break; + case NAND_CMD_STATUS: + cmd = OP_CHECK_STATUS; + break; + case NAND_CMD_PAGEPROG: + cmd = OP_PROGRAM_PAGE; + q_op->flag = OP_PROGRAM_PAGE; + nandc->exec_opwrite = true; + break; + case NAND_CMD_READ0: + case NAND_CMD_READSTART: + if (host->use_ecc) + cmd = OP_PAGE_READ_WITH_ECC; + else + cmd = OP_PAGE_READ; + break; + default: + dev_err(nandc->dev, "Opcode not supported: %u\n", opcode); + return -EOPNOTSUPP; + } + + return cmd; +} + +/* NAND framework ->exec_op() hooks and related helpers */ +static int qcom_parse_instructions(struct nand_chip *chip, + const struct nand_subop *subop, + struct qcom_op *q_op) +{ + const struct nand_op_instr *instr = NULL; + unsigned int op_id; + int i, ret; + + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + unsigned int offset, naddrs; + const u8 *addrs; + + instr = &subop->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + ret = qcom_op_cmd_mapping(chip, instr->ctx.cmd.opcode, q_op); + if (ret < 0) + return ret; + + q_op->cmd_reg = ret; + q_op->rdy_delay_ns = instr->delay_ns; + break; + + case NAND_OP_ADDR_INSTR: + offset = nand_subop_get_addr_start_off(subop, op_id); + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + + for (i = 0; i < min_t(unsigned int, 4, naddrs); i++) + q_op->addr1_reg |= addrs[i] << (i * 8); + + if (naddrs > 4) + q_op->addr2_reg |= addrs[4]; + + q_op->rdy_delay_ns = instr->delay_ns; + break; + + case NAND_OP_DATA_IN_INSTR: + q_op->data_instr = instr; + q_op->data_instr_idx = op_id; + q_op->rdy_delay_ns = instr->delay_ns; + fallthrough; + case NAND_OP_DATA_OUT_INSTR: + q_op->rdy_delay_ns = instr->delay_ns; + break; + + case NAND_OP_WAITRDY_INSTR: + q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; + q_op->rdy_delay_ns = instr->delay_ns; + break; + } + } + + return 0; +} + +static void qcom_delay_ns(unsigned int ns) +{ + if (!ns) + return; + + if (ns < 10000) + ndelay(ns); + else + udelay(DIV_ROUND_UP(ns, 1000)); +} + +static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms) +{ + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + unsigned long start = jiffies + msecs_to_jiffies(time_ms); + u32 flash; + + nandc_read_buffer_sync(nandc, true); + + do { + flash = le32_to_cpu(nandc->reg_read_buf[0]); + if (flash & FS_READY_BSY_N) + return 0; + cpu_relax(); + } while (time_after(start, jiffies)); + + dev_err(nandc->dev, "Timeout waiting for device to be ready:0x%08x\n", flash); + + return -ETIMEDOUT; +} + +static int qcom_read_status_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + struct 
qcom_op q_op = {}; + const struct nand_op_instr *instr = NULL; + unsigned int op_id = 0; + unsigned int len = 0; + int ret, num_cw, i; + u32 flash_status; + + host->status = NAND_STATUS_READY | NAND_STATUS_WP; + + ret = qcom_parse_instructions(chip, subop, &q_op); + if (ret) + return ret; + + num_cw = nandc->exec_opwrite ? ecc->steps : 1; + nandc->exec_opwrite = false; + + nandc->buf_count = 0; + nandc->buf_start = 0; + host->use_ecc = false; + + clear_read_regs(nandc); + clear_bam_transaction(nandc); + + nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg); + nandc_set_reg(chip, NAND_EXEC_CMD, 1); + + write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); + write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + + ret = submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure in submitting status descriptor\n"); + goto err_out; + } + + nandc_read_buffer_sync(nandc, true); + + for (i = 0; i < num_cw; i++) { + flash_status = le32_to_cpu(nandc->reg_read_buf[i]); + + if (flash_status & FS_MPU_ERR) + host->status &= ~NAND_STATUS_WP; + + if (flash_status & FS_OP_ERR || + (i == (num_cw - 1) && (flash_status & FS_DEVICE_STS_ERR))) + host->status |= NAND_STATUS_FAIL; + } + + flash_status = host->status; + instr = q_op.data_instr; + op_id = q_op.data_instr_idx; + len = nand_subop_get_data_len(subop, op_id); + memcpy(instr->ctx.data.buf.in, &flash_status, len); + +err_out: + return ret; +} + +static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_op q_op = {}; + const struct nand_op_instr *instr = NULL; + unsigned int op_id = 0; + unsigned int len = 0; + int ret; + + ret = qcom_parse_instructions(chip, subop, &q_op); + if (ret) + return ret; + + nandc->buf_count = 0; + nandc->buf_start = 0; + host->use_ecc = false; + + clear_read_regs(nandc); + clear_bam_transaction(nandc); + + nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg); + nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg); + nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg); + nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT, + nandc->props->is_bam ? 
0 : DM_EN); + + nandc_set_reg(chip, NAND_EXEC_CMD, 1); + + write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL); + write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); + + ret = submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure in submitting read id descriptor\n"); + goto err_out; + } + + instr = q_op.data_instr; + op_id = q_op.data_instr_idx; + len = nand_subop_get_data_len(subop, op_id); + + nandc_read_buffer_sync(nandc, true); + memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len); + +err_out: + return ret; +} + +static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_op q_op = {}; + int ret; + int instrs = 1; + + ret = qcom_parse_instructions(chip, subop, &q_op); + if (ret) + return ret; + + if (q_op.flag == OP_PROGRAM_PAGE) { + goto wait_rdy; + } else if (q_op.cmd_reg == OP_BLOCK_ERASE) { + q_op.cmd_reg |= PAGE_ACC | LAST_PAGE; + nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg); + nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg); + nandc_set_reg(chip, NAND_DEV0_CFG0, + host->cfg0_raw & ~(7 << CW_PER_PAGE)); + nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw); + instrs = 3; + } else { + return 0; + } + + nandc->buf_count = 0; + nandc->buf_start = 0; + host->use_ecc = false; + + clear_read_regs(nandc); + clear_bam_transaction(nandc); + + nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg); + nandc_set_reg(chip, NAND_EXEC_CMD, 1); + + write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL); + (q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0, + 2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc, + NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + + write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + + ret = submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure in submitting misc descriptor\n"); + goto err_out; + } + +wait_rdy: + qcom_delay_ns(q_op.rdy_delay_ns); + ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms); + +err_out: + return ret; +} + +static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct qcom_op q_op = {}; + const struct nand_op_instr *instr = NULL; + unsigned int op_id = 0; + unsigned int len = 0; + int ret; + + ret = qcom_parse_instructions(chip, subop, &q_op); + if (ret) + return ret; + + q_op.cmd_reg |= PAGE_ACC | LAST_PAGE; + + nandc->buf_count = 0; + nandc->buf_start = 0; + host->use_ecc = false; + clear_read_regs(nandc); + clear_bam_transaction(nandc); + + nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg); + + nandc_set_reg(chip, NAND_ADDR0, 0); + nandc_set_reg(chip, NAND_ADDR1, 0); + nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE + | 512 << UD_SIZE_BYTES + | 5 << NUM_ADDR_CYCLES + | 0 << SPARE_SIZE_BYTES); + nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES + | 0 << CS_ACTIVE_BSY + | 17 << BAD_BLOCK_BYTE_NUM + | 1 << BAD_BLOCK_IN_SPARE_AREA + | 2 << WR_RD_BSY_GAP + | 0 << WIDE_FLASH + | 1 << DEV0_CFG1_ECC_DISABLE); + if (!nandc->props->qpic_v2) + nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE); + + /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */ + if (!nandc->props->qpic_v2) { 
+ nandc_set_reg(chip, NAND_DEV_CMD_VLD, + (nandc->vld & ~READ_START_VLD)); + nandc_set_reg(chip, NAND_DEV_CMD1, + (nandc->cmd1 & ~(0xFF << READ_ADDR)) + | NAND_CMD_PARAM << READ_ADDR); + } + + nandc_set_reg(chip, NAND_EXEC_CMD, 1); + + if (!nandc->props->qpic_v2) { + nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1); + nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); + } + + instr = q_op.data_instr; + op_id = q_op.data_instr_idx; + len = nand_subop_get_data_len(subop, op_id); + + nandc_set_read_loc(chip, 0, 0, 0, len, 1); + + if (!nandc->props->qpic_v2) { + write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); + write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); + } + + nandc->buf_count = len; + memset(nandc->data_buffer, 0xff, nandc->buf_count); + + config_nand_single_cw_page_read(chip, false, 0); + + read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, + nandc->buf_count, 0); + + /* restore CMD1 and VLD regs */ + if (!nandc->props->qpic_v2) { + write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0); + write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL); + } + + ret = submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure in submitting param page descriptor\n"); + goto err_out; + } + + ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms); + if (ret) + goto err_out; + + memcpy(instr->ctx.data.buf.in, nandc->data_buffer, len); + +err_out: + return ret; +} + +static const struct nand_op_parser qcom_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN( + qcom_read_id_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)), + NAND_OP_PARSER_PATTERN( + qcom_read_status_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)), + NAND_OP_PARSER_PATTERN( + qcom_param_page_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 512)), + NAND_OP_PARSER_PATTERN( + qcom_misc_cmd_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYCLE), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + ); + +static int qcom_check_op(struct nand_chip *chip, + const struct nand_operation *op) +{ + const struct nand_op_instr *instr; + int op_id; + + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + if (instr->ctx.cmd.opcode != NAND_CMD_RESET && + instr->ctx.cmd.opcode != NAND_CMD_READID && + instr->ctx.cmd.opcode != NAND_CMD_PARAM && + instr->ctx.cmd.opcode != NAND_CMD_ERASE1 && + instr->ctx.cmd.opcode != NAND_CMD_ERASE2 && + instr->ctx.cmd.opcode != NAND_CMD_STATUS && + instr->ctx.cmd.opcode != NAND_CMD_PAGEPROG && + instr->ctx.cmd.opcode != NAND_CMD_READ0 && + instr->ctx.cmd.opcode != NAND_CMD_READSTART) + return -EOPNOTSUPP; + break; + default: + break; + } + } + + return 0; +} + +static int qcom_nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, bool check_only) +{ + if (check_only) + return qcom_check_op(chip, op); + + return nand_op_parser_exec_op(chip, &qcom_op_parser, op, check_only); +} + static const struct nand_controller_ops qcom_nandc_ops = { .attach_chip = qcom_nand_attach_chip, + .exec_op = qcom_nand_exec_op, }; static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc) @@ -2912,19 +3057,17 @@ static int 
qcom_nandc_alloc(struct qcom_nand_controller *nandc) */ nandc->buf_size = 532; - nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, - GFP_KERNEL); + nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL); if (!nandc->data_buffer) return -ENOMEM; - nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), - GFP_KERNEL); + nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL); if (!nandc->regs) return -ENOMEM; - nandc->reg_read_buf = devm_kcalloc(nandc->dev, - MAX_REG_RD, sizeof(*nandc->reg_read_buf), - GFP_KERNEL); + nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD, + sizeof(*nandc->reg_read_buf), + GFP_KERNEL); if (!nandc->reg_read_buf) return -ENOMEM; @@ -2969,7 +3112,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc) /* * Initially allocate BAM transaction to read ONFI param page. * After detecting all the devices, this BAM transaction will - * be freed and the next BAM tranasction will be allocated with + * be freed and the next BAM transaction will be allocated with * maximum codeword size */ nandc->max_cwperpage = 1; @@ -3135,14 +3278,6 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, mtd->owner = THIS_MODULE; mtd->dev.parent = dev; - chip->legacy.cmdfunc = qcom_nandc_command; - chip->legacy.select_chip = qcom_nandc_select_chip; - chip->legacy.read_byte = qcom_nandc_read_byte; - chip->legacy.read_buf = qcom_nandc_read_buf; - chip->legacy.write_buf = qcom_nandc_write_buf; - chip->legacy.set_features = nand_get_set_features_notsupp; - chip->legacy.get_features = nand_get_set_features_notsupp; - /* * the bad block marker is readable only when we read the last codeword * of a page with ECC disabled. currently, the nand_base and nand_bbt diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c index 08211e96ce64..3e5df75cbc98 100644 --- a/drivers/mtd/nand/raw/sh_flctl.c +++ b/drivers/mtd/nand/raw/sh_flctl.c @@ -1123,8 +1123,7 @@ static int flctl_probe(struct platform_device *pdev) if (!flctl) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - flctl->reg = devm_ioremap_resource(&pdev->dev, res); + flctl->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(flctl->reg)) return PTR_ERR(flctl->reg); flctl->fifo = res->start + 0x24; /* FLDTFIFO */ diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 10c11cecac08..88811139aaf5 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1922,8 +1922,8 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev) if (!(nfc->cs_assigned & BIT(chip_cs))) continue; - res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region); - nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res); + nfc->data_base[chip_cs] = devm_platform_get_and_ioremap_resource(pdev, + mem_region, &res); if (IS_ERR(nfc->data_base[chip_cs])) return PTR_ERR(nfc->data_base[chip_cs]); @@ -1951,21 +1951,17 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev) init_completion(&nfc->complete); - nfc->clk = devm_clk_get(nfc->cdev, NULL); - if (IS_ERR(nfc->clk)) + nfc->clk = devm_clk_get_enabled(nfc->cdev, NULL); + if (IS_ERR(nfc->clk)) { + dev_err(dev, "can not get and enable the clock\n"); return PTR_ERR(nfc->clk); - - ret = clk_prepare_enable(nfc->clk); - if (ret) { - dev_err(dev, "can not enable the clock\n"); - return ret; } rstc = devm_reset_control_get(dev, NULL); if (IS_ERR(rstc)) { ret = 
PTR_ERR(rstc); if (ret == -EPROBE_DEFER) - goto err_clk_disable; + return ret; } else { reset_control_assert(rstc); reset_control_deassert(rstc); @@ -2018,9 +2014,6 @@ err_release_dma: sg_free_table(&nfc->dma_data_sg); sg_free_table(&nfc->dma_ecc_sg); -err_clk_disable: - clk_disable_unprepare(nfc->clk); - return ret; } @@ -2045,8 +2038,6 @@ static void stm32_fmc2_nfc_remove(struct platform_device *pdev) sg_free_table(&nfc->dma_data_sg); sg_free_table(&nfc->dma_ecc_sg); - clk_disable_unprepare(nfc->clk); - stm32_fmc2_nfc_wp_enable(nand); } diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index c6cd7713bee7..9abf38049d35 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -2086,8 +2086,7 @@ static int sunxi_nfc_probe(struct platform_device *pdev) nand_controller_init(&nfc->controller); INIT_LIST_HEAD(&nfc->chips); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - nfc->regs = devm_ioremap_resource(dev, r); + nfc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r); if (IS_ERR(nfc->regs)) return PTR_ERR(nfc->regs); @@ -2095,37 +2094,26 @@ static int sunxi_nfc_probe(struct platform_device *pdev) if (irq < 0) return irq; - nfc->ahb_clk = devm_clk_get(dev, "ahb"); + nfc->ahb_clk = devm_clk_get_enabled(dev, "ahb"); if (IS_ERR(nfc->ahb_clk)) { dev_err(dev, "failed to retrieve ahb clk\n"); return PTR_ERR(nfc->ahb_clk); } - ret = clk_prepare_enable(nfc->ahb_clk); - if (ret) - return ret; - - nfc->mod_clk = devm_clk_get(dev, "mod"); + nfc->mod_clk = devm_clk_get_enabled(dev, "mod"); if (IS_ERR(nfc->mod_clk)) { dev_err(dev, "failed to retrieve mod clk\n"); - ret = PTR_ERR(nfc->mod_clk); - goto out_ahb_clk_unprepare; + return PTR_ERR(nfc->mod_clk); } - ret = clk_prepare_enable(nfc->mod_clk); - if (ret) - goto out_ahb_clk_unprepare; - nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb"); - if (IS_ERR(nfc->reset)) { - ret = PTR_ERR(nfc->reset); - goto out_mod_clk_unprepare; - } + if (IS_ERR(nfc->reset)) + return PTR_ERR(nfc->reset); ret = reset_control_deassert(nfc->reset); if (ret) { dev_err(dev, "reset err %d\n", ret); - goto out_mod_clk_unprepare; + return ret; } nfc->caps = of_device_get_match_data(&pdev->dev); @@ -2164,10 +2152,6 @@ out_release_dmac: dma_release_channel(nfc->dmac); out_ahb_reset_reassert: reset_control_assert(nfc->reset); -out_mod_clk_unprepare: - clk_disable_unprepare(nfc->mod_clk); -out_ahb_clk_unprepare: - clk_disable_unprepare(nfc->ahb_clk); return ret; } @@ -2182,8 +2166,6 @@ static void sunxi_nfc_remove(struct platform_device *pdev) if (nfc->dmac) dma_release_channel(nfc->dmac); - clk_disable_unprepare(nfc->mod_clk); - clk_disable_unprepare(nfc->ahb_clk); } static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = { diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c index 86522048e271..3f783b8f76c9 100644 --- a/drivers/mtd/nand/raw/vf610_nfc.c +++ b/drivers/mtd/nand/raw/vf610_nfc.c @@ -827,30 +827,24 @@ static int vf610_nfc_probe(struct platform_device *pdev) mtd->name = DRV_NAME; irq = platform_get_irq(pdev, 0); - if (irq <= 0) - return -EINVAL; + if (irq < 0) + return irq; nfc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(nfc->regs)) return PTR_ERR(nfc->regs); - nfc->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(nfc->clk)) + nfc->clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(nfc->clk)) { + dev_err(nfc->dev, "Unable to get and enable clock!\n"); return PTR_ERR(nfc->clk); - - err = clk_prepare_enable(nfc->clk); - if (err) 
{ - dev_err(nfc->dev, "Unable to enable clock!\n"); - return err; } of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev); - if (!of_id) { - err = -ENODEV; - goto err_disable_clk; - } + if (!of_id) + return -ENODEV; - nfc->variant = (enum vf610_nfc_variant)of_id->data; + nfc->variant = (uintptr_t)of_id->data; for_each_available_child_of_node(nfc->dev->of_node, child) { if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) { @@ -858,9 +852,8 @@ static int vf610_nfc_probe(struct platform_device *pdev) if (nand_get_flash_node(chip)) { dev_err(nfc->dev, "Only one NAND chip supported!\n"); - err = -EINVAL; of_node_put(child); - goto err_disable_clk; + return -EINVAL; } nand_set_flash_node(chip, child); @@ -869,8 +862,7 @@ static int vf610_nfc_probe(struct platform_device *pdev) if (!nand_get_flash_node(chip)) { dev_err(nfc->dev, "NAND chip sub-node missing!\n"); - err = -ENODEV; - goto err_disable_clk; + return -ENODEV; } chip->options |= NAND_NO_SUBPAGE_WRITE; @@ -880,7 +872,7 @@ static int vf610_nfc_probe(struct platform_device *pdev) err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, nfc); if (err) { dev_err(nfc->dev, "Error requesting IRQ!\n"); - goto err_disable_clk; + return err; } vf610_nfc_preinit_controller(nfc); @@ -892,7 +884,7 @@ static int vf610_nfc_probe(struct platform_device *pdev) /* Scan the NAND chip */ err = nand_scan(chip, 1); if (err) - goto err_disable_clk; + return err; platform_set_drvdata(pdev, nfc); @@ -904,8 +896,6 @@ static int vf610_nfc_probe(struct platform_device *pdev) err_cleanup_nand: nand_cleanup(chip); -err_disable_clk: - clk_disable_unprepare(nfc->clk); return err; } @@ -918,7 +908,6 @@ static void vf610_nfc_remove(struct platform_device *pdev) ret = mtd_device_unregister(nand_to_mtd(chip)); WARN_ON(ret); nand_cleanup(chip); - clk_disable_unprepare(nfc->clk); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c index 1a3ffb982335..31c439a557b1 100644 --- a/drivers/mtd/nand/spi/esmt.c +++ b/drivers/mtd/nand/spi/esmt.c @@ -121,6 +121,15 @@ static const struct spinand_info esmt_c8_spinand_table[] = { &update_cache_variants), 0, SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)), + SPINAND_INFO("F50D2G41KA", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51), + NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)), }; static const struct spinand_manufacturer_ops esmt_spinand_manuf_ops = { diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index cfd7c3b26dc4..987710e09441 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -511,6 +511,26 @@ static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, gd5fxgq4uexxg_ecc_get_status)), + SPINAND_INFO("GD5F1GQ5RExxH", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x21), + NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(4, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, + gd5fxgq4uexxg_ecc_get_status)), + SPINAND_INFO("GD5F1GQ4RExxH", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xc9), + NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(4, 512), + 
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, + gd5fxgq4uexxg_ecc_get_status)), }; static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = { diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 7380b1ebaccd..cd027b41dd58 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -266,6 +266,39 @@ static const struct spinand_info toshiba_spinand_table[] = { SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout, tx58cxgxsxraix_ecc_get_status)), + /* 1.8V 1Gb (1st generation) */ + SPINAND_INFO("TC58NYG0S3HBAI4", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA1), + NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout, + tx58cxgxsxraix_ecc_get_status)), + /* 1.8V 4Gb (1st generation) */ + SPINAND_INFO("TH58NYG2S3HBAI4", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xAC), + NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 2, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_x4_variants, + &update_cache_x4_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout, + tx58cxgxsxraix_ecc_get_status)), + /* 1.8V 8Gb (1st generation) */ + SPINAND_INFO("TH58NYG3S0HBAI6", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA3), + NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_x4_variants, + &update_cache_x4_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout, + tx58cxgxsxraix_ecc_get_status)), }; static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = { |
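
A minimal, illustrative probe() sketch (not part of the diff above) of the conversion pattern this merge applies across the omap2, samsung, sh_flctl, stm32_fmc2, sunxi and vf610 drivers: platform_get_resource() plus devm_ioremap_resource() collapse into a single devm_platform_get_and_ioremap_resource() call, and devm_clk_get() plus clk_prepare_enable() collapse into devm_clk_get_enabled(), which removes the clk_disable_unprepare() error and remove paths. devm_platform_get_and_ioremap_resource(), devm_clk_get_enabled() and dev_err_probe() are the real kernel helpers used in the hunks above; the "foo_nfc" driver name and its private struct are hypothetical.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical private state, just for the sketch. */
struct foo_nfc {
	void __iomem *regs;
	struct clk *clk;
	phys_addr_t phys_base;
};

static int foo_nfc_probe(struct platform_device *pdev)
{
	struct foo_nfc *nfc;
	struct resource *res;

	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	/*
	 * One call replaces platform_get_resource() + devm_ioremap_resource();
	 * the struct resource is still handed back through @res for drivers
	 * that need e.g. the physical base address.
	 */
	nfc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(nfc->regs))
		return PTR_ERR(nfc->regs);
	nfc->phys_base = res->start;

	/*
	 * One call replaces devm_clk_get() + clk_prepare_enable(); the clock
	 * is disabled and unprepared automatically on driver detach, so no
	 * error label or clk_disable_unprepare() in remove() is needed.
	 */
	nfc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(nfc->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(nfc->clk),
				     "failed to get and enable clock\n");

	platform_set_drvdata(pdev, nfc);

	return 0;
}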