Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig                    |   44
-rw-r--r--  drivers/spi/Makefile                   |    7
-rw-r--r--  drivers/spi/spi-aspeed-smc.c           |    7
-rw-r--r--  drivers/spi/spi-axi-spi-engine.c       |  315
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c      |    8
-rw-r--r--  drivers/spi/spi-fsi.c                  |   13
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c            |    2
-rw-r--r--  drivers/spi/spi-gpio.c                 |   45
-rw-r--r--  drivers/spi/spi-imx.c                  |    2
-rw-r--r--  drivers/spi/spi-mem.c                  |   11
-rw-r--r--  drivers/spi/spi-mt65xx.c               |   17
-rw-r--r--  drivers/spi/spi-mtk-snfi.c             |    3
-rw-r--r--  drivers/spi/spi-mux.c                  |    4
-rw-r--r--  drivers/spi/spi-npcm-fiu.c             |    5
-rw-r--r--  drivers/spi/spi-offload-trigger-pwm.c  |  169
-rw-r--r--  drivers/spi/spi-offload.c              |  468
-rw-r--r--  drivers/spi/spi-qpic-snand.c           | 1633
-rw-r--r--  drivers/spi/spi-realtek-rtl-snand.c    |    1
-rw-r--r--  drivers/spi/spi-s3c64xx.c              |    4
-rw-r--r--  drivers/spi/spi-sg2044-nor.c           |  488
-rw-r--r--  drivers/spi/spi-stm32-ospi.c           | 1063
-rw-r--r--  drivers/spi/spi-stm32-qspi.c           |    5
-rw-r--r--  drivers/spi/spi-zynq-qspi.c            |    4
-rw-r--r--  drivers/spi/spi-zynqmp-gqspi.c         |  173
-rw-r--r--  drivers/spi/spi.c                      |  117
-rw-r--r--  drivers/spi/spidev.c                   |    2
26 files changed, 4383 insertions, 227 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ea8a31032927..f40c282d4d63 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -55,6 +55,9 @@ config SPI_MEM
This extension is meant to simplify interaction with SPI memories
by providing a high-level interface to send memory-like commands.
+config SPI_OFFLOAD
+ bool
+
comment "SPI Master Controller Drivers"
config SPI_AIROHA_SNFI
@@ -176,6 +179,7 @@ config SPI_AU1550
config SPI_AXI_SPI_ENGINE
tristate "Analog Devices AXI SPI Engine controller"
depends on HAS_IOMEM
+ select SPI_OFFLOAD
help
This enables support for the Analog Devices AXI SPI Engine SPI controller.
It is part of the SPI Engine framework that is used in some Analog Devices
@@ -932,6 +936,15 @@ config SPI_QCOM_QSPI
help
QSPI(Quad SPI) driver for Qualcomm QSPI controller.
+config SPI_QPIC_SNAND
+ bool "QPIC SNAND controller"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select MTD
+ help
+ QPIC_SNAND (QPIC SPI NAND) driver for the Qualcomm QPIC controller.
+ The QPIC controller supports both parallel NAND and serial NAND.
+ This config enables the serial NAND driver for the QPIC controller.
+
config SPI_QUP
tristate "Qualcomm SPI controller with QUP interface"
depends on ARCH_QCOM || COMPILE_TEST
@@ -1021,6 +1034,15 @@ config SPI_SN_F_OSPI
for connecting an SPI Flash memory over up to 8-bit wide bus.
It supports indirect access mode only.
+config SPI_SG2044_NOR
+ tristate "SG2044 SPI NOR Controller"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ This enables support for the SG2044 SPI NOR controller,
+ which supports Dual/Quad read and write operations and both
+ 3-byte and 4-byte address devices.
+
config SPI_SPRD
tristate "Spreadtrum SPI controller"
depends on ARCH_SPRD || COMPILE_TEST
@@ -1045,6 +1067,16 @@ config SPI_STM32
is not available, the driver automatically falls back to
PIO mode.
+config SPI_STM32_OSPI
+ tristate "STMicroelectronics STM32 OCTO SPI controller"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on SPI_MEM
+ help
+ This enables support for the Octo SPI controller in master mode.
+ This driver does not support generic SPI; it only supports the
+ spi-mem interface.
+
config SPI_STM32_QSPI
tristate "STMicroelectronics STM32 QUAD SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
@@ -1317,4 +1349,16 @@ endif # SPI_SLAVE
config SPI_DYNAMIC
def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+if SPI_OFFLOAD
+
+comment "SPI Offload triggers"
+
+config SPI_OFFLOAD_TRIGGER_PWM
+ tristate "SPI offload trigger using PWM"
+ depends on PWM
+ help
+ Generic SPI offload trigger implemented using PWM output.
+
+endif # SPI_OFFLOAD
+
endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 9db7554c1864..c3a1a47b3bf4 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -10,6 +10,7 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
obj-$(CONFIG_SPI_MASTER) += spi.o
obj-$(CONFIG_SPI_MEM) += spi-mem.o
obj-$(CONFIG_SPI_MUX) += spi-mux.o
+obj-$(CONFIG_SPI_OFFLOAD) += spi-offload.o
obj-$(CONFIG_SPI_SPIDEV) += spidev.o
obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
@@ -116,6 +117,7 @@ obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
+obj-$(CONFIG_SPI_QPIC_SNAND) += spi-qpic-snand.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
@@ -134,9 +136,11 @@ obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
obj-$(CONFIG_SPI_SN_F_OSPI) += spi-sn-f-ospi.o
+obj-$(CONFIG_SPI_SG2044_NOR) += spi-sg2044-nor.o
obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
+obj-$(CONFIG_SPI_STM32_OSPI) += spi-stm32-ospi.o
obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
@@ -163,3 +167,6 @@ obj-$(CONFIG_SPI_AMD) += spi-amd.o
# SPI slave protocol handlers
obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o
+
+# SPI offload triggers
+obj-$(CONFIG_SPI_OFFLOAD_TRIGGER_PWM) += spi-offload-trigger-pwm.o
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index e9beae95dded..62a11142bd63 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -303,13 +303,6 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
u32 ctl_val;
int ret = 0;
- dev_dbg(aspi->dev,
- "CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
- chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth,
- op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
-
addr_mode = readl(aspi->regs + CE_CTRL_REG);
addr_mode_backup = addr_mode;
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 7c252126b33e..da9840957778 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -2,11 +2,15 @@
/*
* SPI-Engine SPI controller driver
* Copyright 2015 Analog Devices Inc.
+ * Copyright 2024 BayLibre, SAS
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/dmaengine.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -14,9 +18,11 @@
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
+#include <linux/spi/offload/provider.h>
#include <linux/spi/spi.h>
#include <trace/events/spi.h>
+#define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH 0x10
#define SPI_ENGINE_REG_RESET 0x40
#define SPI_ENGINE_REG_INT_ENABLE 0x80
@@ -24,6 +30,7 @@
#define SPI_ENGINE_REG_INT_SOURCE 0x88
#define SPI_ENGINE_REG_SYNC_ID 0xc0
+#define SPI_ENGINE_REG_OFFLOAD_SYNC_ID 0xc4
#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
@@ -34,10 +41,24 @@
#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
+#define SPI_ENGINE_MAX_NUM_OFFLOADS 32
+
+#define SPI_ENGINE_REG_OFFLOAD_CTRL(x) (0x100 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_STATUS(x) (0x104 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_RESET(x) (0x108 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(x) (0x110 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(x) (0x114 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+
+#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO GENMASK(15, 8)
+#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD GENMASK(7, 0)
+
#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
#define SPI_ENGINE_INT_SYNC BIT(3)
+#define SPI_ENGINE_INT_OFFLOAD_SYNC BIT(4)
+
+#define SPI_ENGINE_OFFLOAD_CTRL_ENABLE BIT(0)
#define SPI_ENGINE_CONFIG_CPHA BIT(0)
#define SPI_ENGINE_CONFIG_CPOL BIT(1)
@@ -79,6 +100,10 @@
#define SPI_ENGINE_CMD_CS_INV(flags) \
SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
+/* default sizes - can be changed when SPI Engine firmware is compiled */
+#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16
+#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16
+
struct spi_engine_program {
unsigned int length;
uint16_t instructions[] __counted_by(length);
@@ -106,6 +131,17 @@ struct spi_engine_message_state {
uint8_t *rx_buf;
};
+enum {
+ SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED,
+ SPI_ENGINE_OFFLOAD_FLAG_PREPARED,
+};
+
+struct spi_engine_offload {
+ struct spi_engine *spi_engine;
+ unsigned long flags;
+ unsigned int offload_num;
+};
+
struct spi_engine {
struct clk *clk;
struct clk *ref_clk;
@@ -118,6 +154,11 @@ struct spi_engine {
unsigned int int_enable;
/* shadows hardware CS inversion flag state */
u8 cs_inv;
+
+ unsigned int offload_ctrl_mem_size;
+ unsigned int offload_sdo_mem_size;
+ struct spi_offload *offload;
+ u32 offload_caps;
};
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
@@ -163,9 +204,9 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
unsigned int n = min(len, 256U);
unsigned int flags = 0;
- if (xfer->tx_buf)
+ if (xfer->tx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM))
flags |= SPI_ENGINE_TRANSFER_WRITE;
- if (xfer->rx_buf)
+ if (xfer->rx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM))
flags |= SPI_ENGINE_TRANSFER_READ;
spi_engine_program_add_cmd(p, dry,
@@ -217,16 +258,24 @@ static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
*
* NB: This is separate from spi_engine_compile_message() because the latter
* is called twice and would otherwise result in double-evaluation.
+ *
+ * Returns 0 on success, -EINVAL on failure.
*/
-static void spi_engine_precompile_message(struct spi_message *msg)
+static int spi_engine_precompile_message(struct spi_message *msg)
{
unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
struct spi_transfer *xfer;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* If we have an offload transfer, we can't rx to buffer */
+ if (msg->offload && xfer->rx_buf)
+ return -EINVAL;
+
clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
}
+
+ return 0;
}
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
@@ -521,11 +570,105 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static int spi_engine_offload_prepare(struct spi_message *msg)
+{
+ struct spi_controller *host = msg->spi->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_program *p = msg->opt_state;
+ struct spi_engine_offload *priv = msg->offload->priv;
+ struct spi_transfer *xfer;
+ void __iomem *cmd_addr;
+ void __iomem *sdo_addr;
+ size_t tx_word_count = 0;
+ unsigned int i;
+
+ if (p->length > spi_engine->offload_ctrl_mem_size)
+ return -EINVAL;
+
+ /* count total number of tx words in message */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* no support for reading to rx_buf */
+ if (xfer->rx_buf)
+ return -EINVAL;
+
+ if (!xfer->tx_buf)
+ continue;
+
+ if (xfer->bits_per_word <= 8)
+ tx_word_count += xfer->len;
+ else if (xfer->bits_per_word <= 16)
+ tx_word_count += xfer->len / 2;
+ else
+ tx_word_count += xfer->len / 4;
+ }
+
+ if (tx_word_count && !(spi_engine->offload_caps & SPI_OFFLOAD_CAP_TX_STATIC_DATA))
+ return -EINVAL;
+
+ if (tx_word_count > spi_engine->offload_sdo_mem_size)
+ return -EINVAL;
+
+ /*
+ * This protects against calling spi_optimize_message() with an offload
+ * that has already been prepared with a different message.
+ */
+ if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags))
+ return -EBUSY;
+
+ cmd_addr = spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(priv->offload_num);
+ sdo_addr = spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(priv->offload_num);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!xfer->tx_buf)
+ continue;
+
+ if (xfer->bits_per_word <= 8) {
+ const u8 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ } else if (xfer->bits_per_word <= 16) {
+ const u16 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len / 2; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ } else {
+ const u32 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len / 4; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ }
+ }
+
+ for (i = 0; i < p->length; i++)
+ writel_relaxed(p->instructions[i], cmd_addr);
+
+ return 0;
+}
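
Note the SDO FIFO bound above is in words, not bytes: each sample of static
TX data occupies one FIFO word regardless of word size. For a hypothetical
two-transfer message, an 8-bit transfer of 2 bytes plus a 16-bit transfer of
4 bytes loads

	tx_word_count = 2 + 4 / 2;	/* = 4 words, vs. a 16-word default FIFO */
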
+
+static void spi_engine_offload_unprepare(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+
+ writel_relaxed(1, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
+ writel_relaxed(0, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
+
+ clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags);
+}
+
static int spi_engine_optimize_message(struct spi_message *msg)
{
struct spi_engine_program p_dry, *p;
+ int ret;
- spi_engine_precompile_message(msg);
+ ret = spi_engine_precompile_message(msg);
+ if (ret)
+ return ret;
p_dry.length = 0;
spi_engine_compile_message(msg, true, &p_dry);
@@ -537,20 +680,61 @@ static int spi_engine_optimize_message(struct spi_message *msg)
spi_engine_compile_message(msg, false, p);
spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
- AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
+ msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
msg->opt_state = p;
+ if (msg->offload) {
+ ret = spi_engine_offload_prepare(msg);
+ if (ret) {
+ msg->opt_state = NULL;
+ kfree(p);
+ return ret;
+ }
+ }
+
return 0;
}
static int spi_engine_unoptimize_message(struct spi_message *msg)
{
+ if (msg->offload)
+ spi_engine_offload_unprepare(msg->offload);
+
kfree(msg->opt_state);
return 0;
}
+static struct spi_offload
+*spi_engine_get_offload(struct spi_device *spi,
+ const struct spi_offload_config *config)
+{
+ struct spi_controller *host = spi->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_offload *priv;
+
+ if (!spi_engine->offload)
+ return ERR_PTR(-ENODEV);
+
+ if (config->capability_flags & ~spi_engine->offload_caps)
+ return ERR_PTR(-EINVAL);
+
+ priv = spi_engine->offload->priv;
+
+ if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags))
+ return ERR_PTR(-EBUSY);
+
+ return spi_engine->offload;
+}
+
+static void spi_engine_put_offload(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+
+ clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags);
+}
+
static int spi_engine_setup(struct spi_device *device)
{
struct spi_controller *host = device->controller;
@@ -583,6 +767,12 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
unsigned int int_enable = 0;
unsigned long flags;
+ if (msg->offload) {
+ dev_err(&host->dev, "Single transfer offload not supported\n");
+ msg->status = -EOPNOTSUPP;
+ goto out;
+ }
+
/* reinitialize message state for this transfer */
memset(st, 0, sizeof(*st));
st->cmd_buf = p->instructions;
@@ -632,11 +822,68 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
trace_spi_transfer_stop(msg, xfer);
}
+out:
spi_finalize_current_message(host);
return msg->status;
}
+static int spi_engine_trigger_enable(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+ unsigned int reg;
+
+ reg = readl_relaxed(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ reg |= SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
+ writel_relaxed(reg, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ return 0;
+}
+
+static void spi_engine_trigger_disable(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+ unsigned int reg;
+
+ reg = readl_relaxed(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
+ writel_relaxed(reg, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+}
+
+static struct dma_chan
+*spi_engine_tx_stream_request_dma_chan(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ char name[16];
+
+ snprintf(name, sizeof(name), "offload%u-tx", priv->offload_num);
+
+ return dma_request_chan(offload->provider_dev, name);
+}
+
+static struct dma_chan
+*spi_engine_rx_stream_request_dma_chan(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ char name[16];
+
+ snprintf(name, sizeof(name), "offload%u-rx", priv->offload_num);
+
+ return dma_request_chan(offload->provider_dev, name);
+}
+
+static const struct spi_offload_ops spi_engine_offload_ops = {
+ .trigger_enable = spi_engine_trigger_enable,
+ .trigger_disable = spi_engine_trigger_disable,
+ .tx_stream_request_dma_chan = spi_engine_tx_stream_request_dma_chan,
+ .rx_stream_request_dma_chan = spi_engine_rx_stream_request_dma_chan,
+};
+
static void spi_engine_release_hw(void *p)
{
struct spi_engine *spi_engine = p;
@@ -651,8 +898,7 @@ static int spi_engine_probe(struct platform_device *pdev)
struct spi_engine *spi_engine;
struct spi_controller *host;
unsigned int version;
- int irq;
- int ret;
+ int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -667,6 +913,46 @@ static int spi_engine_probe(struct platform_device *pdev)
spin_lock_init(&spi_engine->lock);
init_completion(&spi_engine->msg_complete);
+ /*
+ * REVISIT: for now, all SPI Engines only have one offload. In the
+ * future, this should be read from a memory mapped register to
+ * determine the number of offloads enabled at HDL compile time. For
+ * now, we can tell if an offload is present if there is a trigger
+ * source wired up to it.
+ */
+ if (device_property_present(&pdev->dev, "trigger-sources")) {
+ struct spi_engine_offload *priv;
+
+ spi_engine->offload =
+ devm_spi_offload_alloc(&pdev->dev,
+ sizeof(struct spi_engine_offload));
+ if (IS_ERR(spi_engine->offload))
+ return PTR_ERR(spi_engine->offload);
+
+ priv = spi_engine->offload->priv;
+ priv->spi_engine = spi_engine;
+ priv->offload_num = 0;
+
+ spi_engine->offload->ops = &spi_engine_offload_ops;
+ spi_engine->offload_caps = SPI_OFFLOAD_CAP_TRIGGER;
+
+ if (device_property_match_string(&pdev->dev, "dma-names", "offload0-rx") >= 0) {
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_RX_STREAM_DMA;
+ spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_RX_STREAM;
+ }
+
+ if (device_property_match_string(&pdev->dev, "dma-names", "offload0-tx") >= 0) {
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STREAM_DMA;
+ spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_TX_STREAM;
+ } else {
+ /*
+ * HDL compile option to enable TX DMA stream also disables
+ * the SDO memory, so can't do both at the same time.
+ */
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STATIC_DATA;
+ }
+ }
+
spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
if (IS_ERR(spi_engine->clk))
return PTR_ERR(spi_engine->clk);
@@ -688,6 +974,19 @@ static int spi_engine_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (ADI_AXI_PCORE_VER_MINOR(version) >= 1) {
+ unsigned int sizes = readl(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);
+
+ spi_engine->offload_ctrl_mem_size = 1 <<
+ FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes);
+ spi_engine->offload_sdo_mem_size = 1 <<
+ FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes);
+ } else {
+ spi_engine->offload_ctrl_mem_size = SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE;
+ spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE;
+ }
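
A worked decoding of the width register above, with an assumed readout (the
actual value is fixed when the HDL is compiled): if
SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH reads 0x0605, then

	/* CMD field is bits 7:0, SDO field is bits 15:8 */
	offload_ctrl_mem_size = 1 << FIELD_GET(GENMASK(7, 0), 0x0605);  /* = 32 */
	offload_sdo_mem_size  = 1 << FIELD_GET(GENMASK(15, 8), 0x0605); /* = 64 */
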
+
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
@@ -709,6 +1008,8 @@ static int spi_engine_probe(struct platform_device *pdev)
host->transfer_one_message = spi_engine_transfer_one_message;
host->optimize_message = spi_engine_optimize_message;
host->unoptimize_message = spi_engine_unoptimize_message;
+ host->get_offload = spi_engine_get_offload;
+ host->put_offload = spi_engine_put_offload;
host->num_chipselect = 8;
/* Some features depend of the IP core version. */
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 0cd37a7436d5..559fbdfbd9f7 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1658,6 +1658,12 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
int ret = PTR_ERR(cqspi->rx_chan);
cqspi->rx_chan = NULL;
+ if (ret == -ENODEV) {
+ /* DMA support is not mandatory */
+ dev_info(&cqspi->pdev->dev, "No Rx DMA available\n");
+ return 0;
+ }
+
return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
}
init_completion(&cqspi->rx_dma_complete);
@@ -2067,7 +2073,7 @@ static const struct cqspi_driver_platdata k2g_qspi = {
static const struct cqspi_driver_platdata am654_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD,
- .quirks = CQSPI_NEEDS_WR_DELAY,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NEEDS_WR_DELAY,
};
static const struct cqspi_driver_platdata intel_lgm_qspi = {
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index fc9e33be1e0e..e01c63d23b64 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -479,6 +479,19 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
fsi_spi_sequence_add(&seq, shift);
+ } else if (next->tx_buf) {
+ if ((next->len + transfer->len) > (SPI_FSI_MAX_TX_SIZE + 8)) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ len = next->len;
+ while (len > 8) {
+ fsi_spi_sequence_add(&seq,
+ SPI_FSI_SEQUENCE_SHIFT_OUT(8));
+ len -= 8;
+ }
+ fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
} else {
next = NULL;
}
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 40f5c8fdba76..5e3818445234 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -572,7 +572,7 @@ static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
timeout += 1;
/* Double calculated timeout */
- return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+ return secs_to_jiffies(2 * timeout);
}
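
The conversion is behavior-preserving: timeout is already in seconds here,
so routing it through milliseconds was a detour. For whole-second values the
two expressions produce the same jiffies count:

	msecs_to_jiffies(2 * timeout * MSEC_PER_SEC) == secs_to_jiffies(2 * timeout)
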
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 4f192e013cd6..405deb6677c1 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -39,36 +39,8 @@ struct spi_gpio {
/*----------------------------------------------------------------------*/
-/*
- * Because the overhead of going through four GPIO procedure calls
- * per transferred bit can make performance a problem, this code
- * is set up so that you can use it in either of two ways:
- *
- * - The slow generic way: set up platform_data to hold the GPIO
- * numbers used for MISO/MOSI/SCK, and issue procedure calls for
- * each of them. This driver can handle several such busses.
- *
- * - The quicker inlined way: only helps with platform GPIO code
- * that inlines operations for constant GPIOs. This can give
- * you tight (fast!) inner loops, but each such bus needs a
- * new driver. You'll define a new C file, with Makefile and
- * Kconfig support; the C code can be a total of six lines:
- *
- * #define DRIVER_NAME "myboard_spi2"
- * #define SPI_MISO_GPIO 119
- * #define SPI_MOSI_GPIO 120
- * #define SPI_SCK_GPIO 121
- * #define SPI_N_CHIPSEL 4
- * #include "spi-gpio.c"
- */
-
-#ifndef DRIVER_NAME
#define DRIVER_NAME "spi_gpio"
-#define GENERIC_BITBANG /* vs tight inlines */
-
-#endif
-
/*----------------------------------------------------------------------*/
static inline struct spi_gpio *__pure
@@ -341,16 +313,14 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev,
struct spi_gpio *spi_gpio = spi_controller_get_devdata(host);
int i;
-#ifdef GENERIC_BITBANG
- if (!pdata || !pdata->num_chipselect)
+ if (!pdata)
return -ENODEV;
-#endif
- /*
- * The host needs to think there is a chipselect even if not
- * connected
- */
- host->num_chipselect = pdata->num_chipselect ?: 1;
+ /* It's just one always-selected device, fine to continue */
+ if (!pdata->num_chipselect)
+ return 0;
+
+ host->num_chipselect = pdata->num_chipselect;
spi_gpio->cs_gpios = devm_kcalloc(dev, host->num_chipselect,
sizeof(*spi_gpio->cs_gpios),
GFP_KERNEL);
@@ -445,8 +415,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
return devm_spi_register_controller(&pdev->dev, host);
}
-MODULE_ALIAS("platform:" DRIVER_NAME);
-
static const struct of_device_id spi_gpio_dt_ids[] = {
{ .compatible = "spi-gpio" },
{}
@@ -465,3 +433,4 @@ module_platform_driver(spi_gpio_driver);
MODULE_DESCRIPTION("SPI host driver using generic bitbanged GPIO ");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eeb7d082c247..832d6e9009eb 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1449,7 +1449,7 @@ static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
timeout += 1;
/* Double calculated timeout */
- return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+ return secs_to_jiffies(2 * timeout);
}
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index a9f0f47f4759..a31a1db07aa4 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -377,6 +377,17 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
/* Make sure the operation frequency is correct before going further */
spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);
+ dev_vdbg(&mem->spi->dev, "[cmd: 0x%02x][%dB addr: %#8llx][%2dB dummy][%4dB data %s] %d%c-%d%c-%d%c-%d%c @ %uHz\n",
+ op->cmd.opcode,
+ op->addr.nbytes, (op->addr.nbytes ? op->addr.val : 0),
+ op->dummy.nbytes,
+ op->data.nbytes, (op->data.nbytes ? (op->data.dir == SPI_MEM_DATA_IN ? " read" : "write") : " "),
+ op->cmd.buswidth, op->cmd.dtr ? 'D' : 'S',
+ op->addr.buswidth, op->addr.dtr ? 'D' : 'S',
+ op->dummy.buswidth, op->dummy.dtr ? 'D' : 'S',
+ op->data.buswidth, op->data.dtr ? 'D' : 'S',
+ op->max_freq ? op->max_freq : mem->spi->max_speed_hz);
+
ret = spi_mem_check_op(op);
if (ret)
return ret;
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 197bf2dbe5de..4b0a1c0db041 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_qos.h>
#define SPI_CFG0_REG 0x0000
#define SPI_CFG1_REG 0x0004
@@ -147,6 +148,7 @@ struct mtk_spi_compatible {
* @tx_sgl_len: Size of TX DMA transfer
* @rx_sgl_len: Size of RX DMA transfer
* @dev_comp: Device data structure
+ * @qos_request: QoS request
* @spi_clk_hz: Current SPI clock in Hz
* @spimem_done: SPI-MEM operation completion
* @use_spimem: Enables SPI-MEM
@@ -166,6 +168,7 @@ struct mtk_spi {
struct scatterlist *tx_sgl, *rx_sgl;
u32 tx_sgl_len, rx_sgl_len;
const struct mtk_spi_compatible *dev_comp;
+ struct pm_qos_request qos_request;
u32 spi_clk_hz;
struct completion spimem_done;
bool use_spimem;
@@ -356,6 +359,7 @@ static int mtk_spi_hw_init(struct spi_controller *host,
struct mtk_chip_config *chip_config = spi->controller_data;
struct mtk_spi *mdata = spi_controller_get_devdata(host);
+ cpu_latency_qos_update_request(&mdata->qos_request, 500);
cpha = spi->mode & SPI_CPHA ? 1 : 0;
cpol = spi->mode & SPI_CPOL ? 1 : 0;
@@ -459,6 +463,15 @@ static int mtk_spi_prepare_message(struct spi_controller *host,
return mtk_spi_hw_init(host, msg->spi);
}
+static int mtk_spi_unprepare_message(struct spi_controller *host,
+ struct spi_message *message)
+{
+ struct mtk_spi *mdata = spi_controller_get_devdata(host);
+
+ cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
+ return 0;
+}
+
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 reg_val;
@@ -1143,6 +1156,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
host->set_cs = mtk_spi_set_cs;
host->prepare_message = mtk_spi_prepare_message;
+ host->unprepare_message = mtk_spi_unprepare_message;
host->transfer_one = mtk_spi_transfer_one;
host->can_dma = mtk_spi_can_dma;
host->setup = mtk_spi_setup;
@@ -1249,6 +1263,8 @@ static int mtk_spi_probe(struct platform_device *pdev)
clk_disable_unprepare(mdata->spi_hclk);
}
+ cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
+
if (mdata->dev_comp->need_pad_sel) {
if (mdata->pad_num != host->num_chipselect)
return dev_err_probe(dev, -EINVAL,
@@ -1292,6 +1308,7 @@ static void mtk_spi_remove(struct platform_device *pdev)
struct mtk_spi *mdata = spi_controller_get_devdata(host);
int ret;
+ cpu_latency_qos_remove_request(&mdata->qos_request);
if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
complete(&mdata->spimem_done);
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
index fdbea9dffb62..e82ee6dcf498 100644
--- a/drivers/spi/spi-mtk-snfi.c
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -1284,9 +1284,6 @@ static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->controller);
- dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
- op->addr.val, op->addr.buswidth, op->addr.nbytes,
- op->data.buswidth, op->data.nbytes);
if (mtk_snand_is_page_ops(op)) {
if (op->data.dir == SPI_MEM_DATA_IN)
return mtk_snand_read_page_cache(ms, op);
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index c02c4204442f..0eb35c4e3987 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -68,9 +68,7 @@ static int spi_mux_select(struct spi_device *spi)
priv->current_cs = spi_get_chipselect(spi, 0);
- spi_setup(priv->spi);
-
- return 0;
+ return spi_setup(priv->spi);
}
static int spi_mux_setup(struct spi_device *spi)
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 958bab27a081..67cc1d86de42 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -550,11 +550,6 @@ static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
int ret = 0;
u8 *buf;
- dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth, op->addr.val,
- op->data.nbytes);
-
if (fiu->spix_mode || op->addr.nbytes > 4)
return -EOPNOTSUPP;
diff --git a/drivers/spi/spi-offload-trigger-pwm.c b/drivers/spi/spi-offload-trigger-pwm.c
new file mode 100644
index 000000000000..805ed41560df
--- /dev/null
+++ b/drivers/spi/spi-offload-trigger-pwm.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ *
+ * Generic PWM trigger for SPI offload.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pwm.h>
+#include <linux/spi/offload/provider.h>
+#include <linux/spi/offload/types.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+struct spi_offload_trigger_pwm_state {
+ struct device *dev;
+ struct pwm_device *pwm;
+};
+
+static bool spi_offload_trigger_pwm_match(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ if (nargs)
+ return false;
+
+ return type == SPI_OFFLOAD_TRIGGER_PERIODIC;
+}
+
+static int spi_offload_trigger_pwm_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct spi_offload_trigger_periodic *periodic = &config->periodic;
+ struct pwm_waveform wf = { };
+ int ret;
+
+ if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC)
+ return -EINVAL;
+
+ if (!periodic->frequency_hz)
+ return -EINVAL;
+
+ wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz);
+ /* REVISIT: 50% duty-cycle for now - may add config parameter later */
+ wf.duty_length_ns = wf.period_length_ns / 2;
+
+ ret = pwm_round_waveform_might_sleep(st->pwm, &wf);
+ if (ret < 0)
+ return ret;
+
+ periodic->frequency_hz = DIV_ROUND_UP_ULL(NSEC_PER_SEC, wf.period_length_ns);
+
+ return 0;
+}
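
A worked round-trip of the validate path, with an assumed request and an
assumed hardware rounding: for periodic->frequency_hz = 300000,

	wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, 300000); /* = 3334 ns */
	wf.duty_length_ns = 3334 / 2;                                 /* = 1667 ns */
	/* if the PWM rounds the period to, say, 3340 ns: */
	periodic->frequency_hz = DIV_ROUND_UP_ULL(NSEC_PER_SEC, 3340); /* = 299402 Hz */

so the caller sees the frequency the hardware will actually generate, not
the one it asked for.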
+
+static int spi_offload_trigger_pwm_enable(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct spi_offload_trigger_periodic *periodic = &config->periodic;
+ struct pwm_waveform wf = { };
+
+ if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC)
+ return -EINVAL;
+
+ if (!periodic->frequency_hz)
+ return -EINVAL;
+
+ wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz);
+ /* REVISIT: 50% duty-cycle for now - may add config parameter later */
+ wf.duty_length_ns = wf.period_length_ns / 2;
+
+ return pwm_set_waveform_might_sleep(st->pwm, &wf, false);
+}
+
+static void spi_offload_trigger_pwm_disable(struct spi_offload_trigger *trigger)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct pwm_waveform wf;
+ int ret;
+
+ ret = pwm_get_waveform_might_sleep(st->pwm, &wf);
+ if (ret < 0) {
+ dev_err(st->dev, "failed to get waveform: %d\n", ret);
+ return;
+ }
+
+ wf.duty_length_ns = 0;
+
+ ret = pwm_set_waveform_might_sleep(st->pwm, &wf, false);
+ if (ret < 0)
+ dev_err(st->dev, "failed to disable PWM: %d\n", ret);
+}
+
+static const struct spi_offload_trigger_ops spi_offload_trigger_pwm_ops = {
+ .match = spi_offload_trigger_pwm_match,
+ .validate = spi_offload_trigger_pwm_validate,
+ .enable = spi_offload_trigger_pwm_enable,
+ .disable = spi_offload_trigger_pwm_disable,
+};
+
+static void spi_offload_trigger_pwm_release(void *data)
+{
+ pwm_disable(data);
+}
+
+static int spi_offload_trigger_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_offload_trigger_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &spi_offload_trigger_pwm_ops,
+ };
+ struct spi_offload_trigger_pwm_state *st;
+ struct pwm_state state;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ info.priv = st;
+ st->dev = dev;
+
+ st->pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(st->pwm))
+ return dev_err_probe(dev, PTR_ERR(st->pwm), "failed to get PWM\n");
+
+ /* init with duty_cycle = 0, output enabled to ensure trigger off */
+ pwm_init_state(st->pwm, &state);
+ state.enabled = true;
+
+ ret = pwm_apply_might_sleep(st->pwm, &state);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to apply PWM state\n");
+
+ ret = devm_add_action_or_reset(dev, spi_offload_trigger_pwm_release, st->pwm);
+ if (ret)
+ return ret;
+
+ return devm_spi_offload_trigger_register(dev, &info);
+}
+
+static const struct of_device_id spi_offload_trigger_pwm_of_match_table[] = {
+ { .compatible = "pwm-trigger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_offload_trigger_pwm_of_match_table);
+
+static struct platform_driver spi_offload_trigger_pwm_driver = {
+ .driver = {
+ .name = "pwm-trigger",
+ .of_match_table = spi_offload_trigger_pwm_of_match_table,
+ },
+ .probe = spi_offload_trigger_pwm_probe,
+};
+module_platform_driver(spi_offload_trigger_pwm_driver);
+
+MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
+MODULE_DESCRIPTION("Generic PWM trigger");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c
new file mode 100644
index 000000000000..6bad042fe437
--- /dev/null
+++ b/drivers/spi/spi-offload.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+/*
+ * SPI Offloading support.
+ *
+ * Some SPI controllers support offloading of SPI transfers. Essentially, this
+ * is the ability for a SPI controller to perform SPI transfers with minimal
+ * or even no CPU intervention, e.g. via a specialized SPI controller with a
+ * hardware trigger or via a conventional SPI controller using a non-Linux MCU
+ * processor core to offload the work.
+ */
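
A minimal consumer-side sketch of this API, assuming a peripheral driver
that already has a struct spi_device and a periodic trigger wired up via the
"trigger-sources" device tree property (error handling abbreviated; the
1 MHz figure is illustrative):

	struct spi_offload_config config = {
		.capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
				    SPI_OFFLOAD_CAP_RX_STREAM_DMA,
	};
	struct spi_offload_trigger_config trigger_config = {
		.type = SPI_OFFLOAD_TRIGGER_PERIODIC,
		.periodic = { .frequency_hz = 1000000 },
	};
	struct spi_offload_trigger *trigger;
	struct spi_offload *offload;
	int ret;

	offload = devm_spi_offload_get(dev, spi, &config);
	if (IS_ERR(offload))
		return PTR_ERR(offload);

	trigger = devm_spi_offload_trigger_get(dev, offload,
					       SPI_OFFLOAD_TRIGGER_PERIODIC);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	/* the hardware may round the requested frequency */
	ret = spi_offload_trigger_validate(trigger, &trigger_config);
	if (ret)
		return ret;

	/* msg must have been optimized with msg->offload = offload */
	ret = spi_offload_trigger_enable(offload, trigger, &trigger_config);
	...
	spi_offload_trigger_disable(offload, trigger);
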
+
+#define DEFAULT_SYMBOL_NAMESPACE "SPI_OFFLOAD"
+
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/export.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/offload/provider.h>
+#include <linux/spi/offload/types.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+struct spi_controller_and_offload {
+ struct spi_controller *controller;
+ struct spi_offload *offload;
+};
+
+struct spi_offload_trigger {
+ struct list_head list;
+ struct kref ref;
+ struct fwnode_handle *fwnode;
+ /* synchronizes calling ops and driver registration */
+ struct mutex lock;
+ /*
+ * If the provider goes away while the consumer still has a reference,
+ * ops and priv will be set to NULL and all calls will fail with -ENODEV.
+ */
+ const struct spi_offload_trigger_ops *ops;
+ void *priv;
+};
+
+static LIST_HEAD(spi_offload_triggers);
+static DEFINE_MUTEX(spi_offload_triggers_lock);
+
+/**
+ * devm_spi_offload_alloc() - Allocate offload instance
+ * @dev: Device for devm purposes and assigned to &struct spi_offload.provider_dev
+ * @priv_size: Size of private data to allocate
+ *
+ * Offload providers should use this to allocate offload instances.
+ *
+ * Return: Pointer to new offload instance or error on failure.
+ */
+struct spi_offload *devm_spi_offload_alloc(struct device *dev,
+ size_t priv_size)
+{
+ struct spi_offload *offload;
+ void *priv;
+
+ offload = devm_kzalloc(dev, sizeof(*offload), GFP_KERNEL);
+ if (!offload)
+ return ERR_PTR(-ENOMEM);
+
+ priv = devm_kzalloc(dev, priv_size, GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ offload->provider_dev = dev;
+ offload->priv = priv;
+
+ return offload;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_alloc);
+
+static void spi_offload_put(void *data)
+{
+ struct spi_controller_and_offload *resource = data;
+
+ resource->controller->put_offload(resource->offload);
+ kfree(resource);
+}
+
+/**
+ * devm_spi_offload_get() - Get an offload instance
+ * @dev: Device for devm purposes
+ * @spi: SPI device to use for the transfers
+ * @config: Offload configuration
+ *
+ * Peripheral drivers call this function to get an offload instance that meets
+ * the requirements specified in @config. If no suitable offload instance is
+ * available, -ENODEV is returned.
+ *
+ * Return: Offload instance or error on failure.
+ */
+struct spi_offload *devm_spi_offload_get(struct device *dev,
+ struct spi_device *spi,
+ const struct spi_offload_config *config)
+{
+ struct spi_controller_and_offload *resource;
+ struct spi_offload *offload;
+ int ret;
+
+ if (!spi || !config)
+ return ERR_PTR(-EINVAL);
+
+ if (!spi->controller->get_offload)
+ return ERR_PTR(-ENODEV);
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return ERR_PTR(-ENOMEM);
+
+ offload = spi->controller->get_offload(spi, config);
+ if (IS_ERR(offload)) {
+ kfree(resource);
+ return offload;
+ }
+
+ resource->controller = spi->controller;
+ resource->offload = offload;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_put, resource);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return offload;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_get);
+
+static void spi_offload_trigger_free(struct kref *ref)
+{
+ struct spi_offload_trigger *trigger =
+ container_of(ref, struct spi_offload_trigger, ref);
+
+ mutex_destroy(&trigger->lock);
+ fwnode_handle_put(trigger->fwnode);
+ kfree(trigger);
+}
+
+static void spi_offload_trigger_put(void *data)
+{
+ struct spi_offload_trigger *trigger = data;
+
+ scoped_guard(mutex, &trigger->lock)
+ if (trigger->ops && trigger->ops->release)
+ trigger->ops->release(trigger);
+
+ kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+static struct spi_offload_trigger
+*spi_offload_trigger_get(enum spi_offload_trigger_type type,
+ struct fwnode_reference_args *args)
+{
+ struct spi_offload_trigger *trigger;
+ bool match = false;
+ int ret;
+
+ guard(mutex)(&spi_offload_triggers_lock);
+
+ list_for_each_entry(trigger, &spi_offload_triggers, list) {
+ if (trigger->fwnode != args->fwnode)
+ continue;
+
+ match = trigger->ops->match(trigger, type, args->args, args->nargs);
+ if (match)
+ break;
+ }
+
+ if (!match)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return ERR_PTR(-ENODEV);
+
+ if (trigger->ops->request) {
+ ret = trigger->ops->request(trigger, type, args->args, args->nargs);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ kref_get(&trigger->ref);
+
+ return trigger;
+}
+
+/**
+ * devm_spi_offload_trigger_get() - Get an offload trigger instance
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance connected to a trigger.
+ * @type: Trigger type to get.
+ *
+ * Return: Offload trigger instance or error on failure.
+ */
+struct spi_offload_trigger
+*devm_spi_offload_trigger_get(struct device *dev,
+ struct spi_offload *offload,
+ enum spi_offload_trigger_type type)
+{
+ struct spi_offload_trigger *trigger;
+ struct fwnode_reference_args args;
+ int ret;
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(offload->provider_dev),
+ "trigger-sources",
+ "#trigger-source-cells", 0, 0,
+ &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ trigger = spi_offload_trigger_get(type, &args);
+ fwnode_handle_put(args.fwnode);
+ if (IS_ERR(trigger))
+ return trigger;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_trigger_put, trigger);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return trigger;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_get);
+
+/**
+ * spi_offload_trigger_validate - Validate the requested trigger
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to validate
+ *
+ * On success, @config may be modified to reflect what the hardware can do.
+ * For example, the frequency of a periodic trigger may be adjusted to the
+ * nearest supported value.
+ *
+ * Callers will likely need to do additional validation of the modified trigger
+ * parameters.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int spi_offload_trigger_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return -ENODEV;
+
+ if (!trigger->ops->validate)
+ return -EOPNOTSUPP;
+
+ return trigger->ops->validate(trigger, config);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_validate);
+
+/**
+ * spi_offload_trigger_enable - enables trigger for offload
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to validate
+ *
+ * There must be a prepared offload instance (i.e. spi_optimize_message()
+ * was called with the same offload assigned to the
+ * message). This will also reserve the bus for exclusive use by the offload
+ * instance until the trigger is disabled. Any other attempts to send a
+ * transfer or lock the bus will fail with -EBUSY during this time.
+ *
+ * Calls must be balanced with spi_offload_trigger_disable().
+ *
+ * Context: can sleep
+ * Return: 0 on success, else a negative error code.
+ */
+int spi_offload_trigger_enable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ int ret;
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return -ENODEV;
+
+ if (offload->ops && offload->ops->trigger_enable) {
+ ret = offload->ops->trigger_enable(offload);
+ if (ret)
+ return ret;
+ }
+
+ if (trigger->ops->enable) {
+ ret = trigger->ops->enable(trigger, config);
+ if (ret) {
+ if (offload->ops->trigger_disable)
+ offload->ops->trigger_disable(offload);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_enable);
+
+/**
+ * spi_offload_trigger_disable - disables hardware trigger for offload
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ *
+ * Disables the hardware trigger for the offload instance and releases
+ * the bus for use by other clients.
+ *
+ * Context: can sleep
+ */
+void spi_offload_trigger_disable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger)
+{
+ if (offload->ops && offload->ops->trigger_disable)
+ offload->ops->trigger_disable(offload);
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return;
+
+ if (trigger->ops->disable)
+ trigger->ops->disable(trigger);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_disable);
+
+static void spi_offload_release_dma_chan(void *chan)
+{
+ dma_release_channel(chan);
+}
+
+/**
+ * devm_spi_offload_tx_stream_request_dma_chan - Get the DMA channel info for the TX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will provide data to transfers that use the
+ * %SPI_OFFLOAD_XFER_TX_STREAM offload flag.
+ *
+ * Return: Pointer to DMA channel info, or negative error code
+ */
+struct dma_chan
+*devm_spi_offload_tx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload)
+{
+ struct dma_chan *chan;
+ int ret;
+
+ if (!offload->ops || !offload->ops->tx_stream_request_dma_chan)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ chan = offload->ops->tx_stream_request_dma_chan(offload);
+ if (IS_ERR(chan))
+ return chan;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_tx_stream_request_dma_chan);
+
+/**
+ * devm_spi_offload_rx_stream_request_dma_chan - Get the DMA channel info for the RX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will receive data from transfers that use the
+ * %SPI_OFFLOAD_XFER_RX_STREAM offload flag.
+ *
+ * Return: Pointer to DMA channel info, or negative error code
+ */
+struct dma_chan
+*devm_spi_offload_rx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload)
+{
+ struct dma_chan *chan;
+ int ret;
+
+ if (!offload->ops || !offload->ops->rx_stream_request_dma_chan)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ chan = offload->ops->rx_stream_request_dma_chan(offload);
+ if (IS_ERR(chan))
+ return chan;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_rx_stream_request_dma_chan);
+
+/* Triggers providers */
+
+static void spi_offload_trigger_unregister(void *data)
+{
+ struct spi_offload_trigger *trigger = data;
+
+ scoped_guard(mutex, &spi_offload_triggers_lock)
+ list_del(&trigger->list);
+
+ scoped_guard(mutex, &trigger->lock) {
+ trigger->priv = NULL;
+ trigger->ops = NULL;
+ }
+
+ kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+/**
+ * devm_spi_offload_trigger_register() - Allocate and register an offload trigger
+ * @dev: Device for devm purposes.
+ * @info: Provider-specific trigger info.
+ *
+ * Return: 0 on success, else a negative error code.
+ */
+int devm_spi_offload_trigger_register(struct device *dev,
+ struct spi_offload_trigger_info *info)
+{
+ struct spi_offload_trigger *trigger;
+
+ if (!info->fwnode || !info->ops)
+ return -EINVAL;
+
+ trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
+ if (!trigger)
+ return -ENOMEM;
+
+ kref_init(&trigger->ref);
+ mutex_init(&trigger->lock);
+ trigger->fwnode = fwnode_handle_get(info->fwnode);
+ trigger->ops = info->ops;
+ trigger->priv = info->priv;
+
+ scoped_guard(mutex, &spi_offload_triggers_lock)
+ list_add_tail(&trigger->list, &spi_offload_triggers);
+
+ return devm_add_action_or_reset(dev, spi_offload_trigger_unregister, trigger);
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_register);
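
On the provider side, registration reduces to filling in a
spi_offload_trigger_info and handing it to the devm_ registrar; a condensed
sketch mirroring the PWM trigger driver earlier in this series (the my_*
names are hypothetical):

	static const struct spi_offload_trigger_ops my_trigger_ops = {
		.match = my_match,
		.enable = my_enable,
		.disable = my_disable,
	};

	static int my_trigger_probe(struct platform_device *pdev)
	{
		struct spi_offload_trigger_info info = {
			.fwnode = dev_fwnode(&pdev->dev),
			.ops = &my_trigger_ops,
			.priv = my_state, /* retrieved later via spi_offload_trigger_get_priv() */
		};

		return devm_spi_offload_trigger_register(&pdev->dev, &info);
	}
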
+
+/**
+ * spi_offload_trigger_get_priv() - Get the private data for the trigger
+ *
+ * @trigger: Offload trigger instance.
+ *
+ * Return: Private data for the trigger.
+ */
+void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger)
+{
+ return trigger->priv;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_get_priv);
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
new file mode 100644
index 000000000000..fbba7741a9bf
--- /dev/null
+++ b/drivers/spi/spi-qpic-snand.c
@@ -0,0 +1,1633 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Authors:
+ * Md Sadre Alam <quic_mdalam@quicinc.com>
+ * Sricharan R <quic_srichara@quicinc.com>
+ * Varadarajan Narayanan <quic_varada@quicinc.com>
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-qpic-common.h>
+#include <linux/mtd/spinand.h>
+#include <linux/bitfield.h>
+
+#define NAND_FLASH_SPI_CFG 0xc0
+#define NAND_NUM_ADDR_CYCLES 0xc4
+#define NAND_BUSY_CHECK_WAIT_CNT 0xc8
+#define NAND_FLASH_FEATURES 0xf64
+
+/* QSPI NAND config reg bits */
+#define LOAD_CLK_CNTR_INIT_EN BIT(28)
+#define CLK_CNTR_INIT_VAL_VEC 0x924
+#define CLK_CNTR_INIT_VAL_VEC_MASK GENMASK(27, 16)
+#define FEA_STATUS_DEV_ADDR 0xc0
+#define FEA_STATUS_DEV_ADDR_MASK GENMASK(15, 8)
+#define SPI_CFG BIT(0)
+#define SPI_NUM_ADDR 0xDA4DB
+#define SPI_WAIT_CNT 0x10
+#define QPIC_QSPI_NUM_CS 1
+#define SPI_TRANSFER_MODE_x1 BIT(29)
+#define SPI_TRANSFER_MODE_x4 (3 << 29)
+#define SPI_WP BIT(28)
+#define SPI_HOLD BIT(27)
+#define QPIC_SET_FEATURE BIT(31)
+
+#define SPINAND_RESET 0xff
+#define SPINAND_READID 0x9f
+#define SPINAND_GET_FEATURE 0x0f
+#define SPINAND_SET_FEATURE 0x1f
+#define SPINAND_READ 0x13
+#define SPINAND_ERASE 0xd8
+#define SPINAND_WRITE_EN 0x06
+#define SPINAND_PROGRAM_EXECUTE 0x10
+#define SPINAND_PROGRAM_LOAD 0x84
+
+#define ACC_FEATURE 0xe
+#define BAD_BLOCK_MARKER_SIZE 0x2
+#define OOB_BUF_SIZE 128
+#define ecceng_to_qspi(eng) container_of(eng, struct qpic_spi_nand, ecc_eng)
+
+struct qpic_snand_op {
+ u32 cmd_reg;
+ u32 addr1_reg;
+ u32 addr2_reg;
+};
+
+struct snandc_read_status {
+ __le32 snandc_flash;
+ __le32 snandc_buffer;
+ __le32 snandc_erased_cw;
+};
+
+/*
+ * ECC state struct
+ * @corrected: number of bitflips corrected by ECC
+ * @bitflips: maximum number of bitflips detected in any single step
+ * @failed: number of uncorrectable ECC errors
+ */
+struct qcom_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct qpic_ecc {
+ struct device *dev;
+ int ecc_bytes_hw;
+ int spare_bytes;
+ int bbm_size;
+ int ecc_mode;
+ int bytes;
+ int steps;
+ int step_size;
+ int strength;
+ int cw_size;
+ int cw_data;
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg0_raw;
+ u32 cfg1_raw;
+ u32 ecc_buf_cfg;
+ u32 ecc_bch_cfg;
+ u32 clrflashstatus;
+ u32 clrreadstatus;
+ bool bch_enabled;
+};
+
+struct qpic_spi_nand {
+ struct qcom_nand_controller *snandc;
+ struct spi_controller *ctlr;
+ struct mtd_info *mtd;
+ struct clk *iomacro_clk;
+ struct qpic_ecc *ecc;
+ struct qcom_ecc_stats ecc_stats;
+ struct nand_ecc_engine ecc_eng;
+ u8 *data_buf;
+ u8 *oob_buf;
+ u32 wlen;
+ __le32 addr1;
+ __le32 addr2;
+ __le32 cmd;
+ u32 num_cw;
+ bool oob_rw;
+ bool page_rw;
+ bool raw_rw;
+};
+
+static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
+ int reg, int cw_offset, int read_size,
+ int is_last_read_loc)
+{
+ __le32 locreg_val;
+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
+ ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
+ << READ_LOCATION_LAST));
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg == NAND_READ_LOCATION_0)
+ snandc->regs->read_location0 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_1)
+ snandc->regs->read_location1 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_2)
+ snandc->regs->read_location2 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_3)
+ snandc->regs->read_location3 = locreg_val;
+}
+
+static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc,
+ int reg, int cw_offset, int read_size,
+ int is_last_read_loc)
+{
+ __le32 locreg_val;
+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
+ ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
+ << READ_LOCATION_LAST));
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg == NAND_READ_LOCATION_LAST_CW_0)
+ snandc->regs->read_location_last0 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_1)
+ snandc->regs->read_location_last1 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_2)
+ snandc->regs->read_location_last2 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_3)
+ snandc->regs->read_location_last3 = locreg_val;
+}
+
+static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+ struct qpic_spi_nand *qspi = ecceng_to_qspi(eng);
+
+ return qspi->snandc;
+}
+
+static int qcom_spi_init(struct qcom_nand_controller *snandc)
+{
+ u32 snand_cfg_val = 0x0;
+ int ret;
+
+ snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) |
+ FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) |
+ FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) |
+ FIELD_PREP(SPI_CFG, 0);
+
+ snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
+ snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR);
+ snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
+
+ snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN;
+ snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1,
+ NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure in submitting spi init descriptor\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *qecc = snandc->qspi->ecc;
+
+ if (section > 1)
+ return -ERANGE;
+
+ oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *qecc = snandc->qspi->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = qecc->steps * 4;
+ oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops qcom_spi_ooblayout = {
+ .ecc = qcom_spi_ooblayout_ecc,
+ .free = qcom_spi_ooblayout_free,
+};
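
With the geometry configured in qcom_spi_ecc_init_ctx_pipelined() below
(7 ECC bytes + 4 spare bytes per step, 1 BBM byte, 12 bytes total per step,
4 steps), these callbacks resolve, for an assumed 2048+64 page, to:

	/* ECC region:  length = 7 + 4 = 11, offset = 64 - 11 = 53 */
	/* free region: length = 4 * 4 = 16, offset = 3 * 12 + 1 = 37 */
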
+
+static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int cwperpage, bad_block_byte;
+ struct qpic_ecc *ecc_cfg;
+
+ cwperpage = mtd->writesize / NANDC_STEP_SIZE;
+ snandc->qspi->num_cw = cwperpage;
+
+ ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
+ if (!ecc_cfg)
+ return -ENOMEM;
+ snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!snandc->qspi->oob_buf) {
+ kfree(ecc_cfg);
+ return -ENOMEM;
+ }
+
+ memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ nand->ecc.ctx.priv = ecc_cfg;
+ snandc->qspi->mtd = mtd;
+
+ ecc_cfg->ecc_bytes_hw = 7;
+ ecc_cfg->spare_bytes = 4;
+ ecc_cfg->bbm_size = 1;
+ ecc_cfg->bch_enabled = true;
+ ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;
+
+ ecc_cfg->steps = 4;
+ ecc_cfg->strength = 4;
+ ecc_cfg->step_size = 512;
+ ecc_cfg->cw_data = 516;
+ ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
+ bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;
+
+ mtd_set_ooblayout(mtd, &qcom_spi_ooblayout);
+
+ ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) |
+ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) |
+ FIELD_PREP(STATUS_BFR_READ, 0) |
+ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes);
+
+ ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled);
+
+ ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+
+ ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) |
+ FIELD_PREP(ECC_SW_RESET, 0) |
+ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) |
+ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
+ FIELD_PREP(ECC_MODE_MASK, 0) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);
+
+ ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS;
+ ecc_cfg->clrflashstatus = FS_READY_BSY_N;
+ ecc_cfg->clrreadstatus = 0xc0;
+
+ conf->step_size = ecc_cfg->step_size;
+ conf->strength = ecc_cfg->strength;
+
+ snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET);
+ snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET);
+
+ dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n",
+ ecc_cfg->strength, ecc_cfg->step_size);
+
+ return 0;
+}
+
+static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand)
+{
+ struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
+
+ kfree(ecc_cfg);
+}
+
+static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
+
+ snandc->qspi->ecc = ecc_cfg;
+ snandc->qspi->raw_rw = false;
+ snandc->qspi->oob_rw = false;
+ snandc->qspi->page_rw = false;
+
+ if (req->datalen)
+ snandc->qspi->page_rw = true;
+
+ if (req->ooblen)
+ snandc->qspi->oob_rw = true;
+
+ if (req->mode == MTD_OPS_RAW)
+ snandc->qspi->raw_rw = true;
+
+ return 0;
+}
+
+static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ)
+ return 0;
+
+ if (snandc->qspi->ecc_stats.failed)
+ mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed;
+ else
+ mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected;
+
+ if (snandc->qspi->ecc_stats.failed)
+ return -EBADMSG;
+ else
+ return snandc->qspi->ecc_stats.bitflips;
+}
+
+static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = {
+ .init_ctx = qcom_spi_ecc_init_ctx_pipelined,
+ .cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined,
+ .prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined,
+ .finish_io_req = qcom_spi_ecc_finish_io_req_pipelined,
+};
+
+/* helper to configure location register values */
+static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg,
+ int cw_offset, int read_size, int is_last_read_loc)
+{
+ int reg_base = NAND_READ_LOCATION_0;
+ int num_cw = snandc->qspi->num_cw;
+
+ if (cw == (num_cw - 1))
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
+
+ reg_base += reg * 4;
+
+ if (cw == (num_cw - 1))
+ return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+ else
+ return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+}
+
+static void
+qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw)
+{
+ __le32 *reg = &snandc->regs->read_location0;
+ int num_cw = snandc->qspi->num_cw;
+
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+ if (cw == (num_cw - 1)) {
+ reg = &snandc->regs->read_location_last0;
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4,
+ NAND_BAM_NEXT_SGL);
+ }
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+}
+
+static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ int ret;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+	snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE));
+ snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to erase block\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc,
+ bool use_ecc, int cw)
+{
+ __le32 *reg = &snandc->regs->read_location0;
+ int num_cw = snandc->qspi->num_cw;
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ if (cw == (num_cw - 1)) {
+ reg = &snandc->regs->read_location_last0;
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL);
+ }
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0);
+}
+
+static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ int size, ret = 0;
+ int col, bbpos;
+	u32 cfg0, cfg1, ecc_bch_cfg, flash;
+ u32 num_cw = snandc->qspi->num_cw;
+
+ qcom_clear_bam_transaction(snandc);
+ qcom_clear_read_regs(snandc);
+
+ size = ecc_cfg->cw_size;
+ col = ecc_cfg->cw_size * (num_cw - 1);
+
+ memset(snandc->data_buffer, 0xff, size);
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ 0 << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1);
+
+ qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1);
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failed to read last cw\n");
+ return ret;
+ }
+
+	qcom_nandc_dev_to_mem(snandc, true);
+	flash = le32_to_cpu(snandc->reg_read_buf[0]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+
+ bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+
+	if (snandc->data_buffer[bbpos] == 0xff)
+		snandc->data_buffer[bbpos + 1] = 0xff;
+	else
+		snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos];
+
+ memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes);
+
+ return ret;
+}
+
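+/*
+ * Parse the per-codeword status words copied back from the controller.
+ * An uncorrectable error is fatal only if the erased-codeword detector
+ * did not flag the codeword as blank; otherwise the corrected bit-flip
+ * counts are accumulated and the worst codeword is tracked.
+ */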
+static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf)
+{
+ struct snandc_read_status *buf;
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ int i, num_cw = snandc->qspi->num_cw;
+ bool flash_op_err = false, erased;
+ unsigned int max_bitflips = 0;
+ unsigned int uncorrectable_cws = 0;
+
+ snandc->qspi->ecc_stats.failed = 0;
+ snandc->qspi->ecc_stats.corrected = 0;
+
+ qcom_nandc_dev_to_mem(snandc, true);
+ buf = (struct snandc_read_status *)snandc->reg_read_buf;
+
+ for (i = 0; i < num_cw; i++, buf++) {
+ u32 flash, buffer, erased_cw;
+ int data_len, oob_len;
+
+ if (i == (num_cw - 1)) {
+ data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_len = num_cw << 2;
+ } else {
+ data_len = ecc_cfg->cw_data;
+ oob_len = 0;
+ }
+
+ flash = le32_to_cpu(buf->snandc_flash);
+ buffer = le32_to_cpu(buf->snandc_buffer);
+ erased_cw = le32_to_cpu(buf->snandc_erased_cw);
+
+ if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
+ if (ecc_cfg->bch_enabled)
+ erased = (erased_cw & ERASED_CW) == ERASED_CW;
+ else
+ erased = false;
+
+ if (!erased)
+ uncorrectable_cws |= BIT(i);
+
+ } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+ flash_op_err = true;
+ } else {
+ unsigned int stat;
+
+ stat = buffer & BS_CORRECTABLE_ERR_MSK;
+ snandc->qspi->ecc_stats.corrected += stat;
+ max_bitflips = max(max_bitflips, stat);
+ }
+
+ if (data_buf)
+ data_buf += data_len;
+ if (oob_buf)
+ oob_buf += oob_len + ecc_cfg->bytes;
+ }
+
+ if (flash_op_err)
+ return -EIO;
+
+ if (!uncorrectable_cws)
+ snandc->qspi->ecc_stats.bitflips = max_bitflips;
+ else
+ snandc->qspi->ecc_stats.failed++;
+
+ return 0;
+}
+
+static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
+{
+ int i;
+
+ qcom_nandc_dev_to_mem(snandc, true);
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+ }
+
+ return 0;
+}
+
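+/*
+ * Raw codeword read: ECC is disabled and the full codeword (data,
+ * bad-block marker, spare and ECC bytes) is transferred using four
+ * read locations so the marker lands at the expected OOB position.
+ */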
+static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
+ u8 *oob_buf, int cw)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+ int raw_cw = cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+ int col;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ raw_cw = num_cw - 1;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ 0 << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ col = ecc_cfg->cw_size * cw;
+
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+ oob_size1 = ecc_cfg->bbm_size;
+
+ if (cw == (num_cw - 1)) {
+ data_size2 = NANDC_STEP_SIZE - data_size1 -
+ ((num_cw - 1) * 4);
+ oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size2 = ecc_cfg->cw_data - data_size1;
+ oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+ qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0);
+ read_loc += oob_size1;
+
+ qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0);
+ read_loc += data_size2;
+
+ qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1);
+
+ qcom_spi_config_cw_read(snandc, false, raw_cw);
+
+ qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0);
+ reg_off += data_size1;
+
+ qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0);
+ reg_off += oob_size1;
+
+ qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0);
+ reg_off += data_size2;
+
+ qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read raw cw %d\n", cw);
+ return ret;
+ }
+
+ return qcom_spi_check_raw_flash_errors(snandc, 1);
+}
+
+static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int ret, cw;
+ u32 num_cw = snandc->qspi->num_cw;
+
+ if (snandc->qspi->page_rw)
+ data_buf = op->data.buf.in;
+
+ oob_buf = snandc->qspi->oob_buf;
+ memset(oob_buf, 0xff, OOB_BUF_SIZE);
+
+ for (cw = 0; cw < num_cw; cw++) {
+ ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw);
+ if (ret)
+ return ret;
+
+ if (data_buf)
+ data_buf += ecc_cfg->cw_data;
+ if (oob_buf)
+ oob_buf += ecc_cfg->bytes;
+ }
+
+ return 0;
+}
+
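+/*
+ * ECC page read: program the page geometry once, queue one codeword
+ * read per step and let the controller correct data in flight; the
+ * status words are evaluated afterwards by qcom_spi_check_error().
+ */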
+static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
+ int ret, i;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+
+ data_buf = op->data.buf.in;
+ data_buf_start = data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+ oob_buf_start = oob_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
+
+ qcom_clear_bam_transaction(snandc);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+			data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ if (data_buf && oob_buf) {
+ qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0);
+ qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1);
+ } else if (data_buf) {
+ qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1);
+ } else {
+ qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
+ }
+
+ qcom_spi_config_cw_read(snandc, true, i);
+
+ if (data_buf)
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < ecc_cfg->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read page\n");
+ return ret;
+ }
+
+ return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
+}
+
+static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
+ int ret, i;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+
+ oob_buf = op->data.buf.in;
+ oob_buf_start = oob_buf;
+
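+	/* No data phase for an OOB-only read, so data_buf stays NULL. */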
+ data_buf_start = data_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
+
+ qcom_spi_config_cw_read(snandc, true, i);
+
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < ecc_cfg->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read oob\n");
+ return ret;
+ }
+
+ return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
+}
+
+static int qcom_spi_read_page(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
+ return qcom_spi_read_page_raw(snandc, op);
+
+ if (snandc->qspi->page_rw)
+ return qcom_spi_read_page_ecc(snandc, op);
+
+ if (snandc->qspi->oob_rw && snandc->qspi->raw_rw)
+ return qcom_spi_read_last_cw(snandc, op);
+
+ if (snandc->qspi->oob_rw)
+ return qcom_spi_read_page_oob(snandc, op);
+
+ return 0;
+}
+
+static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc)
+{
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG,
+ 1, NAND_BAM_NEXT_SGL);
+}
+
+static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc)
+{
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+}
+
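+/*
+ * Raw page program: mirror of the raw read path. ECC is disabled and
+ * each codeword is written as four chunks (data, bad-block marker,
+ * data, spare + ECC bytes) so the on-flash layout matches what a raw
+ * read returns.
+ */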
+static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int i, ret;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ data_buf = snandc->qspi->data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+ memset(oob_buf, 0xff, OOB_BUF_SIZE);
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_config_page_write(snandc);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int reg_off = FLASH_BUF_ACC;
+
+ data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+ oob_size1 = ecc_cfg->bbm_size;
+
+ if (i == (num_cw - 1)) {
+ data_size2 = NANDC_STEP_SIZE - data_size1 -
+ ((num_cw - 1) << 2);
+ oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size2 = ecc_cfg->cw_data - data_size1;
+ oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_write_data_dma(snandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+ qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
+ oob_buf += oob_size1;
+ reg_off += oob_size1;
+
+ qcom_write_data_dma(snandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+ qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0);
+ oob_buf += oob_size2;
+
+ qcom_spi_config_cw_write(snandc);
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write raw page\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int i, ret;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
+
+ if (snandc->qspi->data_buf)
+ data_buf = snandc->qspi->data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_config_page_write(snandc);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->bytes;
+ }
+
+ if (data_buf)
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0);
+
+ if (i == (num_cw - 1)) {
+ if (oob_buf) {
+ oob_buf += ecc_cfg->bbm_size;
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+ }
+
+ qcom_spi_config_cw_write(snandc);
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write page\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *oob_buf = NULL;
+ int ret, col, data_size, oob_size;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
+
+ col = ecc_cfg->cw_size * (num_cw - 1);
+
+ oob_buf = snandc->qspi->data_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ /* calculate the data and oob size for the last codeword/step */
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = snandc->qspi->mtd->oobavail;
+
+ memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data);
+	/* overwrite the last codeword with the new OOB content */
+ mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size,
+ oob_buf, 0, snandc->qspi->mtd->oobavail);
+ qcom_spi_config_page_write(snandc);
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0);
+ qcom_spi_config_cw_write(snandc);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write oob\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_execute(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
+ return qcom_spi_program_raw(snandc, op);
+
+ if (snandc->qspi->page_rw)
+ return qcom_spi_program_ecc(snandc, op);
+
+ if (snandc->qspi->oob_rw)
+ return qcom_spi_program_oob(snandc, op);
+
+ return 0;
+}
+
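+/*
+ * Translate a generic SPI NAND opcode into the QPIC flash command
+ * encoding; the transfer mode and the WP/HOLD pin policy are folded
+ * into the same command word.
+ */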
+static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode, u32 *cmd)
+{
+ switch (opcode) {
+ case SPINAND_RESET:
+ *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE);
+ break;
+ case SPINAND_READID:
+ *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID);
+ break;
+ case SPINAND_GET_FEATURE:
+ *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE);
+ break;
+ case SPINAND_SET_FEATURE:
+ *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE |
+ QPIC_SET_FEATURE);
+ break;
+ case SPINAND_READ:
+ if (snandc->qspi->raw_rw) {
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PAGE_READ);
+ } else {
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC);
+ }
+
+ break;
+ case SPINAND_ERASE:
+ *cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP |
+ SPI_HOLD | SPI_TRANSFER_MODE_x1;
+ break;
+ case SPINAND_WRITE_EN:
+ *cmd = SPINAND_WRITE_EN;
+ break;
+ case SPINAND_PROGRAM_EXECUTE:
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE);
+ break;
+ case SPINAND_PROGRAM_LOAD:
+ *cmd = SPINAND_PROGRAM_LOAD;
+ break;
+ default:
+ dev_err(snandc->dev, "Opcode not supported: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_write_page(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ int ret;
+ u32 cmd;
+
+ ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
+ if (ret < 0)
+ return ret;
+
+ if (op->cmd.opcode == SPINAND_PROGRAM_LOAD)
+ snandc->qspi->data_buf = (u8 *)op->data.buf.out;
+
+ return 0;
+}
+
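+/*
+ * For most opcodes only the command and address registers are set up
+ * here; PROGRAM_EXECUTE, READ and ERASE additionally latch the page
+ * address split across ADDR0/ADDR1 before kicking off the operation.
+ */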
+static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_snand_op s_op = {};
+ u32 cmd;
+ int ret, opcode;
+
+ ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
+ if (ret < 0)
+ return ret;
+
+ s_op.cmd_reg = cmd;
+ s_op.addr1_reg = op->addr.val;
+ s_op.addr2_reg = 0;
+
+ opcode = op->cmd.opcode;
+
+ switch (opcode) {
+ case SPINAND_WRITE_EN:
+ return 0;
+ case SPINAND_PROGRAM_EXECUTE:
+ s_op.addr1_reg = op->addr.val << 16;
+		s_op.addr2_reg = (op->addr.val >> 16) & 0xff;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ return qcom_spi_program_execute(snandc, op);
+ case SPINAND_READ:
+ s_op.addr1_reg = (op->addr.val << 16);
+		s_op.addr2_reg = (op->addr.val >> 16) & 0xff;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ return 0;
+ case SPINAND_ERASE:
+ s_op.addr2_reg = (op->addr.val >> 16) & 0xffff;
+ s_op.addr1_reg = op->addr.val;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+		return qcom_spi_block_erase(snandc);
+ default:
+ break;
+ }
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg);
+ snandc->regs->exec = cpu_to_le32(1);
+ snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg);
+ snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret)
+ dev_err(snandc->dev, "failure in submitting cmd descriptor\n");
+
+ return ret;
+}
+
+static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op)
+{
+ int ret, val, opcode;
+ bool copy = false, copy_ftr = false;
+
+ ret = qcom_spi_send_cmdaddr(snandc, op);
+ if (ret)
+ return ret;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ opcode = op->cmd.opcode;
+
+ switch (opcode) {
+ case SPINAND_READID:
+ snandc->buf_count = 4;
+ qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+ copy = true;
+ break;
+ case SPINAND_GET_FEATURE:
+ snandc->buf_count = 4;
+ qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
+ copy_ftr = true;
+ break;
+ case SPINAND_SET_FEATURE:
+ snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out);
+ qcom_write_reg_dma(snandc, &snandc->regs->flash_feature,
+ NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
+ break;
+ case SPINAND_PROGRAM_EXECUTE:
+ case SPINAND_WRITE_EN:
+ case SPINAND_RESET:
+ case SPINAND_ERASE:
+ case SPINAND_READ:
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret)
+		dev_err(snandc->dev, "failure in submitting descriptor for opcode %d\n", opcode);
+
+ if (copy) {
+ qcom_nandc_dev_to_mem(snandc, true);
+ memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count);
+ }
+
+ if (copy_ftr) {
+ qcom_nandc_dev_to_mem(snandc, true);
+ val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf);
+ val >>= 8;
+ memcpy(op->data.buf.in, &val, snandc->buf_count);
+ }
+
+ return ret;
+}
+
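+/*
+ * Page reads and writes are recognised by their bus width or by a
+ * two-byte column address; everything else is treated as a
+ * register-style IO operation.
+ */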
+static bool qcom_spi_is_page_op(const struct spi_mem_op *op)
+{
+ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4)
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->addr.buswidth == 4 && op->data.buswidth == 4)
+ return true;
+
+ if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
+ return true;
+
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.buswidth == 4)
+ return true;
+ if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
+ return true;
+ }
+
+ return false;
+}
+
+static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
+ return false;
+
+ if (qcom_spi_is_page_op(op))
+ return true;
+
+ return ((!op->addr.nbytes || op->addr.buswidth == 1) &&
+ (!op->dummy.nbytes || op->dummy.buswidth == 1) &&
+ (!op->data.nbytes || op->data.buswidth == 1));
+}
+
+static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller);
+
+	dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u\n", op->cmd.opcode,
+ op->addr.val, op->addr.buswidth, op->addr.nbytes,
+ op->data.buswidth, op->data.nbytes);
+
+ if (qcom_spi_is_page_op(op)) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ return qcom_spi_read_page(snandc, op);
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ return qcom_spi_write_page(snandc, op);
+ } else {
+ return qcom_spi_io_op(snandc, op);
+ }
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops qcom_spi_mem_ops = {
+ .supports_op = qcom_spi_supports_op,
+ .exec_op = qcom_spi_exec_op,
+};
+
+static const struct spi_controller_mem_caps qcom_spi_mem_caps = {
+ .ecc = true,
+};
+
+static int qcom_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctlr;
+ struct qcom_nand_controller *snandc;
+ struct qpic_spi_nand *qspi;
+ struct qpic_ecc *ecc;
+ struct resource *res;
+ const void *dev_data;
+ int ret;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
+ if (!qspi)
+ return -ENOMEM;
+
+ ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false);
+ if (!ctlr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ctlr);
+
+ snandc = spi_controller_get_devdata(ctlr);
+ qspi->snandc = snandc;
+
+ snandc->dev = dev;
+ snandc->qspi = qspi;
+ snandc->qspi->ctlr = ctlr;
+ snandc->qspi->ecc = ecc;
+
+ dev_data = of_device_get_match_data(dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "failed to get device data\n");
+ return -ENODEV;
+ }
+
+	snandc->props = dev_data;
+
+ snandc->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(snandc->core_clk))
+ return PTR_ERR(snandc->core_clk);
+
+ snandc->aon_clk = devm_clk_get(dev, "aon");
+ if (IS_ERR(snandc->aon_clk))
+ return PTR_ERR(snandc->aon_clk);
+
+ snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom");
+ if (IS_ERR(snandc->qspi->iomacro_clk))
+ return PTR_ERR(snandc->qspi->iomacro_clk);
+
+ snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(snandc->base))
+ return PTR_ERR(snandc->base);
+
+ snandc->base_phys = res->start;
+ snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, snandc->base_dma))
+ return -ENXIO;
+
+ ret = clk_prepare_enable(snandc->core_clk);
+ if (ret)
+ goto err_dis_core_clk;
+
+ ret = clk_prepare_enable(snandc->aon_clk);
+ if (ret)
+ goto err_dis_aon_clk;
+
+ ret = clk_prepare_enable(snandc->qspi->iomacro_clk);
+ if (ret)
+ goto err_dis_iom_clk;
+
+ ret = qcom_nandc_alloc(snandc);
+ if (ret)
+ goto err_snand_alloc;
+
+ ret = qcom_spi_init(snandc);
+ if (ret)
+ goto err_spi_init;
+
+ /* setup ECC engine */
+ snandc->qspi->ecc_eng.dev = &pdev->dev;
+ snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
+ snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined;
+ snandc->qspi->ecc_eng.priv = snandc;
+
+ ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng);
+ if (ret) {
+		dev_err(&pdev->dev, "failed to register ECC engine: %d\n", ret);
+ goto err_spi_init;
+ }
+
+ ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
+ ctlr->mem_ops = &qcom_spi_mem_ops;
+ ctlr->mem_caps = &qcom_spi_mem_caps;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_controller failed.\n");
+ goto err_spi_init;
+ }
+
+ return 0;
+
+err_spi_init:
+ qcom_nandc_unalloc(snandc);
+err_snand_alloc:
+ clk_disable_unprepare(snandc->qspi->iomacro_clk);
+err_dis_iom_clk:
+ clk_disable_unprepare(snandc->aon_clk);
+err_dis_aon_clk:
+ clk_disable_unprepare(snandc->core_clk);
+err_dis_core_clk:
+ dma_unmap_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ return ret;
+}
+
+static void qcom_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ spi_unregister_controller(ctlr);
+
+ qcom_nandc_unalloc(snandc);
+
+	clk_disable_unprepare(snandc->qspi->iomacro_clk);
+	clk_disable_unprepare(snandc->aon_clk);
+	clk_disable_unprepare(snandc->core_clk);
+
+ dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+}
+
+static const struct qcom_nandc_props ipq9574_snandc_props = {
+ .dev_cmd_reg_start = 0x7000,
+ .supports_bam = true,
+};
+
+static const struct of_device_id qcom_snandc_of_match[] = {
+ {
+ .compatible = "qcom,ipq9574-snand",
+ .data = &ipq9574_snandc_props,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
+
+static struct platform_driver qcom_spi_driver = {
+ .driver = {
+ .name = "qcom_snand",
+ .of_match_table = qcom_snandc_of_match,
+ },
+ .probe = qcom_spi_probe,
+ .remove = qcom_spi_remove,
+};
+module_platform_driver(qcom_spi_driver);
+
+MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores");
+MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/spi/spi-realtek-rtl-snand.c b/drivers/spi/spi-realtek-rtl-snand.c
index cd0484041147..741cf2af3e91 100644
--- a/drivers/spi/spi-realtek-rtl-snand.c
+++ b/drivers/spi/spi-realtek-rtl-snand.c
@@ -364,7 +364,6 @@ static int rtl_snand_probe(struct platform_device *pdev)
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .cache_type = REGCACHE_NONE,
};
int irq, ret;
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 389275dbc003..9c47f5741c5f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -139,7 +139,9 @@ struct s3c64xx_spi_dma_data {
* struct s3c64xx_spi_port_config - SPI Controller hardware info
* @fifo_lvl_mask: [DEPRECATED] use @{rx, tx}_fifomask instead.
* @rx_lvl_offset: [DEPRECATED] use @{rx,tx}_fifomask instead.
- * @fifo_depth: depth of the FIFO.
+ * @fifo_depth: depth of the FIFOs. Used by compatibles where all the instances
+ * of the IP define the same FIFO depth. It has higher precedence
+ * than the FIFO depth specified via DT.
* @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's
* length and position.
* @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. Shifted mask defining the field's
diff --git a/drivers/spi/spi-sg2044-nor.c b/drivers/spi/spi-sg2044-nor.c
new file mode 100644
index 000000000000..a59aa3fc55d2
--- /dev/null
+++ b/drivers/spi/spi-sg2044-nor.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SG2044 SPI NOR controller driver
+ *
+ * Copyright (c) 2025 Longbin Li <looong.bin@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi-mem.h>
+
+/* Hardware register definitions */
+#define SPIFMC_CTRL 0x00
+#define SPIFMC_CTRL_CPHA BIT(12)
+#define SPIFMC_CTRL_CPOL BIT(13)
+#define SPIFMC_CTRL_HOLD_OL BIT(14)
+#define SPIFMC_CTRL_WP_OL BIT(15)
+#define SPIFMC_CTRL_LSBF BIT(20)
+#define SPIFMC_CTRL_SRST BIT(21)
+#define SPIFMC_CTRL_SCK_DIV_SHIFT 0
+#define SPIFMC_CTRL_FRAME_LEN_SHIFT 16
+#define SPIFMC_CTRL_SCK_DIV_MASK 0x7FF
+
+#define SPIFMC_CE_CTRL 0x04
+#define SPIFMC_CE_CTRL_CEMANUAL BIT(0)
+#define SPIFMC_CE_CTRL_CEMANUAL_EN BIT(1)
+
+#define SPIFMC_DLY_CTRL 0x08
+#define SPIFMC_CTRL_FM_INTVL_MASK 0x000f
+#define SPIFMC_CTRL_FM_INTVL BIT(0)
+#define SPIFMC_CTRL_CET_MASK 0x0f00
+#define SPIFMC_CTRL_CET BIT(8)
+
+#define SPIFMC_DMMR 0x0c
+
+#define SPIFMC_TRAN_CSR 0x10
+#define SPIFMC_TRAN_CSR_TRAN_MODE_MASK GENMASK(1, 0)
+#define SPIFMC_TRAN_CSR_TRAN_MODE_RX BIT(0)
+#define SPIFMC_TRAN_CSR_TRAN_MODE_TX BIT(1)
+#define SPIFMC_TRAN_CSR_FAST_MODE BIT(3)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT (0x00 << 4)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT (0x01 << 4)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT (0x02 << 4)
+#define SPIFMC_TRAN_CSR_DMA_EN BIT(6)
+#define SPIFMC_TRAN_CSR_MISO_LEVEL BIT(7)
+#define SPIFMC_TRAN_CSR_ADDR_BYTES_MASK GENMASK(10, 8)
+#define SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT 8
+#define SPIFMC_TRAN_CSR_WITH_CMD BIT(11)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK GENMASK(13, 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE (0x00 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_2_BYTE (0x01 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE (0x02 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE (0x03 << 12)
+#define SPIFMC_TRAN_CSR_GO_BUSY BIT(15)
+#define SPIFMC_TRAN_CSR_ADDR4B_SHIFT 20
+#define SPIFMC_TRAN_CSR_CMD4B_SHIFT 21
+
+#define SPIFMC_TRAN_NUM 0x14
+#define SPIFMC_FIFO_PORT 0x18
+#define SPIFMC_FIFO_PT 0x20
+
+#define SPIFMC_INT_STS 0x28
+#define SPIFMC_INT_TRAN_DONE BIT(0)
+#define SPIFMC_INT_RD_FIFO BIT(2)
+#define SPIFMC_INT_WR_FIFO BIT(3)
+#define SPIFMC_INT_RX_FRAME BIT(4)
+#define SPIFMC_INT_TX_FRAME BIT(5)
+
+#define SPIFMC_INT_EN 0x2c
+#define SPIFMC_INT_TRAN_DONE_EN BIT(0)
+#define SPIFMC_INT_RD_FIFO_EN BIT(2)
+#define SPIFMC_INT_WR_FIFO_EN BIT(3)
+#define SPIFMC_INT_RX_FRAME_EN BIT(4)
+#define SPIFMC_INT_TX_FRAME_EN BIT(5)
+
+#define SPIFMC_OPT 0x030
+#define SPIFMC_OPT_DISABLE_FIFO_FLUSH BIT(1)
+
+#define SPIFMC_MAX_FIFO_DEPTH 8
+
+#define SPIFMC_MAX_READ_SIZE 0x10000
+
+struct sg2044_spifmc {
+ struct spi_controller *ctrl;
+ void __iomem *io_base;
+ struct device *dev;
+ struct mutex lock;
+ struct clk *clk;
+};
+
+static int sg2044_spifmc_wait_int(struct sg2044_spifmc *spifmc, u8 int_type)
+{
+ u32 stat;
+
+ return readl_poll_timeout(spifmc->io_base + SPIFMC_INT_STS, stat,
+ (stat & int_type), 0, 1000000);
+}
+
+static int sg2044_spifmc_wait_xfer_size(struct sg2044_spifmc *spifmc,
+ int xfer_size)
+{
+ u8 stat;
+
+ return readl_poll_timeout(spifmc->io_base + SPIFMC_FIFO_PT, stat,
+ ((stat & 0xf) == xfer_size), 1, 1000000);
+}
+
+static u32 sg2044_spifmc_init_reg(struct sg2044_spifmc *spifmc)
+{
+ u32 reg;
+
+ reg = readl(spifmc->io_base + SPIFMC_TRAN_CSR);
+ reg &= ~(SPIFMC_TRAN_CSR_TRAN_MODE_MASK |
+ SPIFMC_TRAN_CSR_FAST_MODE |
+ SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT |
+ SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT |
+ SPIFMC_TRAN_CSR_DMA_EN |
+ SPIFMC_TRAN_CSR_ADDR_BYTES_MASK |
+ SPIFMC_TRAN_CSR_WITH_CMD |
+ SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK);
+
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ return reg;
+}
+
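+/*
+ * Issue one indirect read of up to 64 KiB: push the command, address
+ * and dummy bytes into the FIFO, then drain the RX FIFO in chunks of
+ * at most the 8-byte FIFO depth.
+ */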
+static ssize_t sg2044_spifmc_read_64k(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op, loff_t from,
+ size_t len, u_char *buf)
+{
+ int xfer_size, offset;
+ u32 reg;
+ int ret;
+ int i;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((from >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_RD_FIFO);
+ if (ret < 0)
+ return ret;
+
+ offset = 0;
+ while (offset < len) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, len - offset);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, xfer_size);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < xfer_size; i++)
+ buf[i + offset] = readb(spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ offset += xfer_size;
+ }
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return len;
+}
+
+static ssize_t sg2044_spifmc_read(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ size_t xfer_size;
+ size_t offset;
+ loff_t from = op->addr.val;
+ size_t len = op->data.nbytes;
+ int ret;
+ u8 *din = op->data.buf.in;
+
+ offset = 0;
+ while (offset < len) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_READ_SIZE, len - offset);
+
+ ret = sg2044_spifmc_read_64k(spifmc, op, from, xfer_size, din);
+ if (ret < 0)
+ return ret;
+
+ offset += xfer_size;
+ din += xfer_size;
+ from += xfer_size;
+ }
+
+ return 0;
+}
+
+static ssize_t sg2044_spifmc_write(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ size_t xfer_size;
+ const u8 *dout = op->data.buf.out;
+ int i, offset;
+ int ret;
+ u32 reg;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ writel(op->data.nbytes, spifmc->io_base + SPIFMC_TRAN_NUM);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, 0);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ offset = 0;
+ while (offset < op->data.nbytes) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, op->data.nbytes - offset);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < xfer_size; i++)
+ writeb(dout[i + offset], spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ offset += xfer_size;
+ }
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static ssize_t sg2044_spifmc_tran_cmd(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ int i, ret;
+ u32 reg;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static void sg2044_spifmc_trans(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ sg2044_spifmc_read(spifmc, op);
+ else if (op->data.dir == SPI_MEM_DATA_OUT)
+ sg2044_spifmc_write(spifmc, op);
+ else
+ sg2044_spifmc_tran_cmd(spifmc, op);
+}
+
+static ssize_t sg2044_spifmc_trans_reg(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ const u8 *dout = NULL;
+ u8 *din = NULL;
+ size_t len = op->data.nbytes;
+ int ret, i;
+ u32 reg;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ din = op->data.buf.in;
+ else
+ dout = op->data.buf.out;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+
+ if (din) {
+ reg |= SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+
+ writel(SPIFMC_OPT_DISABLE_FIFO_FLUSH, spifmc->io_base + SPIFMC_OPT);
+ } else {
+		/*
+		 * When writing to the Status Register, configure the
+		 * TRAN_CSR register the same way as for a register read.
+		 */
+ if (op->cmd.opcode == 0x01) {
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ }
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < len; i++) {
+ if (din)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+ else
+ writeb(dout[i], spifmc->io_base + SPIFMC_FIFO_PORT);
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ if (din) {
+ while (len--)
+ *din++ = readb(spifmc->io_base + SPIFMC_FIFO_PORT);
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static int sg2044_spifmc_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct sg2044_spifmc *spifmc;
+
+ spifmc = spi_controller_get_devdata(mem->spi->controller);
+
+ mutex_lock(&spifmc->lock);
+
+ if (op->addr.nbytes == 0)
+ sg2044_spifmc_trans_reg(spifmc, op);
+ else
+ sg2044_spifmc_trans(spifmc, op);
+
+ mutex_unlock(&spifmc->lock);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops sg2044_spifmc_mem_ops = {
+ .exec_op = sg2044_spifmc_exec_op,
+};
+
+static void sg2044_spifmc_init(struct sg2044_spifmc *spifmc)
+{
+ u32 tran_csr;
+ u32 reg;
+
+ writel(0, spifmc->io_base + SPIFMC_DMMR);
+
+ reg = readl(spifmc->io_base + SPIFMC_CTRL);
+ reg |= SPIFMC_CTRL_SRST;
+ reg &= ~(SPIFMC_CTRL_SCK_DIV_MASK);
+ reg |= 1;
+ writel(reg, spifmc->io_base + SPIFMC_CTRL);
+
+ writel(0, spifmc->io_base + SPIFMC_CE_CTRL);
+
+ tran_csr = readl(spifmc->io_base + SPIFMC_TRAN_CSR);
+ tran_csr |= (0 << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT);
+ tran_csr |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE;
+ tran_csr |= SPIFMC_TRAN_CSR_WITH_CMD;
+ writel(tran_csr, spifmc->io_base + SPIFMC_TRAN_CSR);
+}
+
+static int sg2044_spifmc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct sg2044_spifmc *spifmc;
+ int ret;
+
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*spifmc));
+ if (!ctrl)
+ return -ENOMEM;
+
+ spifmc = spi_controller_get_devdata(ctrl);
+
+ spifmc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(spifmc->clk))
+ return dev_err_probe(dev, PTR_ERR(spifmc->clk), "Cannot get and enable AHB clock\n");
+
+ spifmc->dev = &pdev->dev;
+ spifmc->ctrl = ctrl;
+
+ spifmc->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spifmc->io_base))
+ return PTR_ERR(spifmc->io_base);
+
+ ctrl->num_chipselect = 1;
+ ctrl->dev.of_node = pdev->dev.of_node;
+ ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctrl->auto_runtime_pm = false;
+ ctrl->mem_ops = &sg2044_spifmc_mem_ops;
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD;
+
+ ret = devm_mutex_init(dev, &spifmc->lock);
+ if (ret)
+ return ret;
+
+ sg2044_spifmc_init(spifmc);
+ sg2044_spifmc_init_reg(spifmc);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "spi_register_controller failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id sg2044_spifmc_match[] = {
+ { .compatible = "sophgo,sg2044-spifmc-nor" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_spifmc_match);
+
+static struct platform_driver sg2044_nor_driver = {
+ .driver = {
+ .name = "sg2044,spifmc-nor",
+ .of_match_table = sg2044_spifmc_match,
+ },
+ .probe = sg2044_spifmc_probe,
+};
+module_platform_driver(sg2044_nor_driver);
+
+MODULE_DESCRIPTION("SG2044 SPI NOR controller driver");
+MODULE_AUTHOR("Longbin Li <looong.bin@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c
new file mode 100644
index 000000000000..668022098b1e
--- /dev/null
+++ b/drivers/spi/spi-stm32-ospi.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/types.h>
+
+#define OSPI_CR 0x00
+#define CR_EN BIT(0)
+#define CR_ABORT BIT(1)
+#define CR_DMAEN BIT(2)
+#define CR_FTHRES_SHIFT 8
+#define CR_TEIE BIT(16)
+#define CR_TCIE BIT(17)
+#define CR_SMIE BIT(19)
+#define CR_APMS BIT(22)
+#define CR_CSSEL BIT(24)
+#define CR_FMODE_MASK GENMASK(29, 28)
+#define CR_FMODE_INDW (0U)
+#define CR_FMODE_INDR (1U)
+#define CR_FMODE_APM (2U)
+#define CR_FMODE_MM (3U)
+
+#define OSPI_DCR1 0x08
+#define DCR1_DLYBYP BIT(3)
+#define DCR1_DEVSIZE_MASK GENMASK(20, 16)
+#define DCR1_MTYP_MASK GENMASK(26, 24)
+#define DCR1_MTYP_MX_MODE 1
+#define DCR1_MTYP_HP_MEMMODE 4
+
+#define OSPI_DCR2 0x0c
+#define DCR2_PRESC_MASK GENMASK(7, 0)
+
+#define OSPI_SR 0x20
+#define SR_TEF BIT(0)
+#define SR_TCF BIT(1)
+#define SR_FTF BIT(2)
+#define SR_SMF BIT(3)
+#define SR_BUSY BIT(5)
+
+#define OSPI_FCR 0x24
+#define FCR_CTEF BIT(0)
+#define FCR_CTCF BIT(1)
+#define FCR_CSMF BIT(3)
+
+#define OSPI_DLR 0x40
+#define OSPI_AR 0x48
+#define OSPI_DR 0x50
+#define OSPI_PSMKR 0x80
+#define OSPI_PSMAR 0x88
+
+#define OSPI_CCR 0x100
+#define CCR_IMODE_MASK GENMASK(2, 0)
+#define CCR_IDTR BIT(3)
+#define CCR_ISIZE_MASK GENMASK(5, 4)
+#define CCR_ADMODE_MASK GENMASK(10, 8)
+#define CCR_ADMODE_8LINES 4
+#define CCR_ADDTR BIT(11)
+#define CCR_ADSIZE_MASK GENMASK(13, 12)
+#define CCR_ADSIZE_32BITS 3
+#define CCR_DMODE_MASK GENMASK(26, 24)
+#define CCR_DMODE_8LINES 4
+#define CCR_DQSE BIT(29)
+#define CCR_DDTR BIT(27)
+#define CCR_BUSWIDTH_0 0x0
+#define CCR_BUSWIDTH_1 0x1
+#define CCR_BUSWIDTH_2 0x2
+#define CCR_BUSWIDTH_4 0x3
+#define CCR_BUSWIDTH_8 0x4
+
+#define OSPI_TCR 0x108
+#define TCR_DCYC_MASK GENMASK(4, 0)
+#define TCR_DHQC BIT(28)
+#define TCR_SSHIFT BIT(30)
+
+#define OSPI_IR 0x110
+
+#define STM32_OSPI_MAX_MMAP_SZ SZ_256M
+#define STM32_OSPI_MAX_NORCHIP 2
+
+#define STM32_FIFO_TIMEOUT_US 30000
+#define STM32_ABT_TIMEOUT_US 100000
+#define STM32_COMP_TIMEOUT_MS 5000
+#define STM32_BUSY_TIMEOUT_US 100000
+
+#define STM32_AUTOSUSPEND_DELAY -1
+
+struct stm32_ospi {
+ struct device *dev;
+ struct spi_controller *ctrl;
+ struct clk *clk;
+ struct reset_control *rstc;
+
+ struct completion data_completion;
+ struct completion match_completion;
+
+ struct dma_chan *dma_chtx;
+ struct dma_chan *dma_chrx;
+ struct completion dma_completion;
+
+ void __iomem *regs_base;
+ void __iomem *mm_base;
+ phys_addr_t regs_phys_base;
+ resource_size_t mm_size;
+ u32 clk_rate;
+ u32 fmode;
+ u32 cr_reg;
+ u32 dcr_reg;
+ u32 flash_presc[STM32_OSPI_MAX_NORCHIP];
+ int irq;
+ unsigned long status_timeout;
+
+	/*
+	 * Protects the device configuration, which may differ between
+	 * accesses to the two flash devices
+	 */
+ struct mutex lock;
+};
+
+static void stm32_ospi_read_fifo(u8 *val, void __iomem *addr)
+{
+ *val = readb_relaxed(addr);
+}
+
+static void stm32_ospi_write_fifo(u8 *val, void __iomem *addr)
+{
+ writeb_relaxed(*val, addr);
+}
+
+static int stm32_ospi_abort(struct stm32_ospi *ospi)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr;
+ int timeout;
+
+ cr = readl_relaxed(regs_base + OSPI_CR) | CR_ABORT;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+
+	/* Wait for the hardware to clear the abort bit */
+ timeout = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_CR,
+ cr, !(cr & CR_ABORT), 1,
+ STM32_ABT_TIMEOUT_US);
+
+ if (timeout)
+ dev_err(ospi->dev, "%s abort timeout:%d\n", __func__, timeout);
+
+ return timeout;
+}
+
+static int stm32_ospi_poll(struct stm32_ospi *ospi, u8 *buf, u32 len, bool read)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ void (*fifo)(u8 *val, void __iomem *addr);
+ u32 sr;
+ int ret;
+
+ if (read)
+ fifo = stm32_ospi_read_fifo;
+ else
+ fifo = stm32_ospi_write_fifo;
+
+ while (len--) {
+ ret = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_SR,
+ sr, sr & SR_FTF, 1,
+ STM32_FIFO_TIMEOUT_US);
+ if (ret) {
+ dev_err(ospi->dev, "fifo timeout (len:%d stat:%#x)\n",
+ len, sr);
+ return ret;
+ }
+ fifo(buf++, regs_base + OSPI_DR);
+ }
+
+ return 0;
+}
+
+static int stm32_ospi_wait_nobusy(struct stm32_ospi *ospi)
+{
+ u32 sr;
+
+ return readl_relaxed_poll_timeout_atomic(ospi->regs_base + OSPI_SR,
+ sr, !(sr & SR_BUSY), 1,
+ STM32_BUSY_TIMEOUT_US);
+}
+
+static int stm32_ospi_wait_cmd(struct stm32_ospi *ospi)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr, sr;
+ int err = 0;
+
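+	/* Nothing to wait for if the command already completed or in automatic-polling mode */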
+ if ((readl_relaxed(regs_base + OSPI_SR) & SR_TCF) ||
+ ospi->fmode == CR_FMODE_APM)
+ goto out;
+
+ reinit_completion(&ospi->data_completion);
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ writel_relaxed(cr | CR_TCIE | CR_TEIE, regs_base + OSPI_CR);
+
+ if (!wait_for_completion_timeout(&ospi->data_completion,
+ msecs_to_jiffies(STM32_COMP_TIMEOUT_MS)))
+ err = -ETIMEDOUT;
+
+ sr = readl_relaxed(regs_base + OSPI_SR);
+ if (sr & SR_TCF)
+ /* avoid false timeout */
+ err = 0;
+ if (sr & SR_TEF)
+ err = -EIO;
+
+out:
+ /* clear flags */
+ writel_relaxed(FCR_CTCF | FCR_CTEF, regs_base + OSPI_FCR);
+
+ if (!err)
+ err = stm32_ospi_wait_nobusy(ospi);
+
+ return err;
+}
+
+static void stm32_ospi_dma_callback(void *arg)
+{
+ struct completion *dma_completion = arg;
+
+ complete(dma_completion);
+}
+
+static irqreturn_t stm32_ospi_irq(int irq, void *dev_id)
+{
+ struct stm32_ospi *ospi = (struct stm32_ospi *)dev_id;
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr, sr;
+
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ sr = readl_relaxed(regs_base + OSPI_SR);
+
+ if (cr & CR_SMIE && sr & SR_SMF) {
+ /* disable irq */
+ cr &= ~CR_SMIE;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+ complete(&ospi->match_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ if (sr & (SR_TEF | SR_TCF)) {
+ /* disable irq */
+ cr &= ~CR_TCIE & ~CR_TEIE;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+ complete(&ospi->data_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_ospi_dma_setup(struct stm32_ospi *ospi,
+ struct dma_slave_config *dma_cfg)
+{
+ if (dma_cfg && ospi->dma_chrx) {
+ if (dmaengine_slave_config(ospi->dma_chrx, dma_cfg)) {
+ dev_err(ospi->dev, "dma rx config failed\n");
+ dma_release_channel(ospi->dma_chrx);
+ ospi->dma_chrx = NULL;
+ }
+ }
+
+ if (dma_cfg && ospi->dma_chtx) {
+ if (dmaengine_slave_config(ospi->dma_chtx, dma_cfg)) {
+ dev_err(ospi->dev, "dma tx config failed\n");
+ dma_release_channel(ospi->dma_chtx);
+ ospi->dma_chtx = NULL;
+ }
+ }
+
+ init_completion(&ospi->dma_completion);
+}
+
+static int stm32_ospi_tx_mm(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ memcpy_fromio(op->data.buf.in, ospi->mm_base + op->addr.val,
+ op->data.nbytes);
+ return 0;
+}
+
+static int stm32_ospi_tx_dma(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ struct dma_async_tx_descriptor *desc;
+ void __iomem *regs_base = ospi->regs_base;
+ enum dma_transfer_direction dma_dir;
+ struct dma_chan *dma_ch;
+ struct sg_table sgt;
+ dma_cookie_t cookie;
+ u32 cr, t_out;
+ int err;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_dir = DMA_DEV_TO_MEM;
+ dma_ch = ospi->dma_chrx;
+ } else {
+ dma_dir = DMA_MEM_TO_DEV;
+ dma_ch = ospi->dma_chtx;
+ }
+
+	/*
+	 * The mapping returns -EINVAL if the buffer is not DMA-able
+	 * (DMA-able: in the vmalloc, kmap or virt_addr_valid areas)
+	 */
+ err = spi_controller_dma_map_mem_op_data(ospi->ctrl, op, &sgt);
+ if (err)
+ return err;
+
+ desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
+ dma_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ cr = readl_relaxed(regs_base + OSPI_CR);
+
+ reinit_completion(&ospi->dma_completion);
+ desc->callback = stm32_ospi_dma_callback;
+ desc->callback_param = &ospi->dma_completion;
+ cookie = dmaengine_submit(desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ goto out;
+
+ dma_async_issue_pending(dma_ch);
+
+ writel_relaxed(cr | CR_DMAEN, regs_base + OSPI_CR);
+
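+	/* Scale the completion timeout with the number of scatterlist entries */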
+ t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
+ if (!wait_for_completion_timeout(&ospi->dma_completion,
+ msecs_to_jiffies(t_out)))
+ err = -ETIMEDOUT;
+
+ if (err)
+ dmaengine_terminate_all(dma_ch);
+
+out:
+ writel_relaxed(cr & ~CR_DMAEN, regs_base + OSPI_CR);
+out_unmap:
+ spi_controller_dma_unmap_mem_op_data(ospi->ctrl, op, &sgt);
+
+ return err;
+}
+
+static int stm32_ospi_xfer(struct stm32_ospi *ospi, const struct spi_mem_op *op)
+{
+ u8 *buf;
+
+ if (!op->data.nbytes)
+ return 0;
+
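+	/*
+	 * Use memory-mapped access when configured, DMA for transfers larger
+	 * than 8 bytes when a channel is available, and fall back to FIFO
+	 * polling otherwise (or if the DMA transfer fails).
+	 */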
+ if (ospi->fmode == CR_FMODE_MM)
+ return stm32_ospi_tx_mm(ospi, op);
+ else if (((op->data.dir == SPI_MEM_DATA_IN && ospi->dma_chrx) ||
+ (op->data.dir == SPI_MEM_DATA_OUT && ospi->dma_chtx)) &&
+ op->data.nbytes > 8)
+ if (!stm32_ospi_tx_dma(ospi, op))
+ return 0;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ buf = op->data.buf.in;
+ else
+ buf = (u8 *)op->data.buf.out;
+
+ return stm32_ospi_poll(ospi, buf, op->data.nbytes,
+ op->data.dir == SPI_MEM_DATA_IN);
+}
+
+static int stm32_ospi_wait_poll_status(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr;
+
+ reinit_completion(&ospi->match_completion);
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ writel_relaxed(cr | CR_SMIE, regs_base + OSPI_CR);
+
+ if (!wait_for_completion_timeout(&ospi->match_completion,
+ msecs_to_jiffies(ospi->status_timeout))) {
+ u32 sr = readl_relaxed(regs_base + OSPI_SR);
+
+ /* Avoid false timeout */
+ if (!(sr & SR_SMF))
+ return -ETIMEDOUT;
+ }
+
+ writel_relaxed(FCR_CSMF, regs_base + OSPI_FCR);
+
+ return 0;
+}
+
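+/*
+ * Convert a spi-mem buswidth (number of I/O lines) to the CCR *MODE field
+ * encoding. For 1 and 2 lines the encoding equals the buswidth itself,
+ * hence the default case.
+ */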
+static int stm32_ospi_get_mode(u8 buswidth)
+{
+ switch (buswidth) {
+ case 8:
+ return CCR_BUSWIDTH_8;
+ case 4:
+ return CCR_BUSWIDTH_4;
+ default:
+ return buswidth;
+ }
+}
+
+static int stm32_ospi_send(struct spi_device *spi, const struct spi_mem_op *op)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(spi->controller);
+ void __iomem *regs_base = ospi->regs_base;
+ u32 ccr, cr, dcr2, tcr;
+ int timeout, err = 0, err_poll_status = 0;
+ u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+ dev_dbg(ospi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.val, op->data.nbytes);
+
+ cr = readl_relaxed(ospi->regs_base + OSPI_CR);
+ cr &= ~CR_CSSEL;
+ cr |= FIELD_PREP(CR_CSSEL, cs);
+ cr &= ~CR_FMODE_MASK;
+ cr |= FIELD_PREP(CR_FMODE_MASK, ospi->fmode);
+ writel_relaxed(cr, regs_base + OSPI_CR);
+
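+	/* DLR holds the number of data bytes to transfer, minus one */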
+ if (op->data.nbytes)
+ writel_relaxed(op->data.nbytes - 1, regs_base + OSPI_DLR);
+
+ /* set prescaler */
+ dcr2 = readl_relaxed(regs_base + OSPI_DCR2);
+ dcr2 |= FIELD_PREP(DCR2_PRESC_MASK, ospi->flash_presc[cs]);
+ writel_relaxed(dcr2, regs_base + OSPI_DCR2);
+
+ ccr = FIELD_PREP(CCR_IMODE_MASK, stm32_ospi_get_mode(op->cmd.buswidth));
+
+ if (op->addr.nbytes) {
+ ccr |= FIELD_PREP(CCR_ADMODE_MASK,
+ stm32_ospi_get_mode(op->addr.buswidth));
+ ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
+ }
+
+ tcr = TCR_SSHIFT;
+ if (op->dummy.buswidth && op->dummy.nbytes) {
+ tcr |= FIELD_PREP(TCR_DCYC_MASK,
+ op->dummy.nbytes * 8 / op->dummy.buswidth);
+ }
+ writel_relaxed(tcr, regs_base + OSPI_TCR);
+
+ if (op->data.nbytes) {
+ ccr |= FIELD_PREP(CCR_DMODE_MASK,
+ stm32_ospi_get_mode(op->data.buswidth));
+ }
+
+ writel_relaxed(ccr, regs_base + OSPI_CCR);
+
+ /* set instruction, must be set after ccr register update */
+ writel_relaxed(op->cmd.opcode, regs_base + OSPI_IR);
+
+ if (op->addr.nbytes && ospi->fmode != CR_FMODE_MM)
+ writel_relaxed(op->addr.val, regs_base + OSPI_AR);
+
+ if (ospi->fmode == CR_FMODE_APM)
+ err_poll_status = stm32_ospi_wait_poll_status(ospi, op);
+
+ err = stm32_ospi_xfer(ospi, op);
+
+	/*
+	 * Abort in:
+	 * - error case
+	 * - memory-mapped read: prefetching must be stopped if we read the
+	 *   last byte of the device (device size - fifo size). As the device
+	 *   size is not known, prefetching is always stopped.
+	 */
+ if (err || err_poll_status || ospi->fmode == CR_FMODE_MM)
+ goto abort;
+
+ /* Wait end of tx in indirect mode */
+ err = stm32_ospi_wait_cmd(ospi);
+ if (err)
+ goto abort;
+
+ return 0;
+
+abort:
+ timeout = stm32_ospi_abort(ospi);
+ writel_relaxed(FCR_CTCF | FCR_CSMF, regs_base + OSPI_FCR);
+
+ if (err || err_poll_status || timeout)
+ dev_err(ospi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
+ __func__, err, err_poll_status, timeout);
+
+ return err;
+}
+
+static int stm32_ospi_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+
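+	/* Program the status-match mask and value used in automatic-polling mode */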
+ writel_relaxed(mask, regs_base + OSPI_PSMKR);
+ writel_relaxed(match, regs_base + OSPI_PSMAR);
+ ospi->fmode = CR_FMODE_APM;
+ ospi->status_timeout = timeout_ms;
+
+ ret = stm32_ospi_send(mem->spi, op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+ if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
+ ospi->fmode = CR_FMODE_INDR;
+ else
+ ospi->fmode = CR_FMODE_INDW;
+
+ ret = stm32_ospi_send(mem->spi, op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
+
+ if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
+ return -EOPNOTSUPP;
+
+	/* Should never happen, as mm_base == NULL is an error probe exit condition */
+ if (!ospi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
+
+ if (!ospi->mm_size)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static ssize_t stm32_ospi_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
+ struct spi_mem_op op;
+ u32 addr_max;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+ /*
+ * Make a local copy of desc op_tmpl and complete dirmap rdesc
+ * spi_mem_op template with offs, len and *buf in order to get
+ * all needed transfer information into struct spi_mem_op
+ */
+ memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
+ dev_dbg(ospi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);
+
+ op.data.nbytes = len;
+ op.addr.val = desc->info.offset + offs;
+ op.data.buf.in = buf;
+
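+	/*
+	 * Use the memory-mapped region only if the whole access fits in it
+	 * and an address phase is defined; fall back to indirect read mode
+	 * otherwise.
+	 */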
+ addr_max = op.addr.val + op.data.nbytes + 1;
+ if (addr_max < ospi->mm_size && op.addr.buswidth)
+ ospi->fmode = CR_FMODE_MM;
+ else
+ ospi->fmode = CR_FMODE_INDR;
+
+ ret = stm32_ospi_send(desc->mem->spi, &op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret ?: len;
+}
+
+static int stm32_ospi_transfer_one_message(struct spi_controller *ctrl,
+ struct spi_message *msg)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
+ struct spi_transfer *transfer;
+ struct spi_device *spi = msg->spi;
+ struct spi_mem_op op;
+ struct gpio_desc *cs_gpiod = spi->cs_gpiod[ffs(spi->cs_index_mask) - 1];
+ int ret = 0;
+
+ if (!cs_gpiod)
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+
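+	/* Assert the GPIO chip select for the duration of the whole message */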
+ gpiod_set_value_cansleep(cs_gpiod, true);
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+ u8 dummy_bytes = 0;
+
+ memset(&op, 0, sizeof(op));
+
+ dev_dbg(ospi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
+ transfer->tx_buf, transfer->tx_nbits,
+ transfer->rx_buf, transfer->rx_nbits,
+ transfer->len, transfer->dummy_data);
+
+		/*
+		 * The OSPI hardware supports dummy-byte transfers natively.
+		 * If the current transfer carries dummy bytes, merge it with
+		 * the next transfer to take this OSPI block constraint into
+		 * account
+		 */
+ if (transfer->dummy_data) {
+ op.dummy.buswidth = transfer->tx_nbits;
+ op.dummy.nbytes = transfer->len;
+ dummy_bytes = transfer->len;
+
+			/* If this happens, the message is not correctly built */
+ if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
+ ret = -EINVAL;
+ goto end_of_transfer;
+ }
+
+ transfer = list_next_entry(transfer, transfer_list);
+ }
+
+ op.data.nbytes = transfer->len;
+
+ if (transfer->rx_buf) {
+ ospi->fmode = CR_FMODE_INDR;
+ op.data.buswidth = transfer->rx_nbits;
+ op.data.dir = SPI_MEM_DATA_IN;
+ op.data.buf.in = transfer->rx_buf;
+ } else {
+ ospi->fmode = CR_FMODE_INDW;
+ op.data.buswidth = transfer->tx_nbits;
+ op.data.dir = SPI_MEM_DATA_OUT;
+ op.data.buf.out = transfer->tx_buf;
+ }
+
+ ret = stm32_ospi_send(spi, &op);
+ if (ret)
+ goto end_of_transfer;
+
+ msg->actual_length += transfer->len + dummy_bytes;
+ }
+
+end_of_transfer:
+ gpiod_set_value_cansleep(cs_gpiod, false);
+
+ mutex_unlock(&ospi->lock);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctrl);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_setup(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->controller;
+ struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+ u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+ if (ctrl->busy)
+ return -EBUSY;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
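+	/* Compute the prescaler so the flash clock does not exceed max_speed_hz */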
+ ospi->flash_presc[cs] = DIV_ROUND_UP(ospi->clk_rate, spi->max_speed_hz) - 1;
+
+ mutex_lock(&ospi->lock);
+
+ ospi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_EN;
+ writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
+
+	/* Set the DCR1 device size (fsize) to the maximum address */
+ ospi->dcr_reg = DCR1_DEVSIZE_MASK | DCR1_DLYBYP;
+ writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
+
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+}
+
+/*
+ * No special host constraints, so use the default spi_mem_default_supports_op()
+ * to check the supported modes.
+ */
+static const struct spi_controller_mem_ops stm32_ospi_mem_ops = {
+ .exec_op = stm32_ospi_exec_op,
+ .dirmap_create = stm32_ospi_dirmap_create,
+ .dirmap_read = stm32_ospi_dirmap_read,
+ .poll_status = stm32_ospi_poll_status,
+};
+
+static int stm32_ospi_get_resources(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_ospi *ospi = platform_get_drvdata(pdev);
+ struct resource *res;
+ struct reserved_mem *rmem = NULL;
+ struct device_node *node;
+ int ret;
+
+ ospi->regs_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(ospi->regs_base))
+ return PTR_ERR(ospi->regs_base);
+
+ ospi->regs_phys_base = res->start;
+
+ ospi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ospi->clk))
+ return dev_err_probe(dev, PTR_ERR(ospi->clk),
+ "Can't get clock\n");
+
+ ospi->clk_rate = clk_get_rate(ospi->clk);
+ if (!ospi->clk_rate) {
+ dev_err(dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ ospi->irq = platform_get_irq(pdev, 0);
+ if (ospi->irq < 0)
+ return ospi->irq;
+
+ ret = devm_request_irq(dev, ospi->irq, stm32_ospi_irq, 0,
+ dev_name(dev), ospi);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+ return ret;
+ }
+
+ ospi->rstc = devm_reset_control_array_get_optional_exclusive(dev);
+ if (IS_ERR(ospi->rstc))
+ return dev_err_probe(dev, PTR_ERR(ospi->rstc),
+ "Can't get reset\n");
+
+ ospi->dma_chrx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ospi->dma_chrx)) {
+ ret = PTR_ERR(ospi->dma_chrx);
+ ospi->dma_chrx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma;
+ }
+
+ ospi->dma_chtx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ospi->dma_chtx)) {
+ ret = PTR_ERR(ospi->dma_chtx);
+ ospi->dma_chtx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma;
+ }
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+
+ if (rmem) {
+ ospi->mm_size = rmem->size;
+ ospi->mm_base = devm_ioremap(dev, rmem->base, rmem->size);
+ if (!ospi->mm_base) {
+ dev_err(dev, "unable to map memory region: %pa+%pa\n",
+ &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+
+ if (ospi->mm_size > STM32_OSPI_MAX_MMAP_SZ) {
+			dev_err(dev, "Memory map size outside bounds\n");
+ ret = -EINVAL;
+ goto err_dma;
+ }
+ } else {
+ dev_info(dev, "No memory-map region found\n");
+ }
+
+ init_completion(&ospi->data_completion);
+ init_completion(&ospi->match_completion);
+
+ return 0;
+
+err_dma:
+ dev_info(dev, "Can't get all resources (%d)\n", ret);
+
+ if (ospi->dma_chtx)
+ dma_release_channel(ospi->dma_chtx);
+ if (ospi->dma_chrx)
+ dma_release_channel(ospi->dma_chrx);
+
+ return ret;
+}
+
+static int stm32_ospi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct stm32_ospi *ospi;
+ struct dma_slave_config dma_cfg;
+ struct device_node *child;
+ int ret;
+ u8 spi_flash_count = 0;
+
+ /*
+ * Flash subnodes sanity check:
+ * 1 or 2 spi-nand/spi-nor flashes => supported
+ * All other flash node configuration => not supported
+ */
+ for_each_available_child_of_node(dev->of_node, child) {
+ if (of_device_is_compatible(child, "jedec,spi-nor") ||
+ of_device_is_compatible(child, "spi-nand"))
+ spi_flash_count++;
+ }
+
+ if (spi_flash_count == 0 || spi_flash_count > 2) {
+ dev_err(dev, "Incorrect DT flash node\n");
+ return -ENODEV;
+ }
+
+ ctrl = devm_spi_alloc_host(dev, sizeof(*ospi));
+ if (!ctrl)
+ return -ENOMEM;
+
+ ospi = spi_controller_get_devdata(ctrl);
+ ospi->ctrl = ctrl;
+
+ ospi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ospi);
+
+ ret = stm32_ospi_get_resources(pdev);
+ if (ret)
+ return ret;
+
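+	/* Both DMA directions target the OSPI data register (FIFO) */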
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.src_addr = ospi->regs_phys_base + OSPI_DR;
+ dma_cfg.dst_addr = ospi->regs_phys_base + OSPI_DR;
+ dma_cfg.src_maxburst = 4;
+ dma_cfg.dst_maxburst = 4;
+ stm32_ospi_dma_setup(ospi, &dma_cfg);
+
+ mutex_init(&ospi->lock);
+
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_TX_OCTAL | SPI_RX_OCTAL;
+ ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ ctrl->setup = stm32_ospi_setup;
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &stm32_ospi_mem_ops;
+ ctrl->use_gpio_descriptors = true;
+ ctrl->transfer_one_message = stm32_ospi_transfer_one_message;
+ ctrl->num_chipselect = STM32_OSPI_MAX_NORCHIP;
+ ctrl->dev.of_node = dev->of_node;
+
+ pm_runtime_enable(ospi->dev);
+ pm_runtime_set_autosuspend_delay(ospi->dev, STM32_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(ospi->dev);
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ goto err_pm_enable;
+
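+	/* Pulse the optional reset line to start from a known state */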
+ if (ospi->rstc) {
+ reset_control_assert(ospi->rstc);
+ udelay(2);
+ reset_control_deassert(ospi->rstc);
+ }
+
+ ret = spi_register_controller(ctrl);
+ if (ret) {
+ /* Disable ospi */
+ writel_relaxed(0, ospi->regs_base + OSPI_CR);
+ goto err_pm_resume;
+ }
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+
+err_pm_resume:
+ pm_runtime_put_sync_suspend(ospi->dev);
+
+err_pm_enable:
+ pm_runtime_force_suspend(ospi->dev);
+ mutex_destroy(&ospi->lock);
+
+ return ret;
+}
+
+static void stm32_ospi_remove(struct platform_device *pdev)
+{
+ struct stm32_ospi *ospi = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return;
+
+ spi_unregister_controller(ospi->ctrl);
+ /* Disable ospi */
+ writel_relaxed(0, ospi->regs_base + OSPI_CR);
+ mutex_destroy(&ospi->lock);
+
+ if (ospi->dma_chtx)
+ dma_release_channel(ospi->dma_chtx);
+ if (ospi->dma_chrx)
+ dma_release_channel(ospi->dma_chrx);
+
+ pm_runtime_put_sync_suspend(ospi->dev);
+ pm_runtime_force_suspend(ospi->dev);
+}
+
+static int __maybe_unused stm32_ospi_suspend(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return pm_runtime_force_suspend(ospi->dev);
+}
+
+static int __maybe_unused stm32_ospi_resume(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+
+ ret = pm_runtime_force_resume(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
+ writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_ospi_runtime_suspend(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ospi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_ospi_runtime_resume(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(ospi->clk);
+}
+
+static const struct dev_pm_ops stm32_ospi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_ospi_suspend, stm32_ospi_resume)
+ SET_RUNTIME_PM_OPS(stm32_ospi_runtime_suspend,
+ stm32_ospi_runtime_resume, NULL)
+};
+
+static const struct of_device_id stm32_ospi_of_match[] = {
+ { .compatible = "st,stm32mp25-ospi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_ospi_of_match);
+
+static struct platform_driver stm32_ospi_driver = {
+ .probe = stm32_ospi_probe,
+ .remove = stm32_ospi_remove,
+ .driver = {
+ .name = "stm32-ospi",
+ .pm = &stm32_ospi_pm_ops,
+ .of_match_table = stm32_ospi_of_match,
+ },
+};
+module_platform_driver(stm32_ospi_driver);
+
+MODULE_DESCRIPTION("STMicroelectronics STM32 OCTO SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 540b6948b24d..9691197bbf5a 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -362,11 +362,6 @@ static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
u32 ccr, cr;
int timeout, err = 0, err_poll_status = 0;
- dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth,
- op->addr.val, op->data.nbytes);
-
cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_PRESC_MASK & ~CR_FSEL;
cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 2bd25c75f881..5232483c4a3a 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -540,10 +540,6 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
int err = 0, i;
u8 *tmpbuf;
- dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth);
-
zynq_qspi_chipselect(mem->spi, true);
zynq_qspi_config_op(xqspi, mem->spi, op);
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index d800d79f62a7..595b6dc10845 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -82,7 +82,6 @@
#define GQSPI_GENFIFO_RX 0x00020000
#define GQSPI_GENFIFO_STRIPE 0x00040000
#define GQSPI_GENFIFO_POLL 0x00080000
-#define GQSPI_GENFIFO_EXP_START 0x00000100
#define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004
#define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002
#define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001
@@ -580,6 +579,8 @@ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
}
+
+ dev_dbg(xqspi->dev, "config speed %u\n", req_speed_hz);
return 0;
}
@@ -670,69 +671,77 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
u32 genfifoentry)
{
- u32 transfer_len = 0;
+ u32 transfer_len, tempcount, exponent;
+ u8 imm_data;
- if (xqspi->txbuf) {
- genfifoentry &= ~GQSPI_GENFIFO_RX;
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
- genfifoentry |= GQSPI_GENFIFO_TX;
- transfer_len = xqspi->bytes_to_transfer;
- } else if (xqspi->rxbuf) {
- genfifoentry &= ~GQSPI_GENFIFO_TX;
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ if (xqspi->rxbuf) {
genfifoentry |= GQSPI_GENFIFO_RX;
if (xqspi->mode == GQSPI_MODE_DMA)
transfer_len = xqspi->dma_rx_bytes;
else
transfer_len = xqspi->bytes_to_receive;
} else {
- /* Sending dummy circles here */
- genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
transfer_len = xqspi->bytes_to_transfer;
}
+
+ if (xqspi->txbuf)
+ genfifoentry |= GQSPI_GENFIFO_TX;
+
genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
xqspi->genfifoentry = genfifoentry;
-
- if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
- genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= transfer_len;
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
- } else {
- int tempcount = transfer_len;
- u32 exponent = 8; /* 2^8 = 256 */
- u8 imm_data = tempcount & 0xFF;
-
- tempcount &= ~(tempcount & 0xFF);
- /* Immediate entry */
- if (tempcount != 0) {
- /* Exponent entries */
- genfifoentry |= GQSPI_GENFIFO_EXP;
- while (tempcount != 0) {
- if (tempcount & GQSPI_GENFIFO_EXP_START) {
- genfifoentry &=
- ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= exponent;
- zynqmp_gqspi_write(xqspi,
- GQSPI_GEN_FIFO_OFST,
- genfifoentry);
- }
- tempcount = tempcount >> 1;
- exponent++;
- }
- }
- if (imm_data != 0) {
- genfifoentry &= ~GQSPI_GENFIFO_EXP;
- genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= (u8)(imm_data & 0xFF);
+ dev_dbg(xqspi->dev, "genfifo %05x transfer_len %u\n",
+ genfifoentry, transfer_len);
+
+	/*
+	 * Exponent entries: emit one GENFIFO entry per set bit at or above
+	 * bit 8 of the transfer length, each encoding a 2^exponent byte
+	 * transfer; the low 8 bits are sent as a final immediate entry.
+	 */
+ imm_data = transfer_len;
+ tempcount = transfer_len >> 8;
+ exponent = 8;
+ genfifoentry |= GQSPI_GENFIFO_EXP;
+ while (tempcount) {
+ if (tempcount & 1)
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
- genfifoentry);
- }
- }
- if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) {
- /* Dummy generic FIFO entry */
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
+ genfifoentry | exponent);
+ tempcount >>= 1;
+ exponent++;
}
+
+ /* Immediate entry */
+ genfifoentry &= ~GQSPI_GENFIFO_EXP;
+ if (imm_data)
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
+ genfifoentry | imm_data);
+
+ /* Dummy generic FIFO entry */
+ if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf)
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0);
+}
+
+/**
+ * zynqmp_qspi_disable_dma() - Disable DMA mode
+ * @xqspi: GQSPI instance
+ */
+static void zynqmp_qspi_disable_dma(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->mode = GQSPI_MODE_IO;
+}
+
+/**
+ * zynqmp_qspi_enable_dma() - Enable DMA mode
+ * @xqspi: GQSPI instance
+ */
+static void zynqmp_qspi_enable_dma(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->mode = GQSPI_MODE_DMA;
}
/**
@@ -744,7 +753,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
*/
static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
{
- u32 config_reg, genfifoentry;
+ u32 genfifoentry;
dma_unmap_single(xqspi->dev, xqspi->dma_addr,
xqspi->dma_rx_bytes, DMA_FROM_DEVICE);
@@ -758,9 +767,7 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
if (xqspi->bytes_to_receive > 0) {
/* Switch to IO mode,for remaining bytes to receive */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ zynqmp_qspi_disable_dma(xqspi);
/* Initiate the transfer of remaining bytes */
genfifoentry = xqspi->genfifoentry;
@@ -799,7 +806,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
{
struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id;
- irqreturn_t ret = IRQ_NONE;
u32 status, mask, dma_status = 0;
status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
@@ -814,27 +820,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
dma_status);
}
- if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
+ if (!mask && !dma_status)
+ return IRQ_NONE;
+
+ if (mask & GQSPI_ISR_TXNOT_FULL_MASK)
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL);
- ret = IRQ_HANDLED;
- }
- if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
+ if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK)
zynqmp_process_dma_irq(xqspi);
- ret = IRQ_HANDLED;
- } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
- (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
+ else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+ (mask & GQSPI_IER_GENFIFOEMPTY_MASK))
zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
- ret = IRQ_HANDLED;
- }
if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 &&
((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
complete(&xqspi->data_completion);
- ret = IRQ_HANDLED;
}
- return ret;
+ return IRQ_HANDLED;
}
/**
@@ -845,17 +848,14 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
*/
static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
{
- u32 rx_bytes, rx_rem, config_reg;
+ u32 rx_bytes, rx_rem;
dma_addr_t addr;
u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
if (xqspi->bytes_to_receive < 8 ||
((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
/* Setting to IO mode */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
- xqspi->mode = GQSPI_MODE_IO;
+ zynqmp_qspi_disable_dma(xqspi);
xqspi->dma_rx_bytes = 0;
return 0;
}
@@ -878,14 +878,7 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST,
((u32)addr) & 0xfff);
- /* Enabling the DMA mode */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
-
- /* Switch to DMA mode */
- xqspi->mode = GQSPI_MODE_DMA;
+ zynqmp_qspi_enable_dma(xqspi);
/* Write the number of bytes to transfer */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
@@ -905,18 +898,10 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
u32 genfifoentry)
{
- u32 config_reg;
-
zynqmp_qspi_fillgenfifo(xqspi, tx_nbits, genfifoentry);
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH);
- if (xqspi->mode == GQSPI_MODE_DMA) {
- config_reg = zynqmp_gqspi_read(xqspi,
- GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
- config_reg);
- xqspi->mode = GQSPI_MODE_IO;
- }
+ if (xqspi->mode == GQSPI_MODE_DMA)
+ zynqmp_qspi_disable_dma(xqspi);
}
/**
@@ -1059,18 +1044,14 @@ static unsigned long zynqmp_qspi_timeout(struct zynqmp_qspi *xqspi, u8 bits,
static int zynqmp_qspi_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct zynqmp_qspi *xqspi = spi_controller_get_devdata
- (mem->spi->controller);
+ struct zynqmp_qspi *xqspi =
+ spi_controller_get_devdata(mem->spi->controller);
unsigned long timeout;
int err = 0, i;
u32 genfifoentry = 0;
u16 opcode = op->cmd.opcode;
u64 opaddr;
- dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth);
-
mutex_lock(&xqspi->op_lock);
zynqmp_qspi_config_op(xqspi, op);
zynqmp_qspi_chipselect(mem->spi, false);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a7a4647717d4..90e27729ef6b 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -31,6 +31,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
+#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>
@@ -42,7 +43,7 @@ EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
-static DEFINE_IDR(spi_master_idr);
+static DEFINE_IDR(spi_controller_idr);
static void spidev_release(struct device *dev)
{
@@ -305,7 +306,7 @@ static const struct attribute_group spi_controller_statistics_group = {
.attrs = spi_controller_statistics_attrs,
};
-static const struct attribute_group *spi_master_groups[] = {
+static const struct attribute_group *spi_controller_groups[] = {
&spi_controller_statistics_group,
NULL,
};
@@ -1106,7 +1107,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi_toggle_csgpiod(spi, idx, enable, activate);
}
}
- /* Some SPI masters need both GPIO CS & slave_select */
+ /* Some SPI controllers need both GPIO CS & ->set_cs() */
if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
@@ -1495,10 +1496,7 @@ static void _spi_transfer_delay_ns(u32 ns)
} else {
u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
- if (us <= 10)
- udelay(us);
- else
- usleep_range(us, us + DIV_ROUND_UP(us, 10));
+ fsleep(us);
}
}
@@ -2534,7 +2532,7 @@ err_out:
* @ctlr: Pointer to spi_controller device
*
* Registers an spi_device for each child node of controller node which
- * represents a valid SPI slave.
+ * represents a valid SPI target device.
*/
static void of_register_spi_devices(struct spi_controller *ctlr)
{
@@ -2819,7 +2817,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
if (!lookup.max_speed_hz &&
ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
- /* Apple does not use _CRS but nested devices for SPI slaves */
+ /* Apple does not use _CRS but nested devices for SPI target devices */
acpi_spi_parse_apple_properties(adev, &lookup);
}
@@ -2911,7 +2909,7 @@ static void acpi_register_spi_devices(struct spi_controller *ctlr)
SPI_ACPI_ENUMERATE_MAX_DEPTH,
acpi_spi_add_device, NULL, ctlr, NULL);
if (ACPI_FAILURE(status))
- dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
+ dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
@@ -2925,16 +2923,15 @@ static void spi_controller_release(struct device *dev)
kfree(ctlr);
}
-static const struct class spi_master_class = {
+static const struct class spi_controller_class = {
.name = "spi_master",
.dev_release = spi_controller_release,
- .dev_groups = spi_master_groups,
+ .dev_groups = spi_controller_groups,
};
#ifdef CONFIG_SPI_SLAVE
/**
- * spi_target_abort - abort the ongoing transfer request on an SPI slave
- * controller
+ * spi_target_abort - abort the ongoing transfer request on an SPI target controller
* @spi: device used for the current transfer
*/
int spi_target_abort(struct spi_device *spi)
@@ -2954,9 +2951,13 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
struct device *child;
+ int ret;
child = device_find_any_child(&ctlr->dev);
- return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ put_device(child);
+
+ return ret;
}
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
@@ -2975,13 +2976,13 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
child = device_find_any_child(&ctlr->dev);
if (child) {
- /* Remove registered slave */
+ /* Remove registered target device */
device_unregister(child);
put_device(child);
}
if (strcmp(name, "(null)")) {
- /* Register new slave */
+ /* Register new target device */
spi = spi_alloc_device(ctlr);
if (!spi)
return -ENOMEM;
@@ -3000,40 +3001,40 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(slave);
-static struct attribute *spi_slave_attrs[] = {
+static struct attribute *spi_target_attrs[] = {
&dev_attr_slave.attr,
NULL,
};
-static const struct attribute_group spi_slave_group = {
- .attrs = spi_slave_attrs,
+static const struct attribute_group spi_target_group = {
+ .attrs = spi_target_attrs,
};
-static const struct attribute_group *spi_slave_groups[] = {
+static const struct attribute_group *spi_target_groups[] = {
&spi_controller_statistics_group,
- &spi_slave_group,
+ &spi_target_group,
NULL,
};
-static const struct class spi_slave_class = {
+static const struct class spi_target_class = {
.name = "spi_slave",
.dev_release = spi_controller_release,
- .dev_groups = spi_slave_groups,
+ .dev_groups = spi_target_groups,
};
#else
-extern struct class spi_slave_class; /* dummy */
+extern struct class spi_target_class; /* dummy */
#endif
/**
- * __spi_alloc_controller - allocate an SPI master or slave controller
+ * __spi_alloc_controller - allocate an SPI host or target controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
* memory is in the driver_data field of the returned device, accessible
* with spi_controller_get_devdata(); the memory is cacheline aligned;
* drivers granting DMA access to portions of their private data need to
* round up @size using ALIGN(size, dma_get_cache_alignment()).
- * @slave: flag indicating whether to allocate an SPI master (false) or SPI
- * slave (true) controller
+ * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
+ * controller
* Context: can sleep
*
* This call is used only by SPI controller drivers, which are the
@@ -3050,7 +3051,7 @@ extern struct class spi_slave_class; /* dummy */
* Return: the SPI controller structure on success, else NULL.
*/
struct spi_controller *__spi_alloc_controller(struct device *dev,
- unsigned int size, bool slave)
+ unsigned int size, bool target)
{
struct spi_controller *ctlr;
size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
@@ -3071,11 +3072,11 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
mutex_init(&ctlr->add_lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = 1;
- ctlr->slave = slave;
- if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
- ctlr->dev.class = &spi_slave_class;
+ ctlr->target = target;
+ if (IS_ENABLED(CONFIG_SPI_SLAVE) && target)
+ ctlr->dev.class = &spi_target_class;
else
- ctlr->dev.class = &spi_master_class;
+ ctlr->dev.class = &spi_controller_class;
ctlr->dev.parent = dev;
pm_suspend_ignore_children(&ctlr->dev, true);
spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
@@ -3093,7 +3094,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr)
* __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
* @dev: physical device of SPI controller
* @size: how much zeroed driver-private data to allocate
- * @slave: whether to allocate an SPI master (false) or SPI slave (true)
+ * @target: whether to allocate an SPI host (false) or SPI target (true) controller
* Context: can sleep
*
* Allocate an SPI controller and automatically release a reference on it
@@ -3106,7 +3107,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr)
*/
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
unsigned int size,
- bool slave)
+ bool target)
{
struct spi_controller **ptr, *ctlr;
@@ -3115,7 +3116,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
if (!ptr)
return NULL;
- ctlr = __spi_alloc_controller(dev, size, slave);
+ ctlr = __spi_alloc_controller(dev, size, target);
if (ctlr) {
ctlr->devm_allocated = true;
*ptr = ctlr;
@@ -3129,8 +3130,8 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
/**
- * spi_get_gpio_descs() - grab chip select GPIOs for the master
- * @ctlr: The SPI master to grab GPIO descriptors for
+ * spi_get_gpio_descs() - grab chip select GPIOs for the controller
+ * @ctlr: The SPI controller to grab GPIO descriptors for
*/
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
@@ -3228,7 +3229,7 @@ static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int e
int id;
mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
+ id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
return id == -ENOSPC ? -EBUSY : id;
@@ -3377,7 +3378,7 @@ destroy_queue:
spi_destroy_queue(ctlr);
free_bus_id:
mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
+ idr_remove(&spi_controller_idr, ctlr->bus_num);
mutex_unlock(&board_lock);
return status;
}
@@ -3389,8 +3390,7 @@ static void devm_spi_unregister(struct device *dev, void *res)
}
/**
- * devm_spi_register_controller - register managed SPI host or target
- * controller
+ * devm_spi_register_controller - register managed SPI host or target controller
* @dev: device managing SPI controller
* @ctlr: initialized controller, originally from spi_alloc_host() or
* spi_alloc_target()
@@ -3430,7 +3430,7 @@ static int __unregister(struct device *dev, void *null)
}
/**
- * spi_unregister_controller - unregister SPI master or slave controller
+ * spi_unregister_controller - unregister SPI host or target controller
* @ctlr: the controller being unregistered
* Context: can sleep
*
@@ -3454,7 +3454,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
- found = idr_find(&spi_master_idr, id);
+ found = idr_find(&spi_controller_idr, id);
mutex_unlock(&board_lock);
if (ctlr->queued) {
if (spi_destroy_queue(ctlr))
@@ -3469,7 +3469,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
/* Free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
- idr_remove(&spi_master_idr, id);
+ idr_remove(&spi_controller_idr, id);
mutex_unlock(&board_lock);
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
@@ -4158,6 +4158,15 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (_spi_xfer_word_delay_update(xfer, spi))
return -EINVAL;
+
+ /* Make sure controller supports required offload features. */
+ if (xfer->offload_flags) {
+ if (!message->offload)
+ return -EINVAL;
+
+ if (xfer->offload_flags & ~message->offload->xfer_flags)
+ return -EINVAL;
+ }
}
message->status = -EINPROGRESS;
@@ -4613,7 +4622,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
* spi_bus_lock - obtain a lock for exclusive SPI bus usage
- * @ctlr: SPI bus master that should be locked for exclusive bus access
+ * @ctlr: SPI bus controller that should be locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -4644,7 +4653,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
* spi_bus_unlock - release the lock for exclusive SPI bus usage
- * @ctlr: SPI bus master that was locked for exclusive bus access
+ * @ctlr: SPI bus controller that was locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -4761,9 +4770,9 @@ static struct spi_controller *of_find_spi_controller_by_node(struct device_node
{
struct device *dev;
- dev = class_find_device_by_of_node(&spi_master_class, node);
+ dev = class_find_device_by_of_node(&spi_controller_class, node);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device_by_of_node(&spi_slave_class, node);
+ dev = class_find_device_by_of_node(&spi_target_class, node);
if (!dev)
return NULL;
@@ -4843,10 +4852,10 @@ struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev
{
struct device *dev;
- dev = class_find_device(&spi_master_class, NULL, adev,
+ dev = class_find_device(&spi_controller_class, NULL, adev,
spi_acpi_controller_match);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device(&spi_slave_class, NULL, adev,
+ dev = class_find_device(&spi_target_class, NULL, adev,
spi_acpi_controller_match);
if (!dev)
return NULL;
@@ -4916,12 +4925,12 @@ static int __init spi_init(void)
if (status < 0)
goto err1;
- status = class_register(&spi_master_class);
+ status = class_register(&spi_controller_class);
if (status < 0)
goto err2;
if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
- status = class_register(&spi_slave_class);
+ status = class_register(&spi_target_class);
if (status < 0)
goto err3;
}
@@ -4934,7 +4943,7 @@ static int __init spi_init(void)
return 0;
err3:
- class_unregister(&spi_master_class);
+ class_unregister(&spi_controller_class);
err2:
bus_unregister(&spi_bus_type);
err1:
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 58ae4304fdab..6108959c28d9 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -706,6 +706,7 @@ static const struct spi_device_id spidev_spi_ids[] = {
{ .name = /* cisco */ "spi-petra" },
{ .name = /* dh */ "dhcom-board" },
{ .name = /* elgin */ "jg10309-01" },
+	{ .name = /* gocontroll */ "moduline-module-slot" },
{ .name = /* lineartechnology */ "ltc2488" },
{ .name = /* lwn */ "bk4" },
{ .name = /* lwn */ "bk4-spi" },
@@ -737,6 +738,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
{ .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
+	{ .compatible = "gocontroll,moduline-module-slot", .data = &spidev_of_check },
{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
{ .compatible = "lwn,bk4", .data = &spidev_of_check },
{ .compatible = "lwn,bk4-spi", .data = &spidev_of_check },