path: root/drivers/spi
Diffstat (limited to 'drivers/spi')
-rw-r--r-- drivers/spi/Kconfig | 60
-rw-r--r-- drivers/spi/Makefile | 10
-rw-r--r-- drivers/spi/atmel-quadspi.c | 1002
-rw-r--r-- drivers/spi/spi-amd-pci.c | 70
-rw-r--r-- drivers/spi/spi-amd.c | 255
-rw-r--r-- drivers/spi/spi-amd.h | 44
-rw-r--r-- drivers/spi/spi-amlogic-spifc-a1.c | 7
-rw-r--r-- drivers/spi/spi-aspeed-smc.c | 7
-rw-r--r-- drivers/spi/spi-axi-spi-engine.c | 404
-rw-r--r-- drivers/spi/spi-bcm2835.c | 18
-rw-r--r-- drivers/spi/spi-cadence-quadspi.c | 63
-rw-r--r-- drivers/spi/spi-cadence-xspi.c | 2
-rw-r--r-- drivers/spi/spi-cavium-thunderx.c | 4
-rw-r--r-- drivers/spi/spi-cs42l43.c | 4
-rw-r--r-- drivers/spi/spi-dw-core.c | 12
-rw-r--r-- drivers/spi/spi-fsi.c | 13
-rw-r--r-- drivers/spi/spi-fsl-dspi.c | 46
-rw-r--r-- drivers/spi/spi-fsl-lpspi.c | 2
-rw-r--r-- drivers/spi/spi-fsl-qspi.c | 122
-rw-r--r-- drivers/spi/spi-fsl-spi.c | 2
-rw-r--r-- drivers/spi/spi-gpio.c | 47
-rw-r--r-- drivers/spi/spi-imx.c | 7
-rw-r--r-- drivers/spi/spi-intel-pci.c | 8
-rw-r--r-- drivers/spi/spi-intel-platform.c | 9
-rw-r--r-- drivers/spi/spi-intel.c | 9
-rw-r--r-- drivers/spi/spi-intel.h | 4
-rw-r--r-- drivers/spi/spi-kspi2.c | 431
-rw-r--r-- drivers/spi/spi-loopback-test.c | 10
-rw-r--r-- drivers/spi/spi-mem.c | 79
-rw-r--r-- drivers/spi/spi-meson-spicc.c | 241
-rw-r--r-- drivers/spi/spi-microchip-core-qspi.c | 26
-rw-r--r-- drivers/spi/spi-microchip-core.c | 41
-rw-r--r-- drivers/spi/spi-mt65xx.c | 24
-rw-r--r-- drivers/spi/spi-mtk-snfi.c | 3
-rw-r--r-- drivers/spi/spi-mux.c | 4
-rw-r--r-- drivers/spi/spi-mxic.c | 3
-rw-r--r-- drivers/spi/spi-mxs.c | 2
-rw-r--r-- drivers/spi/spi-npcm-fiu.c | 5
-rw-r--r-- drivers/spi/spi-nxp-fspi.c | 201
-rw-r--r-- drivers/spi/spi-offload-trigger-pwm.c | 169
-rw-r--r-- drivers/spi/spi-offload.c | 465
-rw-r--r-- drivers/spi/spi-omap2-mcspi.c | 11
-rw-r--r-- drivers/spi/spi-pci1xxxx.c | 24
-rw-r--r-- drivers/spi/spi-pxa2xx.c | 88
-rw-r--r-- drivers/spi/spi-qpic-snand.c | 1644
-rw-r--r-- drivers/spi/spi-realtek-rtl-snand.c | 1
-rw-r--r-- drivers/spi/spi-rockchip-sfc.c | 233
-rw-r--r-- drivers/spi/spi-rockchip.c | 2
-rw-r--r-- drivers/spi/spi-rpc-if.c | 16
-rw-r--r-- drivers/spi/spi-s3c64xx.c | 4
-rw-r--r-- drivers/spi/spi-sc18is602.c | 34
-rw-r--r-- drivers/spi/spi-sg2044-nor.c | 488
-rw-r--r-- drivers/spi/spi-sh-msiof.c | 397
-rw-r--r-- drivers/spi/spi-sn-f-ospi.c | 11
-rw-r--r-- drivers/spi/spi-stm32-ospi.c | 1067
-rw-r--r-- drivers/spi/spi-stm32-qspi.c | 5
-rw-r--r-- drivers/spi/spi-sun4i.c | 6
-rw-r--r-- drivers/spi/spi-tegra114.c | 6
-rw-r--r-- drivers/spi/spi-tegra210-quad.c | 286
-rw-r--r-- drivers/spi/spi-ti-qspi.c | 19
-rw-r--r-- drivers/spi/spi-xcomm.c | 8
-rw-r--r-- drivers/spi/spi-zynq-qspi.c | 30
-rw-r--r-- drivers/spi/spi-zynqmp-gqspi.c | 186
-rw-r--r-- drivers/spi/spi.c | 179
-rw-r--r-- drivers/spi/spidev.c | 32
65 files changed, 7489 insertions(+), 1223 deletions(-)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f51f9466e518..c51da3fc3604 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -55,6 +55,9 @@ config SPI_MEM
This extension is meant to simplify interaction with SPI memories
by providing a high-level interface to send memory-like commands.
+config SPI_OFFLOAD
+ bool
+
comment "SPI Master Controller Drivers"
config SPI_AIROHA_SNFI
@@ -176,6 +179,7 @@ config SPI_AU1550
config SPI_AXI_SPI_ENGINE
tristate "Analog Devices AXI SPI Engine controller"
depends on HAS_IOMEM
+ select SPI_OFFLOAD
help
This enables support for the Analog Devices AXI SPI Engine SPI controller.
It is part of the SPI Engine framework that is used in some Analog Devices
@@ -542,6 +546,18 @@ config SPI_JCORE
This enables support for the SPI master controller in the J-Core
synthesizable, open source SoC.
+config SPI_KSPI2
+ tristate "Support for KEBA SPI master type 2 hardware"
+ depends on HAS_IOMEM
+ depends on KEBA_CP500 || COMPILE_TEST
+ select AUXILIARY_BUS
+ help
+ This driver supports the KEBA SPI master type 2 FPGA
+ implementation, as found on CP500 devices, for example.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-kspi2.
+
config SPI_LM70_LLP
tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
depends on PARPORT
@@ -920,6 +936,15 @@ config SPI_QCOM_QSPI
help
QSPI(Quad SPI) driver for Qualcomm QSPI controller.
+config SPI_QPIC_SNAND
+ tristate "QPIC SNAND controller"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on MTD
+ help
+ QPIC_SNAND (QPIC SPI NAND) driver for the Qualcomm QPIC
+ controller. The QPIC controller supports both parallel NAND
+ and serial NAND. This config enables the serial NAND driver
+ for the QPIC controller.
+
config SPI_QUP
tristate "Qualcomm SPI controller with QUP interface"
depends on ARCH_QCOM || COMPILE_TEST
@@ -1009,6 +1034,15 @@ config SPI_SN_F_OSPI
for connecting an SPI Flash memory over up to 8-bit wide bus.
It supports indirect access mode only.
+config SPI_SG2044_NOR
+ tristate "SG2044 SPI NOR Controller"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ This enables support for the SG2044 SPI NOR controller,
+ which supports Dual/Quad read and write operations as well
+ as devices with 3-byte and 4-byte addressing.
+
config SPI_SPRD
tristate "Spreadtrum SPI controller"
depends on ARCH_SPRD || COMPILE_TEST
@@ -1033,6 +1067,16 @@ config SPI_STM32
is not available, the driver automatically falls back to
PIO mode.
+config SPI_STM32_OSPI
+ tristate "STMicroelectronics STM32 OCTO SPI controller"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on SPI_MEM
+ help
+ This enables support for the Octo SPI controller in master mode.
+ This driver does not support generic SPI; it only supports the
+ spi-mem interface.
+
config SPI_STM32_QSPI
tristate "STMicroelectronics STM32 QUAD SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
@@ -1222,7 +1266,9 @@ config SPI_ZYNQMP_GQSPI
config SPI_AMD
tristate "AMD SPI controller"
- depends on SPI_MASTER || COMPILE_TEST
+ depends on PCI
+ depends on SPI_MASTER || X86 || COMPILE_TEST
+ depends on SPI_MEM
help
Enables SPI controller driver for AMD SoC.
@@ -1305,4 +1351,16 @@ endif # SPI_SLAVE
config SPI_DYNAMIC
def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+if SPI_OFFLOAD
+
+comment "SPI Offload triggers"
+
+config SPI_OFFLOAD_TRIGGER_PWM
+ tristate "SPI offload trigger using PWM"
+ depends on PWM
+ help
+ Generic SPI offload trigger implemented using PWM output.
+
+endif # SPI_OFFLOAD
+
endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index aea5e54de195..4ea89f6fc531 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -10,6 +10,7 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
obj-$(CONFIG_SPI_MASTER) += spi.o
obj-$(CONFIG_SPI_MEM) += spi-mem.o
obj-$(CONFIG_SPI_MUX) += spi-mux.o
+obj-$(CONFIG_SPI_OFFLOAD) += spi-offload.o
obj-$(CONFIG_SPI_SPIDEV) += spidev.o
obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o
obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
+obj-$(CONFIG_SPI_KSPI2) += spi-kspi2.o
obj-$(CONFIG_SPI_LJCA) += spi-ljca.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_LOONGSON_CORE) += spi-loongson-core.o
@@ -115,6 +117,7 @@ obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
+obj-$(CONFIG_SPI_QPIC_SNAND) += spi-qpic-snand.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
@@ -133,9 +136,11 @@ obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
obj-$(CONFIG_SPI_SN_F_OSPI) += spi-sn-f-ospi.o
+obj-$(CONFIG_SPI_SG2044_NOR) += spi-sg2044-nor.o
obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
+obj-$(CONFIG_SPI_STM32_OSPI) += spi-stm32-ospi.o
obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
@@ -157,8 +162,11 @@ obj-$(CONFIG_SPI_XLP) += spi-xlp.o
obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o
obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
-obj-$(CONFIG_SPI_AMD) += spi-amd.o
+obj-$(CONFIG_SPI_AMD) += spi-amd.o spi-amd-pci.o
# SPI slave protocol handlers
obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o
+
+# SPI offload triggers
+obj-$(CONFIG_SPI_OFFLOAD_TRIGGER_PWM) += spi-offload-trigger-pwm.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 316bce577081..2f6797324227 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -11,11 +11,15 @@
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -34,6 +38,7 @@
#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
#define QSPI_IMR 0x001c /* Interrupt Mask Register */
#define QSPI_SCR 0x0020 /* Serial Clock Register */
+#define QSPI_SR2 0x0024 /* SAMA7G5 Status Register */
#define QSPI_IAR 0x0030 /* Instruction Address Register */
#define QSPI_ICR 0x0034 /* Instruction Code Register */
@@ -44,16 +49,32 @@
#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
#define QSPI_SKR 0x0044 /* Scrambling Key Register */
+#define QSPI_REFRESH 0x0050 /* Refresh Register */
+#define QSPI_WRACNT 0x0054 /* Write Access Counter Register */
+#define QSPI_DLLCFG 0x0058 /* DLL Configuration Register */
+#define QSPI_PCALCFG 0x005C /* Pad Calibration Configuration Register */
+#define QSPI_PCALBP 0x0060 /* Pad Calibration Bypass Register */
+#define QSPI_TOUT 0x0064 /* Timeout Register */
+
#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */
#define QSPI_VERSION 0x00FC /* Version Register */
+#define SAMA7G5_QSPI0_MAX_SPEED_HZ 200000000
+#define SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ 133000000
/* Bitfields in QSPI_CR (Control Register) */
#define QSPI_CR_QSPIEN BIT(0)
#define QSPI_CR_QSPIDIS BIT(1)
+#define QSPI_CR_DLLON BIT(2)
+#define QSPI_CR_DLLOFF BIT(3)
+#define QSPI_CR_STPCAL BIT(4)
+#define QSPI_CR_SRFRSH BIT(5)
#define QSPI_CR_SWRST BIT(7)
+#define QSPI_CR_UPDCFG BIT(8)
+#define QSPI_CR_STTFR BIT(9)
+#define QSPI_CR_RTOUT BIT(10)
#define QSPI_CR_LASTXFER BIT(24)
/* Bitfields in QSPI_MR (Mode Register) */
@@ -61,12 +82,14 @@
#define QSPI_MR_LLB BIT(1)
#define QSPI_MR_WDRBT BIT(2)
#define QSPI_MR_SMRM BIT(3)
+#define QSPI_MR_DQSDLYEN BIT(3)
#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_OENSD BIT(15)
#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
@@ -80,6 +103,13 @@
#define QSPI_SR_CSR BIT(8)
#define QSPI_SR_CSS BIT(9)
#define QSPI_SR_INSTRE BIT(10)
+#define QSPI_SR_LWRA BIT(11)
+#define QSPI_SR_QITF BIT(12)
+#define QSPI_SR_QITR BIT(13)
+#define QSPI_SR_CSFA BIT(14)
+#define QSPI_SR_CSRA BIT(15)
+#define QSPI_SR_RFRSHD BIT(16)
+#define QSPI_SR_TOUT BIT(17)
#define QSPI_SR_QSPIENS BIT(24)
#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
@@ -92,9 +122,22 @@
#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
+/* Bitfields in QSPI_SR2 (SAMA7G5 Status Register) */
+#define QSPI_SR2_SYNCBSY BIT(0)
+#define QSPI_SR2_QSPIENS BIT(1)
+#define QSPI_SR2_CSS BIT(2)
+#define QSPI_SR2_RBUSY BIT(3)
+#define QSPI_SR2_HIDLE BIT(4)
+#define QSPI_SR2_DLOCK BIT(5)
+#define QSPI_SR2_CALBSY BIT(6)
+
+/* Bitfields in QSPI_IAR (Instruction Address Register) */
+#define QSPI_IAR_ADDR GENMASK(31, 0)
+
/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
#define QSPI_ICR_INST_MASK GENMASK(7, 0)
#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_INST_MASK_SAMA7G5 GENMASK(15, 0)
#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
@@ -107,6 +150,9 @@
#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
+#define QSPI_IFR_WIDTH_OCT_OUTPUT (7 << 0)
+#define QSPI_IFR_WIDTH_OCT_IO (8 << 0)
+#define QSPI_IFR_WIDTH_OCT_CMD (9 << 0)
#define QSPI_IFR_INSTEN BIT(4)
#define QSPI_IFR_ADDREN BIT(5)
#define QSPI_IFR_OPTEN BIT(6)
@@ -117,19 +163,60 @@
#define QSPI_IFR_OPTL_4BIT (2 << 8)
#define QSPI_IFR_OPTL_8BIT (3 << 8)
#define QSPI_IFR_ADDRL BIT(10)
+#define QSPI_IFR_ADDRL_SAMA7G5 GENMASK(11, 10)
#define QSPI_IFR_TFRTYP_MEM BIT(12)
#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
#define QSPI_IFR_CRM BIT(14)
+#define QSPI_IFR_DDREN BIT(15)
#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+#define QSPI_IFR_END BIT(22)
+#define QSPI_IFR_SMRM BIT(23)
#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
+#define QSPI_IFR_DQSEN BIT(25)
+#define QSPI_IFR_DDRCMDEN BIT(26)
+#define QSPI_IFR_HFWBEN BIT(27)
+#define QSPI_IFR_PROTTYP GENMASK(29, 28)
+#define QSPI_IFR_PROTTYP_STD_SPI 0
+#define QSPI_IFR_PROTTYP_TWIN_QUAD 1
+#define QSPI_IFR_PROTTYP_OCTAFLASH 2
+#define QSPI_IFR_PROTTYP_HYPERFLASH 3
/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
#define QSPI_SMR_SCREN BIT(0)
#define QSPI_SMR_RVDIS BIT(1)
+#define QSPI_SMR_SCRKL BIT(2)
+
+/* Bitfields in QSPI_REFRESH (Refresh Register) */
+#define QSPI_REFRESH_DELAY_COUNTER GENMASK(31, 0)
+
+/* Bitfields in QSPI_WRACNT (Write Access Counter Register) */
+#define QSPI_WRACNT_NBWRA GENMASK(31, 0)
+
+/* Bitfields in QSPI_DLLCFG (DLL Configuration Register) */
+#define QSPI_DLLCFG_RANGE BIT(0)
+
+/* Bitfields in QSPI_PCALCFG (DLL Pad Calibration Configuration Register) */
+#define QSPI_PCALCFG_AAON BIT(0)
+#define QSPI_PCALCFG_DAPCAL BIT(1)
+#define QSPI_PCALCFG_DIFFPM BIT(2)
+#define QSPI_PCALCFG_CLKDIV GENMASK(6, 4)
+#define QSPI_PCALCFG_CALCNT GENMASK(16, 8)
+#define QSPI_PCALCFG_CALP GENMASK(27, 24)
+#define QSPI_PCALCFG_CALN GENMASK(31, 28)
+
+/* Bitfields in QSPI_PCALBP (DLL Pad Calibration Bypass Register) */
+#define QSPI_PCALBP_BPEN BIT(0)
+#define QSPI_PCALBP_CALPBP GENMASK(11, 8)
+#define QSPI_PCALBP_CALNBP GENMASK(19, 16)
+
+/* Bitfields in QSPI_TOUT (Timeout Register) */
+#define QSPI_TOUT_TCNTM GENMASK(15, 0)
/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
#define QSPI_WPMR_WPEN BIT(0)
+#define QSPI_WPMR_WPITEN BIT(1)
+#define QSPI_WPMR_WPCREN BIT(2)
#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
@@ -138,23 +225,74 @@
#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
+#define ATMEL_QSPI_TIMEOUT 1000 /* ms */
+#define ATMEL_QSPI_SYNC_TIMEOUT 300 /* ms */
+#define QSPI_DLLCFG_THRESHOLD_FREQ 90000000U
+#define QSPI_CALIB_TIME 2000 /* 2 us */
+
+/* Use PIO for small transfers. */
+#define ATMEL_QSPI_DMA_MIN_BYTES 16
+/**
+ * struct atmel_qspi_pcal - Pad Calibration Clock Division
+ * @pclk_rate: peripheral clock rate.
+ * @pclk_div: calibration clock division. The clock applied to the calibration
+ * cell is divided by pclk_div + 1.
+ */
+struct atmel_qspi_pcal {
+ u32 pclk_rate;
+ u8 pclk_div;
+};
+
+#define ATMEL_QSPI_PCAL_ARRAY_SIZE 8
+static const struct atmel_qspi_pcal pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE] = {
+ {25000000, 0},
+ {50000000, 1},
+ {75000000, 2},
+ {100000000, 3},
+ {125000000, 4},
+ {150000000, 5},
+ {175000000, 6},
+ {200000000, 7},
+};
+
struct atmel_qspi_caps {
+ u32 max_speed_hz;
bool has_qspick;
+ bool has_gclk;
bool has_ricr;
+ bool octal;
+ bool has_dma;
};
+struct atmel_qspi_ops;
+
struct atmel_qspi {
void __iomem *regs;
void __iomem *mem;
struct clk *pclk;
struct clk *qspick;
+ struct clk *gclk;
struct platform_device *pdev;
const struct atmel_qspi_caps *caps;
+ const struct atmel_qspi_ops *ops;
resource_size_t mmap_size;
u32 pending;
+ u32 irq_mask;
u32 mr;
u32 scr;
+ u32 target_max_speed_hz;
struct completion cmd_completion;
+ struct completion dma_completion;
+ dma_addr_t mmap_phys_base;
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+};
+
+struct atmel_qspi_ops {
+ int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
+ u32 *offset);
+ int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
+ u32 offset);
};
struct atmel_qspi_mode {
@@ -174,6 +312,19 @@ static const struct atmel_qspi_mode atmel_qspi_modes[] = {
{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
+static const struct atmel_qspi_mode atmel_qspi_sama7g5_modes[] = {
+ { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
+ { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
+ { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
+ { 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
+ { 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
+ { 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
+ { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
+ { 1, 1, 8, QSPI_IFR_WIDTH_OCT_OUTPUT },
+ { 1, 8, 8, QSPI_IFR_WIDTH_OCT_IO },
+ { 8, 8, 8, QSPI_IFR_WIDTH_OCT_CMD },
+};
+
#ifdef VERBOSE_DEBUG
static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
{
@@ -196,6 +347,8 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
return "IMR";
case QSPI_SCR:
return "SCR";
+ case QSPI_SR2:
+ return "SR2";
case QSPI_IAR:
return "IAR";
case QSPI_ICR:
@@ -208,6 +361,18 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
return "SMR";
case QSPI_SKR:
return "SKR";
+ case QSPI_REFRESH:
+ return "REFRESH";
+ case QSPI_WRACNT:
+ return "WRACNT";
+ case QSPI_DLLCFG:
+ return "DLLCFG";
+ case QSPI_PCALCFG:
+ return "PCALCFG";
+ case QSPI_PCALBP:
+ return "PCALBP";
+ case QSPI_TOUT:
+ return "TOUT";
case QSPI_WPMR:
return "WPMR";
case QSPI_WPSR:
@@ -249,6 +414,28 @@ static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
writel_relaxed(value, aq->regs + offset);
}
+static int atmel_qspi_reg_sync(struct atmel_qspi *aq)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_SYNCBSY), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ return ret;
+}
+
+static int atmel_qspi_update_config(struct atmel_qspi *aq)
+{
+ int ret;
+
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_UPDCFG, aq, QSPI_CR);
+ return atmel_qspi_reg_sync(aq);
+}
+
static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
const struct atmel_qspi_mode *mode)
{
@@ -275,12 +462,31 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
return -EOPNOTSUPP;
}
+static int atmel_qspi_sama7g5_find_mode(const struct spi_mem_op *op)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(atmel_qspi_sama7g5_modes); i++)
+ if (atmel_qspi_is_compatible(op, &atmel_qspi_sama7g5_modes[i]))
+ return i;
+
+ return -EOPNOTSUPP;
+}
+
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
if (!spi_mem_default_supports_op(mem, op))
return false;
+ if (aq->caps->octal) {
+ if (atmel_qspi_sama7g5_find_mode(op) < 0)
+ return false;
+ else
+ return true;
+ }
+
if (atmel_qspi_find_mode(op) < 0)
return false;
@@ -292,6 +498,25 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
return true;
}
+/*
+ * If the QSPI controller is set in regular SPI mode, set it in
+ * Serial Memory Mode (SMM).
+ */
+static int atmel_qspi_set_serial_memory_mode(struct atmel_qspi *aq)
+{
+ int ret = 0;
+
+ if (!(aq->mr & QSPI_MR_SMM)) {
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->mr, aq, QSPI_MR);
+
+ if (aq->caps->has_gclk)
+ ret = atmel_qspi_update_config(aq);
+ }
+
+ return ret;
+}
+
static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
const struct spi_mem_op *op, u32 *offset)
{
@@ -371,14 +596,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
ifr |= QSPI_IFR_TFRTYP_MEM;
}
- /*
- * If the QSPI controller is set in regular SPI mode, set it in
- * Serial Memory Mode (SMM).
- */
- if (!(aq->mr & QSPI_MR_SMM)) {
- aq->mr |= QSPI_MR_SMM;
- atmel_qspi_write(aq->mr, aq, QSPI_MR);
- }
+ mode = atmel_qspi_set_serial_memory_mode(aq);
+ if (mode < 0)
+ return mode;
/* Clear pending interrupts */
(void)atmel_qspi_read(aq, QSPI_SR);
@@ -404,10 +624,323 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
return 0;
}
+static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
+{
+ int err = 0;
+ u32 sr;
+
+ /* Poll INSTRuction End status */
+ sr = atmel_qspi_read(aq, QSPI_SR);
+ if ((sr & irq_mask) == irq_mask)
+ return 0;
+
+ /* Wait for INSTRuction End interrupt */
+ reinit_completion(&aq->cmd_completion);
+ aq->pending = sr & irq_mask;
+ aq->irq_mask = irq_mask;
+ atmel_qspi_write(irq_mask, aq, QSPI_IER);
+ if (!wait_for_completion_timeout(&aq->cmd_completion,
+ msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
+ err = -ETIMEDOUT;
+ atmel_qspi_write(irq_mask, aq, QSPI_IDR);
+
+ return err;
+}
+
+static int atmel_qspi_transfer(struct spi_mem *mem,
+ const struct spi_mem_op *op, u32 offset)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
+
+ /* Skip to the final steps if there is no data */
+ if (!op->data.nbytes)
+ return atmel_qspi_wait_for_completion(aq,
+ QSPI_SR_CMD_COMPLETED);
+
+ /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+ (void)atmel_qspi_read(aq, QSPI_IFR);
+
+ /* Send/Receive data */
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
+
+ /* Synchronize AHB and APB accesses again */
+ rmb();
+ } else {
+ memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
+
+ /* Synchronize AHB and APB accesses again */
+ wmb();
+ }
+
+ /* Release the chip-select */
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+
+ return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
+}
+
+static int atmel_qspi_sama7g5_set_cfg(struct atmel_qspi *aq,
+ const struct spi_mem_op *op, u32 *offset)
+{
+ u32 iar, icr, ifr;
+ int mode, ret;
+
+ iar = 0;
+ icr = FIELD_PREP(QSPI_ICR_INST_MASK_SAMA7G5, op->cmd.opcode);
+ ifr = QSPI_IFR_INSTEN;
+
+ mode = atmel_qspi_sama7g5_find_mode(op);
+ if (mode < 0)
+ return mode;
+ ifr |= atmel_qspi_sama7g5_modes[mode].config;
+
+ if (op->dummy.buswidth && op->dummy.nbytes) {
+ if (op->addr.dtr && op->dummy.dtr && op->data.dtr)
+ ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
+ (2 * op->dummy.buswidth));
+ else
+ ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ }
+
+ if (op->addr.buswidth && op->addr.nbytes) {
+ ifr |= FIELD_PREP(QSPI_IFR_ADDRL_SAMA7G5, op->addr.nbytes - 1) |
+ QSPI_IFR_ADDREN;
+ iar = FIELD_PREP(QSPI_IAR_ADDR, op->addr.val);
+ }
+
+ if (op->addr.dtr && op->dummy.dtr && op->data.dtr) {
+ ifr |= QSPI_IFR_DDREN;
+ if (op->cmd.dtr)
+ ifr |= QSPI_IFR_DDRCMDEN;
+
+ ifr |= QSPI_IFR_DQSEN;
+ }
+
+ if (op->cmd.buswidth == 8 || op->addr.buswidth == 8 ||
+ op->data.buswidth == 8)
+ ifr |= FIELD_PREP(QSPI_IFR_PROTTYP, QSPI_IFR_PROTTYP_OCTAFLASH);
+
+ /* offset of the data access in the QSPI memory space */
+ *offset = iar;
+
+ /* Set data enable */
+ if (op->data.nbytes) {
+ ifr |= QSPI_IFR_DATAEN;
+
+ if (op->addr.nbytes)
+ ifr |= QSPI_IFR_TFRTYP_MEM;
+ }
+
+ ret = atmel_qspi_set_serial_memory_mode(aq);
+ if (ret < 0)
+ return ret;
+
+ /* Clear pending interrupts */
+ (void)atmel_qspi_read(aq, QSPI_SR);
+
+ /* Set QSPI Instruction Frame registers */
+ if (op->addr.nbytes && !op->data.nbytes)
+ atmel_qspi_write(iar, aq, QSPI_IAR);
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ atmel_qspi_write(icr, aq, QSPI_RICR);
+ } else {
+ atmel_qspi_write(icr, aq, QSPI_WICR);
+ if (op->data.nbytes)
+ atmel_qspi_write(FIELD_PREP(QSPI_WRACNT_NBWRA,
+ op->data.nbytes),
+ aq, QSPI_WRACNT);
+ }
+
+ atmel_qspi_write(ifr, aq, QSPI_IFR);
+
+ return atmel_qspi_update_config(aq);
+}
+
+static void atmel_qspi_dma_callback(void *param)
+{
+ struct atmel_qspi *aq = param;
+
+ complete(&aq->dma_completion);
+}
+
+static int atmel_qspi_dma_xfer(struct atmel_qspi *aq, struct dma_chan *chan,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ unsigned int len)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ int ret;
+
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx) {
+ dev_err(&aq->pdev->dev, "device_prep_dma_memcpy error\n");
+ return -EIO;
+ }
+
+ reinit_completion(&aq->dma_completion);
+ tx->callback = atmel_qspi_dma_callback;
+ tx->callback_param = aq;
+ cookie = tx->tx_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(&aq->pdev->dev, "dma_submit_error %d\n", cookie);
+ return ret;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&aq->dma_completion,
+ msecs_to_jiffies(20 * ATMEL_QSPI_TIMEOUT));
+ if (ret == 0) {
+ dmaengine_terminate_sync(chan);
+ dev_err(&aq->pdev->dev, "DMA wait_for_completion_timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_dma_rx_xfer(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ struct sg_table *sgt, loff_t loff)
+{
+ struct atmel_qspi *aq =
+ spi_controller_get_devdata(mem->spi->controller);
+ struct scatterlist *sg;
+ dma_addr_t dma_src;
+ unsigned int i, len;
+ int ret;
+
+ dma_src = aq->mmap_phys_base + loff;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ len = sg_dma_len(sg);
+ ret = atmel_qspi_dma_xfer(aq, aq->rx_chan, sg_dma_address(sg),
+ dma_src, len);
+ if (ret)
+ return ret;
+ dma_src += len;
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_dma_tx_xfer(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ struct sg_table *sgt, loff_t loff)
+{
+ struct atmel_qspi *aq =
+ spi_controller_get_devdata(mem->spi->controller);
+ struct scatterlist *sg;
+ dma_addr_t dma_dst;
+ unsigned int i, len;
+ int ret;
+
+ dma_dst = aq->mmap_phys_base + loff;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ len = sg_dma_len(sg);
+ ret = atmel_qspi_dma_xfer(aq, aq->tx_chan, dma_dst,
+ sg_dma_address(sg), len);
+ if (ret)
+ return ret;
+ dma_dst += len;
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_dma_transfer(struct spi_mem *mem,
+ const struct spi_mem_op *op, loff_t loff)
+{
+ struct sg_table sgt;
+ int ret;
+
+ ret = spi_controller_dma_map_mem_op_data(mem->spi->controller, op,
+ &sgt);
+ if (ret)
+ return ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ ret = atmel_qspi_dma_rx_xfer(mem, op, &sgt, loff);
+ else
+ ret = atmel_qspi_dma_tx_xfer(mem, op, &sgt, loff);
+
+ spi_controller_dma_unmap_mem_op_data(mem->spi->controller, op, &sgt);
+
+ return ret;
+}
+
+static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
+ const struct spi_mem_op *op, u32 offset)
+{
+ struct atmel_qspi *aq =
+ spi_controller_get_devdata(mem->spi->controller);
+ u32 val;
+ int ret;
+
+ if (!op->data.nbytes) {
+ /* Start the transfer. */
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_STTFR, aq, QSPI_CR);
+
+ return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
+ }
+
+ /* Send/Receive data. */
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (aq->rx_chan && op->addr.nbytes &&
+ op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
+ ret = atmel_qspi_dma_transfer(mem, op, offset);
+ if (ret)
+ return ret;
+ } else {
+ memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
+ }
+
+ if (op->addr.nbytes) {
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_RBUSY), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (aq->tx_chan && op->addr.nbytes &&
+ op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
+ ret = atmel_qspi_dma_transfer(mem, op, offset);
+ if (ret)
+ return ret;
+ } else {
+ memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
+ }
+
+ ret = atmel_qspi_wait_for_completion(aq, QSPI_SR_LWRA);
+ if (ret)
+ return ret;
+ }
+
+ /* Release the chip-select. */
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+
+ return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
+}
+
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
- u32 sr, offset;
+ u32 offset;
int err;
/*
@@ -416,46 +949,20 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
* when the flash memories overrun the controller's memory space.
*/
if (op->addr.val + op->data.nbytes > aq->mmap_size)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+
+ if (op->addr.nbytes > 4)
+ return -EOPNOTSUPP;
err = pm_runtime_resume_and_get(&aq->pdev->dev);
if (err < 0)
return err;
- err = atmel_qspi_set_cfg(aq, op, &offset);
+ err = aq->ops->set_cfg(aq, op, &offset);
if (err)
goto pm_runtime_put;
- /* Skip to the final steps if there is no data */
- if (op->data.nbytes) {
- /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
- (void)atmel_qspi_read(aq, QSPI_IFR);
-
- /* Send/Receive data */
- if (op->data.dir == SPI_MEM_DATA_IN)
- memcpy_fromio(op->data.buf.in, aq->mem + offset,
- op->data.nbytes);
- else
- memcpy_toio(aq->mem + offset, op->data.buf.out,
- op->data.nbytes);
-
- /* Release the chip-select */
- atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
- }
-
- /* Poll INSTRuction End status */
- sr = atmel_qspi_read(aq, QSPI_SR);
- if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
- goto pm_runtime_put;
-
- /* Wait for INSTRuction End interrupt */
- reinit_completion(&aq->cmd_completion);
- aq->pending = sr & QSPI_SR_CMD_COMPLETED;
- atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
- if (!wait_for_completion_timeout(&aq->cmd_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
- atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
+ err = aq->ops->transfer(mem, op, offset);
pm_runtime_put:
pm_runtime_mark_last_busy(&aq->pdev->dev);
@@ -474,6 +981,159 @@ static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
.get_name = atmel_qspi_get_name
};
+static int atmel_qspi_set_pad_calibration(struct atmel_qspi *aq)
+{
+ unsigned long pclk_rate;
+ u32 status, val;
+ int i, ret;
+ u8 pclk_div = 0;
+
+ pclk_rate = clk_get_rate(aq->pclk);
+ if (!pclk_rate)
+ return -EINVAL;
+
+ for (i = 0; i < ATMEL_QSPI_PCAL_ARRAY_SIZE; i++) {
+ if (pclk_rate <= pcal[i].pclk_rate) {
+ pclk_div = pcal[i].pclk_div;
+ break;
+ }
+ }
+
+ /*
+ * Use the biggest divider in case the peripheral clock exceeds
+ * 200 MHz.
+ */
+ if (pclk_rate > pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_rate)
+ pclk_div = pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_div;
+
+ /* Disable QSPI while configuring the pad calibration. */
+ status = atmel_qspi_read(aq, QSPI_SR2);
+ if (status & QSPI_SR2_QSPIENS) {
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+ }
+
+ /*
+ * The analog circuitry is not shut down at the end of the calibration
+ * and the start-up time is only required for the first calibration
+ * sequence, thus increasing performance. Set the delay between the Pad
+ * calibration analog circuitry and the calibration request to 2us.
+ */
+ atmel_qspi_write(QSPI_PCALCFG_AAON |
+ FIELD_PREP(QSPI_PCALCFG_CLKDIV, pclk_div) |
+ FIELD_PREP(QSPI_PCALCFG_CALCNT,
+ 2 * (pclk_rate / 1000000)),
+ aq, QSPI_PCALCFG);
+
+ /* DLL On + start calibration. */
+ atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR);
+
+ /* Check synchronization status before updating configuration. */
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ (val & QSPI_SR2_DLOCK) &&
+ !(val & QSPI_SR2_CALBSY), 40,
+ ATMEL_QSPI_TIMEOUT);
+
+ /* Refresh the analog blocks every 1 ms. */
+ atmel_qspi_write(FIELD_PREP(QSPI_REFRESH_DELAY_COUNTER,
+ aq->target_max_speed_hz / 1000),
+ aq, QSPI_REFRESH);
+
+ return ret;
+}
+
+static int atmel_qspi_set_gclk(struct atmel_qspi *aq)
+{
+ u32 status, val;
+ int ret;
+
+ /* Disable DLL before setting GCLK */
+ status = atmel_qspi_read(aq, QSPI_SR2);
+ if (status & QSPI_SR2_DLOCK) {
+ atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+
+ if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ)
+ atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG);
+ else
+ atmel_qspi_write(0, aq, QSPI_DLLCFG);
+
+ ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz);
+ if (ret) {
+ dev_err(&aq->pdev->dev, "Failed to set generic clock rate.\n");
+ return ret;
+ }
+
+ /* Enable the QSPI generic clock */
+ ret = clk_prepare_enable(aq->gclk);
+ if (ret)
+ dev_err(&aq->pdev->dev, "Failed to enable generic clock.\n");
+
+ return ret;
+}
+
+static int atmel_qspi_sama7g5_init(struct atmel_qspi *aq)
+{
+ u32 val;
+ int ret;
+
+ ret = atmel_qspi_set_gclk(aq);
+ if (ret)
+ return ret;
+
+ if (aq->caps->octal) {
+ ret = atmel_qspi_set_pad_calibration(aq);
+ if (ret)
+ return ret;
+ } else {
+ atmel_qspi_write(QSPI_CR_DLLON, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ (val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ }
+
+ /* Set the QSPI controller by default in Serial Memory Mode */
+ aq->mr |= QSPI_MR_DQSDLYEN;
+ ret = atmel_qspi_set_serial_memory_mode(aq);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the QSPI controller. */
+ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ val & QSPI_SR2_QSPIENS, 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (aq->caps->octal) {
+ ret = readl_poll_timeout(aq->regs + QSPI_SR, val,
+ val & QSPI_SR_RFRSHD, 40,
+ ATMEL_QSPI_TIMEOUT);
+ }
+
+ atmel_qspi_write(QSPI_TOUT_TCNTM, aq, QSPI_TOUT);
+ return ret;
+}
+
+static int atmel_qspi_sama7g5_setup(struct spi_device *spi)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(spi->controller);
+
+ /* The controller can communicate with a single peripheral device (target). */
+ aq->target_max_speed_hz = spi->max_speed_hz;
+
+ return atmel_qspi_sama7g5_init(aq);
+}
+
static int atmel_qspi_setup(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->controller;
@@ -488,6 +1148,9 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
+ if (aq->caps->has_gclk)
+ return atmel_qspi_sama7g5_setup(spi);
+
src_rate = clk_get_rate(aq->pclk);
if (!src_rate)
return -EINVAL;
@@ -573,17 +1236,29 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi)
return 0;
}
-static void atmel_qspi_init(struct atmel_qspi *aq)
+static int atmel_qspi_init(struct atmel_qspi *aq)
{
+ int ret;
+
+ if (aq->caps->has_gclk) {
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
+ return 0;
+ }
+
/* Reset the QSPI controller */
atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
- aq->mr |= QSPI_MR_SMM;
- atmel_qspi_write(aq->mr, aq, QSPI_MR);
+ ret = atmel_qspi_set_serial_memory_mode(aq);
+ if (ret < 0)
+ return ret;
/* Enable the QSPI controller */
atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+ return 0;
}
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
@@ -599,12 +1274,66 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
return IRQ_NONE;
aq->pending |= pending;
- if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ if ((aq->pending & aq->irq_mask) == aq->irq_mask)
complete(&aq->cmd_completion);
return IRQ_HANDLED;
}
+static int atmel_qspi_dma_init(struct spi_controller *ctrl)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+ aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx");
+ if (IS_ERR(aq->rx_chan)) {
+ ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan),
+ "RX DMA channel is not available\n");
+ goto null_rx_chan;
+ }
+
+ aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx");
+ if (IS_ERR(aq->tx_chan)) {
+ ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->tx_chan),
+ "TX DMA channel is not available\n");
+ goto release_rx_chan;
+ }
+
+ ctrl->dma_rx = aq->rx_chan;
+ ctrl->dma_tx = aq->tx_chan;
+ init_completion(&aq->dma_completion);
+
+ dev_info(&aq->pdev->dev, "Using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(aq->tx_chan), dma_chan_name(aq->rx_chan));
+
+ return 0;
+
+release_rx_chan:
+ dma_release_channel(aq->rx_chan);
+ aq->tx_chan = NULL;
+null_rx_chan:
+ aq->rx_chan = NULL;
+ return ret;
+}
+
+static void atmel_qspi_dma_release(struct atmel_qspi *aq)
+{
+ if (aq->rx_chan)
+ dma_release_channel(aq->rx_chan);
+ if (aq->tx_chan)
+ dma_release_channel(aq->tx_chan);
+}
+
+static const struct atmel_qspi_ops atmel_qspi_ops = {
+ .set_cfg = atmel_qspi_set_cfg,
+ .transfer = atmel_qspi_transfer,
+};
+
+static const struct atmel_qspi_ops atmel_qspi_sama7g5_ops = {
+ .set_cfg = atmel_qspi_sama7g5_set_cfg,
+ .transfer = atmel_qspi_sama7g5_transfer,
+};
+
static int atmel_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
@@ -616,7 +1345,27 @@ static int atmel_qspi_probe(struct platform_device *pdev)
if (!ctrl)
return -ENOMEM;
+ aq = spi_controller_get_devdata(ctrl);
+
+ aq->caps = of_device_get_match_data(&pdev->dev);
+ if (!aq->caps) {
+ dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
+ return -EINVAL;
+ }
+
+ init_completion(&aq->cmd_completion);
+ aq->pdev = pdev;
+
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+ if (aq->caps->octal)
+ ctrl->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
+
+ if (aq->caps->has_gclk)
+ aq->ops = &atmel_qspi_sama7g5_ops;
+ else
+ aq->ops = &atmel_qspi_ops;
+
+ ctrl->max_speed_hz = aq->caps->max_speed_hz;
ctrl->setup = atmel_qspi_setup;
ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
ctrl->bus_num = -1;
@@ -625,11 +1374,6 @@ static int atmel_qspi_probe(struct platform_device *pdev)
ctrl->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, ctrl);
- aq = spi_controller_get_devdata(ctrl);
-
- init_completion(&aq->cmd_completion);
- aq->pdev = pdev;
-
/* Map the registers */
aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
if (IS_ERR(aq->regs))
@@ -644,87 +1388,115 @@ static int atmel_qspi_probe(struct platform_device *pdev)
"missing AHB memory\n");
aq->mmap_size = resource_size(res);
+ aq->mmap_phys_base = (dma_addr_t)res->start;
/* Get the peripheral clock */
- aq->pclk = devm_clk_get(&pdev->dev, "pclk");
+ aq->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
if (IS_ERR(aq->pclk))
- aq->pclk = devm_clk_get(&pdev->dev, NULL);
+ aq->pclk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(aq->pclk))
return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
"missing peripheral clock\n");
- /* Enable the peripheral clock */
- err = clk_prepare_enable(aq->pclk);
- if (err)
- return dev_err_probe(&pdev->dev, err,
- "failed to enable the peripheral clock\n");
-
- aq->caps = of_device_get_match_data(&pdev->dev);
- if (!aq->caps) {
- dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
- err = -EINVAL;
- goto disable_pclk;
- }
-
if (aq->caps->has_qspick) {
/* Get the QSPI system clock */
- aq->qspick = devm_clk_get(&pdev->dev, "qspick");
+ aq->qspick = devm_clk_get_enabled(&pdev->dev, "qspick");
if (IS_ERR(aq->qspick)) {
dev_err(&pdev->dev, "missing system clock\n");
err = PTR_ERR(aq->qspick);
- goto disable_pclk;
+ return err;
}
- /* Enable the QSPI system clock */
- err = clk_prepare_enable(aq->qspick);
- if (err) {
- dev_err(&pdev->dev,
- "failed to enable the QSPI system clock\n");
- goto disable_pclk;
+ } else if (aq->caps->has_gclk) {
+ /* Get the QSPI generic clock */
+ aq->gclk = devm_clk_get(&pdev->dev, "gclk");
+ if (IS_ERR(aq->gclk)) {
+ dev_err(&pdev->dev, "missing Generic clock\n");
+ err = PTR_ERR(aq->gclk);
+ return err;
}
}
+ if (aq->caps->has_dma) {
+ err = atmel_qspi_dma_init(ctrl);
+ if (err == -EPROBE_DEFER)
+ return err;
+ }
+
/* Request the IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = irq;
- goto disable_qspick;
+ goto dma_release;
}
err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
0, dev_name(&pdev->dev), aq);
if (err)
- goto disable_qspick;
+ goto dma_release;
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
+ devm_pm_runtime_set_active_enabled(&pdev->dev);
+ devm_pm_runtime_get_noresume(&pdev->dev);
- atmel_qspi_init(aq);
+ err = atmel_qspi_init(aq);
+ if (err)
+ goto dma_release;
err = spi_register_controller(ctrl);
- if (err) {
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- goto disable_qspick;
- }
+ if (err)
+ goto dma_release;
+
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
-disable_qspick:
- clk_disable_unprepare(aq->qspick);
-disable_pclk:
- clk_disable_unprepare(aq->pclk);
+dma_release:
+ if (aq->caps->has_dma)
+ atmel_qspi_dma_release(aq);
return err;
}
+static int atmel_qspi_sama7g5_suspend(struct atmel_qspi *aq)
+{
+ int ret;
+ u32 val;
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_RBUSY) &&
+ (val & QSPI_SR2_HIDLE), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_QSPIENS), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(aq->gclk);
+
+ atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_CALBSY), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static void atmel_qspi_remove(struct platform_device *pdev)
{
struct spi_controller *ctrl = platform_get_drvdata(pdev);
@@ -735,9 +1507,17 @@ static void atmel_qspi_remove(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret >= 0) {
+ if (aq->caps->has_dma)
+ atmel_qspi_dma_release(aq);
+
+ if (aq->caps->has_gclk) {
+ ret = atmel_qspi_sama7g5_suspend(aq);
+ if (ret)
+ dev_warn(&pdev->dev, "Failed to de-init device on remove: %d\n", ret);
+ return;
+ }
+
atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
- clk_disable(aq->qspick);
- clk_disable(aq->pclk);
} else {
/*
* atmel_qspi_runtime_{suspend,resume} just disable and enable
@@ -746,13 +1526,6 @@ static void atmel_qspi_remove(struct platform_device *pdev)
*/
dev_warn(&pdev->dev, "Failed to resume device on remove\n");
}
-
- clk_unprepare(aq->qspick);
- clk_unprepare(aq->pclk);
-
- pm_runtime_disable(&pdev->dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
}
static int __maybe_unused atmel_qspi_suspend(struct device *dev)
@@ -765,6 +1538,12 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
if (ret < 0)
return ret;
+ if (aq->caps->has_gclk) {
+ ret = atmel_qspi_sama7g5_suspend(aq);
+ clk_disable_unprepare(aq->pclk);
+ return ret;
+ }
+
atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
pm_runtime_mark_last_busy(dev);
@@ -792,6 +1571,9 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
return ret;
}
+ if (aq->caps->has_gclk)
+ return atmel_qspi_sama7g5_init(aq);
+
ret = pm_runtime_force_resume(dev);
if (ret < 0)
return ret;
@@ -847,6 +1629,19 @@ static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
.has_ricr = true,
};
+static const struct atmel_qspi_caps atmel_sama7g5_ospi_caps = {
+ .max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
+ .has_gclk = true,
+ .octal = true,
+ .has_dma = true,
+};
+
+static const struct atmel_qspi_caps atmel_sama7g5_qspi_caps = {
+ .max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
+ .has_gclk = true,
+ .has_dma = true,
+};
+
static const struct of_device_id atmel_qspi_dt_ids[] = {
{
.compatible = "atmel,sama5d2-qspi",
@@ -856,6 +1651,15 @@ static const struct of_device_id atmel_qspi_dt_ids[] = {
.compatible = "microchip,sam9x60-qspi",
.data = &atmel_sam9x60_qspi_caps,
},
+ {
+ .compatible = "microchip,sama7g5-ospi",
+ .data = &atmel_sama7g5_ospi_caps,
+ },
+ {
+ .compatible = "microchip,sama7g5-qspi",
+ .data = &atmel_sama7g5_qspi_caps,
+ },
+
{ /* sentinel */ }
};
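[Editor's note] The SAMA7G5 support above derives two counter values from clock rates: the pad-calibration request delay (QSPI_PCALCFG.CALCNT, 2 us counted in pclk cycles, per the driver comment) and the analog-block refresh period (QSPI_REFRESH, 1 ms which, going by the driver's use of target_max_speed_hz, appears to be counted in serial-clock cycles). A standalone sketch of that arithmetic, with assumed illustrative clock rates:

/* Hypothetical worked example of the counter maths used above. */
#include <stdio.h>

int main(void)
{
	unsigned long pclk_rate = 200000000UL;	/* peripheral clock, Hz (assumed) */
	unsigned long sck_rate = 200000000UL;	/* target serial clock, Hz (assumed) */

	/* QSPI_PCALCFG.CALCNT: 2 us expressed in pclk cycles */
	unsigned long calcnt = 2 * (pclk_rate / 1000000);

	/* QSPI_REFRESH: 1 ms expressed in serial-clock cycles */
	unsigned long refresh = sck_rate / 1000;

	printf("CALCNT = %lu cycles, REFRESH = %lu cycles\n", calcnt, refresh);
	return 0;
}

At 200 MHz on both clocks this prints CALCNT = 400 and REFRESH = 200000, matching the 2 us and 1 ms targets named in the comments.
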
diff --git a/drivers/spi/spi-amd-pci.c b/drivers/spi/spi-amd-pci.c
new file mode 100644
index 000000000000..e5faab414c17
--- /dev/null
+++ b/drivers/spi/spi-amd-pci.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD SPI controller driver
+ *
+ * Copyright (c) 2025, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Krishnamoorthi M <krishnamoorthi.m@amd.com>
+ * Akshata MukundShetty <akshata.mukundshetty@amd.com>
+ */
+
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/pci.h>
+
+#include "spi-amd.h"
+
+#define AMD_PCI_DEVICE_ID_LPC_BRIDGE 0x1682
+#define AMD_PCI_LPC_SPI_BASE_ADDR_REG 0xA0
+#define AMD_SPI_BASE_ADDR_MASK ~0xFF
+#define AMD_HID2_PCI_BAR_OFFSET 0x00002000
+#define AMD_HID2_MEM_SIZE 0x200
+
+static struct pci_device_id pci_spi_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_PCI_DEVICE_ID_LPC_BRIDGE) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci_spi_ids);
+
+static int amd_spi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *host;
+ struct amd_spi *amd_spi;
+ u32 io_base_addr;
+
+ /* Allocate storage for host and driver private data */
+ host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
+ if (!host)
+ return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");
+
+ amd_spi = spi_controller_get_devdata(host);
+
+ pci_read_config_dword(pdev, AMD_PCI_LPC_SPI_BASE_ADDR_REG, &io_base_addr);
+ io_base_addr = (io_base_addr & AMD_SPI_BASE_ADDR_MASK) + AMD_HID2_PCI_BAR_OFFSET;
+ amd_spi->io_remap_addr = devm_ioremap(dev, io_base_addr, AMD_HID2_MEM_SIZE);
+
+ if (!amd_spi->io_remap_addr)
+ return dev_err_probe(dev, -ENOMEM,
+ "ioremap of SPI registers failed\n");
+
+ dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
+
+ amd_spi->version = AMD_HID2_SPI;
+ host->bus_num = 2;
+
+ return amd_spi_probe_common(dev, host);
+}
+
+static struct pci_driver amd_spi_pci_driver = {
+ .name = "amd_spi_pci",
+ .id_table = pci_spi_ids,
+ .probe = amd_spi_pci_probe,
+};
+
+module_pci_driver(amd_spi_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD HID2 SPI Controller Driver");
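[Editor's note] amd_spi_pci_probe() above locates the HID2 register block indirectly rather than through a PCI BAR: it reads the dword at LPC-bridge config offset 0xA0 (AMD_PCI_LPC_SPI_BASE_ADDR_REG), clears the low flag byte with AMD_SPI_BASE_ADDR_MASK, and adds the fixed AMD_HID2_PCI_BAR_OFFSET before ioremapping AMD_HID2_MEM_SIZE bytes. A standalone sketch of the address computation; the config-register value is a made-up example:

#include <stdio.h>

int main(void)
{
	unsigned int cfg = 0xfec10083;		/* hypothetical dword read from config offset 0xA0 */
	unsigned int base = cfg & ~0xffu;	/* AMD_SPI_BASE_ADDR_MASK */
	unsigned int hid2 = base + 0x00002000;	/* AMD_HID2_PCI_BAR_OFFSET */

	/* The driver then ioremaps 0x200 (AMD_HID2_MEM_SIZE) bytes at hid2. */
	printf("SPI base 0x%08x, HID2 window 0x%08x\n", base, hid2);
	return 0;
}
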
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index d30a21b0b05f..02e7fe095a0b 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -17,6 +17,8 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
+#include "spi-amd.h"
+
#define AMD_SPI_CTRL0_REG 0x00
#define AMD_SPI_EXEC_CMD BIT(16)
#define AMD_SPI_FIFO_CLEAR BIT(20)
@@ -52,10 +54,13 @@
#define AMD_SPI_SPD7_MASK GENMASK(13, AMD_SPI_SPD7_SHIFT)
#define AMD_SPI_HID2_INPUT_RING_BUF0 0X100
+#define AMD_SPI_HID2_OUTPUT_BUF0 0x140
#define AMD_SPI_HID2_CNTRL 0x150
#define AMD_SPI_HID2_INT_STATUS 0x154
#define AMD_SPI_HID2_CMD_START 0x156
#define AMD_SPI_HID2_INT_MASK 0x158
+#define AMD_SPI_HID2_WRITE_CNTRL0 0x160
+#define AMD_SPI_HID2_WRITE_CNTRL1 0x164
#define AMD_SPI_HID2_READ_CNTRL0 0x170
#define AMD_SPI_HID2_READ_CNTRL1 0x174
#define AMD_SPI_HID2_READ_CNTRL2 0x180
@@ -81,17 +86,9 @@
#define AMD_SPI_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */
#define AMD_SPI_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */
-/**
- * enum amd_spi_versions - SPI controller versions
- * @AMD_SPI_V1: AMDI0061 hardware version
- * @AMD_SPI_V2: AMDI0062 hardware version
- * @AMD_HID2_SPI: AMDI0063 hardware version
- */
-enum amd_spi_versions {
- AMD_SPI_V1 = 1,
- AMD_SPI_V2,
- AMD_HID2_SPI,
-};
+/* SPINAND write command opcodes */
+#define AMD_SPI_OP_PP 0x02 /* Page program */
+#define AMD_SPI_OP_PP_RANDOM 0x84 /* Page program */
enum amd_spi_speed {
F_66_66MHz,
@@ -118,22 +115,6 @@ struct amd_spi_freq {
u32 spd7_val;
};
-/**
- * struct amd_spi - SPI driver instance
- * @io_remap_addr: Start address of the SPI controller registers
- * @phy_dma_buf: Physical address of DMA buffer
- * @dma_virt_addr: Virtual address of DMA buffer
- * @version: SPI controller hardware version
- * @speed_hz: Device frequency
- */
-struct amd_spi {
- void __iomem *io_remap_addr;
- dma_addr_t phy_dma_buf;
- void *dma_virt_addr;
- enum amd_spi_versions version;
- unsigned int speed_hz;
-};
-
static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
{
return readb((u8 __iomem *)amd_spi->io_remap_addr + idx);
@@ -298,19 +279,16 @@ static const struct amd_spi_freq amd_spi_freq[] = {
{ AMD_SPI_MIN_HZ, F_800KHz, 0},
};
-static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
+static void amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
{
unsigned int i, spd7_val, alt_spd;
- if (speed_hz < AMD_SPI_MIN_HZ)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
+ for (i = 0; i < ARRAY_SIZE(amd_spi_freq)-1; i++)
if (speed_hz >= amd_spi_freq[i].speed_hz)
break;
if (amd_spi->speed_hz == amd_spi_freq[i].speed_hz)
- return 0;
+ return;
amd_spi->speed_hz = amd_spi_freq[i].speed_hz;
@@ -329,8 +307,6 @@ static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
amd_spi_setclear_reg32(amd_spi, AMD_SPI_SPEED_REG, spd7_val,
AMD_SPI_SPD7_MASK);
}
-
- return 0;
}
static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
@@ -450,6 +426,17 @@ static inline bool amd_is_spi_read_cmd(const u16 op)
}
}
+static inline bool amd_is_spi_write_cmd(const u16 op)
+{
+ switch (op) {
+ case AMD_SPI_OP_PP:
+ case AMD_SPI_OP_PP_RANDOM:
+ return true;
+ default:
+ return false;
+ }
+}
+
static bool amd_spi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
@@ -460,7 +447,7 @@ static bool amd_spi_supports_op(struct spi_mem *mem,
return false;
/* AMD SPI controllers support quad mode only for read operations */
- if (amd_is_spi_read_cmd(op->cmd.opcode)) {
+ if (amd_is_spi_read_cmd(op->cmd.opcode) || amd_is_spi_write_cmd(op->cmd.opcode)) {
if (op->data.buswidth > 4)
return false;
@@ -469,7 +456,8 @@ static bool amd_spi_supports_op(struct spi_mem *mem,
* doesn't support 4-byte address commands.
*/
if (amd_spi->version == AMD_HID2_SPI) {
- if (amd_is_spi_read_cmd_4b(op->cmd.opcode) ||
+ if ((amd_is_spi_read_cmd_4b(op->cmd.opcode) ||
+ amd_is_spi_write_cmd(op->cmd.opcode)) &&
op->data.nbytes > AMD_SPI_HID2_DMA_SIZE)
return false;
} else if (op->data.nbytes > AMD_SPI_MAX_DATA) {
@@ -479,6 +467,9 @@ static bool amd_spi_supports_op(struct spi_mem *mem,
return false;
}
+ if (op->max_freq < mem->spi->controller->min_speed_hz)
+ return false;
+
return spi_mem_default_supports_op(mem, op);
}
@@ -492,7 +483,8 @@ static int amd_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
* controller index mode supports maximum of 64 bytes in a single
* transaction.
*/
- if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_read_cmd(op->cmd.opcode))
+ if (amd_spi->version == AMD_HID2_SPI && (amd_is_spi_read_cmd(op->cmd.opcode) ||
+ amd_is_spi_write_cmd(op->cmd.opcode)))
op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_HID2_DMA_SIZE);
else
op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_MAX_DATA);
@@ -516,32 +508,96 @@ static void amd_spi_set_addr(struct amd_spi *amd_spi,
}
}
+static void amd_spi_hiddma_write(struct amd_spi *amd_spi, const struct spi_mem_op *op)
+{
+ u16 hid_cmd_start, val;
+ u32 hid_regval;
+
+ /*
+ * Program the HID2 output Buffer0. 4k aligned buf_memory_addr[31:12],
+ * buf_size[2:0].
+ */
+ hid_regval = amd_spi->phy_dma_buf | BIT(0);
+ amd_spi_writereg32(amd_spi, AMD_SPI_HID2_OUTPUT_BUF0, hid_regval);
+
+ /* Program max write length in hid2_write_control1 register */
+ hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_WRITE_CNTRL1);
+ hid_regval = (hid_regval & ~GENMASK(15, 0)) | ((op->data.nbytes) + 3);
+ amd_spi_writereg32(amd_spi, AMD_SPI_HID2_WRITE_CNTRL1, hid_regval);
+
+ /* Set cmd start bit in hid2_cmd_start register to trigger HID basic write operation */
+ hid_cmd_start = amd_spi_readreg16(amd_spi, AMD_SPI_HID2_CMD_START);
+ amd_spi_writereg16(amd_spi, AMD_SPI_HID2_CMD_START, (hid_cmd_start | BIT(2)));
+
+ /* Check interrupt status of HIDDMA basic write operation in hid2_int_status register */
+ readw_poll_timeout(amd_spi->io_remap_addr + AMD_SPI_HID2_INT_STATUS, val,
+ (val & BIT(2)), AMD_SPI_IO_SLEEP_US, AMD_SPI_IO_TIMEOUT_US);
+
+ /* Clear the interrupts by writing to hid2_int_status register */
+ val = amd_spi_readreg16(amd_spi, AMD_SPI_HID2_INT_STATUS);
+ amd_spi_writereg16(amd_spi, AMD_SPI_HID2_INT_STATUS, val);
+}
+
static void amd_spi_mem_data_out(struct amd_spi *amd_spi,
const struct spi_mem_op *op)
{
int base_addr = AMD_SPI_FIFO_BASE + op->addr.nbytes;
u64 *buf_64 = (u64 *)op->data.buf.out;
+ u64 addr_val = op->addr.val;
u32 nbytes = op->data.nbytes;
u32 left_data = nbytes;
u8 *buf;
int i;
- amd_spi_set_opcode(amd_spi, op->cmd.opcode);
- amd_spi_set_addr(amd_spi, op);
+ /*
+ * Condition for using HID write mode. Only for writing complete page data, use HID write.
+ * Use index mode otherwise.
+ */
+ if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_write_cmd(op->cmd.opcode)) {
+ u64 *dma_buf64 = (u64 *)(amd_spi->dma_virt_addr + op->addr.nbytes + op->cmd.nbytes);
+ u8 *dma_buf = (u8 *)amd_spi->dma_virt_addr;
+
+ /* Copy opcode and address to DMA buffer */
+ *dma_buf = op->cmd.opcode;
- for (i = 0; left_data >= 8; i++, left_data -= 8)
- amd_spi_writereg64(amd_spi, base_addr + op->dummy.nbytes + (i * 8), *buf_64++);
+ dma_buf = (u8 *)dma_buf64;
+ for (i = 0; i < op->addr.nbytes; i++) {
+ *--dma_buf = addr_val & GENMASK(7, 0);
+ addr_val >>= 8;
+ }
- buf = (u8 *)buf_64;
- for (i = 0; i < left_data; i++) {
- amd_spi_writereg8(amd_spi, base_addr + op->dummy.nbytes + nbytes + i - left_data,
- buf[i]);
- }
+ /* Copy data to DMA buffer */
+ while (left_data >= 8) {
+ *dma_buf64++ = *buf_64++;
+ left_data -= 8;
+ }
- amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes);
- amd_spi_set_rx_count(amd_spi, 0);
- amd_spi_clear_fifo_ptr(amd_spi);
- amd_spi_execute_opcode(amd_spi);
+ buf = (u8 *)buf_64;
+ dma_buf = (u8 *)dma_buf64;
+ while (left_data--)
+ *dma_buf++ = *buf++;
+
+ amd_spi_hiddma_write(amd_spi, op);
+ } else {
+ amd_spi_set_opcode(amd_spi, op->cmd.opcode);
+ amd_spi_set_addr(amd_spi, op);
+
+ for (i = 0; left_data >= 8; i++, left_data -= 8)
+ amd_spi_writereg64(amd_spi, base_addr + op->dummy.nbytes + (i * 8),
+ *buf_64++);
+
+ buf = (u8 *)buf_64;
+ for (i = 0; i < left_data; i++) {
+ amd_spi_writereg8(amd_spi,
+ base_addr + op->dummy.nbytes + nbytes + i - left_data,
+ buf[i]);
+ }
+
+ amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes);
+ amd_spi_set_rx_count(amd_spi, 0);
+ amd_spi_clear_fifo_ptr(amd_spi);
+ amd_spi_execute_opcode(amd_spi);
+ }
}
static void amd_spi_hiddma_read(struct amd_spi *amd_spi, const struct spi_mem_op *op)
@@ -618,15 +674,21 @@ static void amd_spi_mem_data_in(struct amd_spi *amd_spi,
* Use index mode otherwise.
*/
if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_read_cmd(op->cmd.opcode)) {
+ u64 *dma_buf64 = (u64 *)amd_spi->dma_virt_addr;
+ u8 *dma_buf;
+
amd_spi_hiddma_read(amd_spi, op);
- for (i = 0; left_data >= 8; i++, left_data -= 8)
- *buf_64++ = readq((u8 __iomem *)amd_spi->dma_virt_addr + (i * 8));
+ /* Copy data from DMA buffer */
+ while (left_data >= 8) {
+ *buf_64++ = *dma_buf64++;
+ left_data -= 8;
+ }
buf = (u8 *)buf_64;
- for (i = 0; i < left_data; i++)
- buf[i] = readb((u8 __iomem *)amd_spi->dma_virt_addr +
- (nbytes - left_data + i));
+ dma_buf = (u8 *)dma_buf64;
+ while (left_data--)
+ *buf++ = *dma_buf++;
/* Reset HID RX memory logic */
data = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_CNTRL);
@@ -672,13 +734,10 @@ static int amd_spi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct amd_spi *amd_spi;
- int ret;
amd_spi = spi_controller_get_devdata(mem->spi->controller);
- ret = amd_set_spi_freq(amd_spi, mem->spi->max_speed_hz);
- if (ret)
- return ret;
+ amd_set_spi_freq(amd_spi, op->max_freq);
if (amd_spi->version == AMD_SPI_V2)
amd_set_spi_addr_mode(amd_spi, op);
@@ -693,10 +752,10 @@ static int amd_spi_exec_mem_op(struct spi_mem *mem,
amd_spi_mem_data_out(amd_spi, op);
break;
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- return ret;
+ return 0;
}
static const struct spi_controller_mem_ops amd_spi_mem_ops = {
@@ -705,6 +764,10 @@ static const struct spi_controller_mem_ops amd_spi_mem_ops = {
.supports_op = amd_spi_supports_op,
};
+static const struct spi_controller_mem_caps amd_spi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int amd_spi_host_transfer(struct spi_controller *host,
struct spi_message *msg)
{
@@ -729,51 +792,38 @@ static int amd_spi_setup_hiddma(struct amd_spi *amd_spi, struct device *dev)
{
u32 hid_regval;
- /* Allocate DMA buffer to use for HID basic read operation */
- amd_spi->dma_virt_addr = dma_alloc_coherent(dev, AMD_SPI_HID2_DMA_SIZE,
- &amd_spi->phy_dma_buf, GFP_KERNEL);
+ /* Allocate a DMA buffer for HID basic read and write operations. For write
+ * operations, the buffer must include the opcode, address bytes and dummy
+ * bytes (if any) in addition to the data bytes. The hardware also requires
+ * the buffer address to be 4K aligned, so allocate a DMA buffer of size
+ * 2 * AMD_SPI_HID2_DMA_SIZE.
+ */
+ amd_spi->dma_virt_addr = dmam_alloc_coherent(dev, AMD_SPI_HID2_DMA_SIZE * 2,
+ &amd_spi->phy_dma_buf, GFP_KERNEL);
if (!amd_spi->dma_virt_addr)
return -ENOMEM;
/*
* Enable interrupts and set mask bits in hid2_int_mask register to generate interrupt
- * properly for HIDDMA basic read operations.
+ * properly for HIDDMA basic read and write operations.
*/
hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_INT_MASK);
- hid_regval = (hid_regval & GENMASK(31, 8)) | BIT(19);
+ hid_regval = (hid_regval & GENMASK(31, 8)) | BIT(18) | BIT(19);
amd_spi_writereg32(amd_spi, AMD_SPI_HID2_INT_MASK, hid_regval);
- /* Configure buffer unit(4k) in hid2_control register */
+ /* Configure buffer unit (4K) and write threshold in hid2_control register */
hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_CNTRL);
- amd_spi_writereg32(amd_spi, AMD_SPI_HID2_CNTRL, hid_regval & ~BIT(3));
+ amd_spi_writereg32(amd_spi, AMD_SPI_HID2_CNTRL, (hid_regval | GENMASK(13, 12)) & ~BIT(3));
return 0;
}
-static int amd_spi_probe(struct platform_device *pdev)
+int amd_spi_probe_common(struct device *dev, struct spi_controller *host)
{
- struct device *dev = &pdev->dev;
- struct spi_controller *host;
- struct amd_spi *amd_spi;
+ struct amd_spi *amd_spi = spi_controller_get_devdata(host);
int err;
- /* Allocate storage for host and driver private data */
- host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
- if (!host)
- return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");
-
- amd_spi = spi_controller_get_devdata(host);
- amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(amd_spi->io_remap_addr))
- return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr),
- "ioremap of SPI registers failed\n");
-
- dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
-
- amd_spi->version = (uintptr_t) device_get_match_data(dev);
-
/* Initialize the spi_controller fields */
- host->bus_num = (amd_spi->version == AMD_HID2_SPI) ? 2 : 0;
host->num_chipselect = 4;
host->mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD;
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
@@ -782,6 +832,7 @@ static int amd_spi_probe(struct platform_device *pdev)
host->setup = amd_spi_host_setup;
host->transfer_one_message = amd_spi_host_transfer;
host->mem_ops = &amd_spi_mem_ops;
+ host->mem_caps = &amd_spi_mem_caps;
host->max_transfer_size = amd_spi_max_transfer_size;
host->max_message_size = amd_spi_max_transfer_size;
@@ -795,6 +846,32 @@ static int amd_spi_probe(struct platform_device *pdev)
return err;
}
+EXPORT_SYMBOL_GPL(amd_spi_probe_common);
+
+static int amd_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *host;
+ struct amd_spi *amd_spi;
+
+ /* Allocate storage for host and driver private data */
+ host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
+ if (!host)
+ return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");
+
+ amd_spi = spi_controller_get_devdata(host);
+ amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(amd_spi->io_remap_addr))
+ return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr),
+ "ioremap of SPI registers failed\n");
+
+ dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
+
+ amd_spi->version = (uintptr_t)device_get_match_data(dev);
+ host->bus_num = 0;
+
+ return amd_spi_probe_common(dev, host);
+}
#ifdef CONFIG_ACPI
static const struct acpi_device_id spi_acpi_match[] = {
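For reference, a sketch of the DMA buffer layout that the HID2 write path above assembles, assuming a one-byte opcode (the offsets follow the driver's own pointer arithmetic):

/*
 * HID2 write DMA buffer, as packed by amd_spi_mem_data_out():
 *
 *   byte 0                     opcode
 *   bytes 1 .. addr.nbytes     address, MSB first (the loop fills it
 *                              backwards from dma_buf64)
 *   bytes addr.nbytes + 1 ..   data, copied 8 bytes at a time via
 *                              dma_buf64, then the remaining tail bytes
 */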
diff --git a/drivers/spi/spi-amd.h b/drivers/spi/spi-amd.h
new file mode 100644
index 000000000000..5f39ce7b5587
--- /dev/null
+++ b/drivers/spi/spi-amd.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMD SPI controller driver common stuff
+ *
+ * Copyright (c) 2025, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Krishnamoorthi M <krishnamoorthi.m@amd.com>
+ */
+
+#ifndef SPI_AMD_H
+#define SPI_AMD_H
+
+/**
+ * enum amd_spi_versions - SPI controller versions
+ * @AMD_SPI_V1: AMDI0061 hardware version
+ * @AMD_SPI_V2: AMDI0062 hardware version
+ * @AMD_HID2_SPI: AMDI0063 hardware version
+ */
+enum amd_spi_versions {
+ AMD_SPI_V1 = 1,
+ AMD_SPI_V2,
+ AMD_HID2_SPI,
+};
+
+/**
+ * struct amd_spi - SPI driver instance
+ * @io_remap_addr: Start address of the SPI controller registers
+ * @phy_dma_buf: Physical address of DMA buffer
+ * @dma_virt_addr: Virtual address of DMA buffer
+ * @version: SPI controller hardware version
+ * @speed_hz: Device frequency
+ */
+struct amd_spi {
+ void __iomem *io_remap_addr;
+ dma_addr_t phy_dma_buf;
+ void *dma_virt_addr;
+ enum amd_spi_versions version;
+ unsigned int speed_hz;
+};
+
+int amd_spi_probe_common(struct device *dev, struct spi_controller *host);
+
+#endif /* SPI_AMD_H */
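With the common probe split out and the driver state exposed through spi-amd.h, another bus front end can be a thin wrapper around amd_spi_probe_common(). A minimal sketch, assuming the caller maps its own registers; the version and bus number here are assumptions for illustration, not taken from this patch:

static int amd_spi_other_probe(struct device *dev, void __iomem *regs)
{
	struct spi_controller *host;
	struct amd_spi *amd_spi;

	host = devm_spi_alloc_host(dev, sizeof(*amd_spi));
	if (!host)
		return -ENOMEM;

	amd_spi = spi_controller_get_devdata(host);
	amd_spi->io_remap_addr = regs;
	amd_spi->version = AMD_HID2_SPI;	/* assumption for the sketch */
	host->bus_num = 2;			/* assumption for the sketch */

	return amd_spi_probe_common(dev, host);
}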
diff --git a/drivers/spi/spi-amlogic-spifc-a1.c b/drivers/spi/spi-amlogic-spifc-a1.c
index fadf6667cd51..18c9aa2cbc29 100644
--- a/drivers/spi/spi-amlogic-spifc-a1.c
+++ b/drivers/spi/spi-amlogic-spifc-a1.c
@@ -259,7 +259,7 @@ static int amlogic_spifc_a1_exec_op(struct spi_mem *mem,
size_t data_size = op->data.nbytes;
int ret;
- ret = amlogic_spifc_a1_set_freq(spifc, mem->spi->max_speed_hz);
+ ret = amlogic_spifc_a1_set_freq(spifc, op->max_freq);
if (ret)
return ret;
@@ -320,6 +320,10 @@ static const struct spi_controller_mem_ops amlogic_spifc_a1_mem_ops = {
.adjust_op_size = amlogic_spifc_a1_adjust_op_size,
};
+static const struct spi_controller_mem_caps amlogic_spifc_a1_mem_caps = {
+ .per_op_freq = true,
+};
+
static int amlogic_spifc_a1_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
@@ -356,6 +360,7 @@ static int amlogic_spifc_a1_probe(struct platform_device *pdev)
ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
ctrl->auto_runtime_pm = true;
ctrl->mem_ops = &amlogic_spifc_a1_mem_ops;
+ ctrl->mem_caps = &amlogic_spifc_a1_mem_caps;
ctrl->min_speed_hz = SPIFC_A1_MIN_HZ;
ctrl->max_speed_hz = SPIFC_A1_MAX_HZ;
ctrl->mode_bits = (SPI_RX_DUAL | SPI_TX_DUAL |
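The per_op_freq conversion follows the same two-step pattern in every driver touched by this series: advertise the capability in mem_caps, then clock the controller from op->max_freq instead of the device-wide max_speed_hz. A condensed sketch of the contract, with hypothetical foo_* names; the spi-mem core is expected to cap op->max_freq at the device's max_speed_hz when the capability is set:

static const struct spi_controller_mem_caps foo_mem_caps = {
	.per_op_freq = true,
};

static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct foo *priv = spi_controller_get_devdata(mem->spi->controller);

	/* op->max_freq is already bounded by the device's limits */
	return foo_set_freq(priv, op->max_freq);
}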
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index e9beae95dded..62a11142bd63 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -303,13 +303,6 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
u32 ctl_val;
int ret = 0;
- dev_dbg(aspi->dev,
- "CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
- chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth,
- op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
-
addr_mode = readl(aspi->regs + CE_CTRL_REG);
addr_mode_backup = addr_mode;
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 7c252126b33e..8cc19934b48b 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -2,21 +2,28 @@
/*
* SPI-Engine SPI controller driver
* Copyright 2015 Analog Devices Inc.
+ * Copyright 2024 BayLibre, SAS
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/dmaengine.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
+#include <linux/spi/offload/provider.h>
#include <linux/spi/spi.h>
#include <trace/events/spi.h>
+#define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH 0x10
#define SPI_ENGINE_REG_RESET 0x40
#define SPI_ENGINE_REG_INT_ENABLE 0x80
@@ -24,6 +31,7 @@
#define SPI_ENGINE_REG_INT_SOURCE 0x88
#define SPI_ENGINE_REG_SYNC_ID 0xc0
+#define SPI_ENGINE_REG_OFFLOAD_SYNC_ID 0xc4
#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
@@ -34,10 +42,24 @@
#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
+#define SPI_ENGINE_MAX_NUM_OFFLOADS 32
+
+#define SPI_ENGINE_REG_OFFLOAD_CTRL(x) (0x100 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_STATUS(x) (0x104 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_RESET(x) (0x108 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(x) (0x110 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(x) (0x114 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+
+#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO GENMASK(15, 8)
+#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD GENMASK(7, 0)
+
#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
#define SPI_ENGINE_INT_SYNC BIT(3)
+#define SPI_ENGINE_INT_OFFLOAD_SYNC BIT(4)
+
+#define SPI_ENGINE_OFFLOAD_CTRL_ENABLE BIT(0)
#define SPI_ENGINE_CONFIG_CPHA BIT(0)
#define SPI_ENGINE_CONFIG_CPOL BIT(1)
@@ -79,6 +101,10 @@
#define SPI_ENGINE_CMD_CS_INV(flags) \
SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
+/* default sizes - can be changed when the SPI Engine HDL is compiled */
+#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16
+#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16
+
struct spi_engine_program {
unsigned int length;
uint16_t instructions[] __counted_by(length);
@@ -106,6 +132,19 @@ struct spi_engine_message_state {
uint8_t *rx_buf;
};
+enum {
+ SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED,
+ SPI_ENGINE_OFFLOAD_FLAG_PREPARED,
+};
+
+struct spi_engine_offload {
+ struct spi_engine *spi_engine;
+ unsigned long flags;
+ unsigned int offload_num;
+ unsigned int spi_mode_config;
+ u8 bits_per_word;
+};
+
struct spi_engine {
struct clk *clk;
struct clk *ref_clk;
@@ -118,6 +157,12 @@ struct spi_engine {
unsigned int int_enable;
/* shadows hardware CS inversion flag state */
u8 cs_inv;
+
+ unsigned int offload_ctrl_mem_size;
+ unsigned int offload_sdo_mem_size;
+ struct spi_offload *offload;
+ u32 offload_caps;
+ bool offload_requires_sync;
};
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
@@ -163,9 +208,9 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
unsigned int n = min(len, 256U);
unsigned int flags = 0;
- if (xfer->tx_buf)
+ if (xfer->tx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM))
flags |= SPI_ENGINE_TRANSFER_WRITE;
- if (xfer->rx_buf)
+ if (xfer->rx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM))
flags |= SPI_ENGINE_TRANSFER_READ;
spi_engine_program_add_cmd(p, dry,
@@ -217,16 +262,44 @@ static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
*
* NB: This is separate from spi_engine_compile_message() because the latter
* is called twice and would otherwise result in double-evaluation.
+ *
+ * Returns 0 on success, -EINVAL on failure.
*/
-static void spi_engine_precompile_message(struct spi_message *msg)
+static int spi_engine_precompile_message(struct spi_message *msg)
{
unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
struct spi_transfer *xfer;
+ u8 min_bits_per_word = U8_MAX;
+ u8 max_bits_per_word = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* If we have an offload transfer, we can't rx to buffer */
+ if (msg->offload && xfer->rx_buf)
+ return -EINVAL;
+
clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
+
+ if (xfer->len) {
+ min_bits_per_word = min(min_bits_per_word, xfer->bits_per_word);
+ max_bits_per_word = max(max_bits_per_word, xfer->bits_per_word);
+ }
+ }
+
+ /*
+ * If all xfers in the message use the same bits_per_word, we can
+ * provide some optimization when using SPI offload.
+ */
+ if (msg->offload) {
+ struct spi_engine_offload *priv = msg->offload->priv;
+
+ if (min_bits_per_word == max_bits_per_word)
+ priv->bits_per_word = min_bits_per_word;
+ else
+ priv->bits_per_word = 0;
}
+
+ return 0;
}
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
@@ -234,6 +307,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
{
struct spi_device *spi = msg->spi;
struct spi_controller *host = spi->controller;
+ struct spi_engine_offload *priv;
struct spi_transfer *xfer;
int clk_div, new_clk_div, inst_ns;
bool keep_cs = false;
@@ -247,9 +321,24 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
clk_div = 1;
- spi_engine_program_add_cmd(p, dry,
- SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
- spi_engine_get_config(spi)));
+ /*
+ * As an optimization, SPI offload sets this once when the offload is
+ * enabled instead of repeating the instruction in each message.
+ */
+ if (msg->offload) {
+ priv = msg->offload->priv;
+ priv->spi_mode_config = spi_engine_get_config(spi);
+
+ /*
+ * If all xfers use the same bits_per_word, it can be optimized
+ * in the same way.
+ */
+ bits_per_word = priv->bits_per_word;
+ } else {
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
+ spi_engine_get_config(spi)));
+ }
xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);
@@ -521,11 +610,107 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static int spi_engine_offload_prepare(struct spi_message *msg)
+{
+ struct spi_controller *host = msg->spi->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_program *p = msg->opt_state;
+ struct spi_engine_offload *priv = msg->offload->priv;
+ struct spi_transfer *xfer;
+ void __iomem *cmd_addr;
+ void __iomem *sdo_addr;
+ size_t tx_word_count = 0;
+ unsigned int i;
+
+ if (p->length > spi_engine->offload_ctrl_mem_size)
+ return -EINVAL;
+
+ /* count total number of tx words in message */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* no support for reading to rx_buf */
+ if (xfer->rx_buf)
+ return -EINVAL;
+
+ if (!xfer->tx_buf)
+ continue;
+
+ if (xfer->bits_per_word <= 8)
+ tx_word_count += xfer->len;
+ else if (xfer->bits_per_word <= 16)
+ tx_word_count += xfer->len / 2;
+ else
+ tx_word_count += xfer->len / 4;
+ }
+
+ if (tx_word_count && !(spi_engine->offload_caps & SPI_OFFLOAD_CAP_TX_STATIC_DATA))
+ return -EINVAL;
+
+ if (tx_word_count > spi_engine->offload_sdo_mem_size)
+ return -EINVAL;
+
+ /*
+ * This protects against calling spi_optimize_message() with an offload
+ * that has already been prepared with a different message.
+ */
+ if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags))
+ return -EBUSY;
+
+ cmd_addr = spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(priv->offload_num);
+ sdo_addr = spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(priv->offload_num);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!xfer->tx_buf)
+ continue;
+
+ if (xfer->bits_per_word <= 8) {
+ const u8 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ } else if (xfer->bits_per_word <= 16) {
+ const u16 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len / 2; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ } else {
+ const u32 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len / 4; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ }
+ }
+
+ for (i = 0; i < p->length; i++)
+ writel_relaxed(p->instructions[i], cmd_addr);
+
+ return 0;
+}
+
+static void spi_engine_offload_unprepare(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+
+ writel_relaxed(1, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
+ writel_relaxed(0, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
+
+ clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags);
+}
+
static int spi_engine_optimize_message(struct spi_message *msg)
{
+ struct spi_controller *host = msg->spi->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
struct spi_engine_program p_dry, *p;
+ int ret;
- spi_engine_precompile_message(msg);
+ ret = spi_engine_precompile_message(msg);
+ if (ret)
+ return ret;
p_dry.length = 0;
spi_engine_compile_message(msg, true, &p_dry);
@@ -536,31 +721,81 @@ static int spi_engine_optimize_message(struct spi_message *msg)
spi_engine_compile_message(msg, false, p);
- spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
- AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
+ /*
+ * Non-offload needs SYNC for completion interrupt. Older versions of
+ * the IP core also need SYNC for offload to work properly.
+ */
+ if (!msg->offload || spi_engine->offload_requires_sync)
+ spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
+ msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
msg->opt_state = p;
+ if (msg->offload) {
+ ret = spi_engine_offload_prepare(msg);
+ if (ret) {
+ msg->opt_state = NULL;
+ kfree(p);
+ return ret;
+ }
+ }
+
return 0;
}
static int spi_engine_unoptimize_message(struct spi_message *msg)
{
+ if (msg->offload)
+ spi_engine_offload_unprepare(msg->offload);
+
kfree(msg->opt_state);
return 0;
}
+static struct spi_offload
+*spi_engine_get_offload(struct spi_device *spi,
+ const struct spi_offload_config *config)
+{
+ struct spi_controller *host = spi->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_offload *priv;
+
+ if (!spi_engine->offload)
+ return ERR_PTR(-ENODEV);
+
+ if (config->capability_flags & ~spi_engine->offload_caps)
+ return ERR_PTR(-EINVAL);
+
+ priv = spi_engine->offload->priv;
+
+ if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags))
+ return ERR_PTR(-EBUSY);
+
+ return spi_engine->offload;
+}
+
+static void spi_engine_put_offload(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+
+ clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags);
+}
+
static int spi_engine_setup(struct spi_device *device)
{
struct spi_controller *host = device->controller;
struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ unsigned int reg;
if (device->mode & SPI_CS_HIGH)
spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
else
spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));
+ writel_relaxed(SPI_ENGINE_CMD_SYNC(0),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
@@ -571,7 +806,11 @@ static int spi_engine_setup(struct spi_device *device)
writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
- return 0;
+ writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ return readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID,
+ reg, reg == 1, 1, 1000);
}
static int spi_engine_transfer_one_message(struct spi_controller *host,
@@ -583,6 +822,12 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
unsigned int int_enable = 0;
unsigned long flags;
+ if (msg->offload) {
+ dev_err(&host->dev, "Single transfer offload not supported\n");
+ msg->status = -EOPNOTSUPP;
+ goto out;
+ }
+
/* reinitialize message state for this transfer */
memset(st, 0, sizeof(*st));
st->cmd_buf = p->instructions;
@@ -632,11 +877,89 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
trace_spi_transfer_stop(msg, xfer);
}
+out:
spi_finalize_current_message(host);
return msg->status;
}
+static int spi_engine_trigger_enable(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+ unsigned int reg;
+ int ret;
+
+ writel_relaxed(SPI_ENGINE_CMD_SYNC(0),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
+ priv->spi_mode_config),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ if (priv->bits_per_word)
+ writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
+ priv->bits_per_word),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ ret = readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID,
+ reg, reg == 1, 1, 1000);
+ if (ret)
+ return ret;
+
+ reg = readl_relaxed(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ reg |= SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
+ writel_relaxed(reg, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ return 0;
+}
+
+static void spi_engine_trigger_disable(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+ unsigned int reg;
+
+ reg = readl_relaxed(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
+ writel_relaxed(reg, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+}
+
+static struct dma_chan
+*spi_engine_tx_stream_request_dma_chan(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ char name[16];
+
+ snprintf(name, sizeof(name), "offload%u-tx", priv->offload_num);
+
+ return dma_request_chan(offload->provider_dev, name);
+}
+
+static struct dma_chan
+*spi_engine_rx_stream_request_dma_chan(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ char name[16];
+
+ snprintf(name, sizeof(name), "offload%u-rx", priv->offload_num);
+
+ return dma_request_chan(offload->provider_dev, name);
+}
+
+static const struct spi_offload_ops spi_engine_offload_ops = {
+ .trigger_enable = spi_engine_trigger_enable,
+ .trigger_disable = spi_engine_trigger_disable,
+ .tx_stream_request_dma_chan = spi_engine_tx_stream_request_dma_chan,
+ .rx_stream_request_dma_chan = spi_engine_rx_stream_request_dma_chan,
+};
+
static void spi_engine_release_hw(void *p)
{
struct spi_engine *spi_engine = p;
@@ -651,8 +974,7 @@ static int spi_engine_probe(struct platform_device *pdev)
struct spi_engine *spi_engine;
struct spi_controller *host;
unsigned int version;
- int irq;
- int ret;
+ int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -667,6 +989,46 @@ static int spi_engine_probe(struct platform_device *pdev)
spin_lock_init(&spi_engine->lock);
init_completion(&spi_engine->msg_complete);
+ /*
+ * REVISIT: for now, all SPI Engines only have one offload. In the
+ * future, this should be read from a memory mapped register to
+ * determine the number of offloads enabled at HDL compile time. For
+ * now, we can tell if an offload is present if there is a trigger
+ * source wired up to it.
+ */
+ if (device_property_present(&pdev->dev, "trigger-sources")) {
+ struct spi_engine_offload *priv;
+
+ spi_engine->offload =
+ devm_spi_offload_alloc(&pdev->dev,
+ sizeof(struct spi_engine_offload));
+ if (IS_ERR(spi_engine->offload))
+ return PTR_ERR(spi_engine->offload);
+
+ priv = spi_engine->offload->priv;
+ priv->spi_engine = spi_engine;
+ priv->offload_num = 0;
+
+ spi_engine->offload->ops = &spi_engine_offload_ops;
+ spi_engine->offload_caps = SPI_OFFLOAD_CAP_TRIGGER;
+
+ if (device_property_match_string(&pdev->dev, "dma-names", "offload0-rx") >= 0) {
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_RX_STREAM_DMA;
+ spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_RX_STREAM;
+ }
+
+ if (device_property_match_string(&pdev->dev, "dma-names", "offload0-tx") >= 0) {
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STREAM_DMA;
+ spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_TX_STREAM;
+ } else {
+ /*
+ * The HDL compile option that enables the TX DMA stream also
+ * disables the SDO memory, so we can't have both at the same time.
+ */
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STATIC_DATA;
+ }
+ }
+
spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
if (IS_ERR(spi_engine->clk))
return PTR_ERR(spi_engine->clk);
@@ -688,6 +1050,22 @@ static int spi_engine_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (ADI_AXI_PCORE_VER_MINOR(version) >= 1) {
+ unsigned int sizes = readl(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);
+
+ spi_engine->offload_ctrl_mem_size = 1 <<
+ FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes);
+ spi_engine->offload_sdo_mem_size = 1 <<
+ FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes);
+ } else {
+ spi_engine->offload_ctrl_mem_size = SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE;
+ spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE;
+ }
+
+ /* IP v1.5 dropped the requirement for SYNC in offload messages. */
+ spi_engine->offload_requires_sync = ADI_AXI_PCORE_VER_MINOR(version) < 5;
+
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
@@ -709,6 +1087,8 @@ static int spi_engine_probe(struct platform_device *pdev)
host->transfer_one_message = spi_engine_transfer_one_message;
host->optimize_message = spi_engine_optimize_message;
host->unoptimize_message = spi_engine_unoptimize_message;
+ host->get_offload = spi_engine_get_offload;
+ host->put_offload = spi_engine_put_offload;
host->num_chipselect = 8;
/* Some features depend of the IP core version. */
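The offload memory depths read back at probe are log2-encoded in a single register. A worked decode under the field layout defined earlier in this patch, with a hypothetical readback value:

u32 sizes = 0x0705;	/* hypothetical readback */
unsigned int cmd_words = 1 << FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes);
					/* 1 << 0x05 = 32-entry command FIFO */
unsigned int sdo_words = 1 << FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes);
					/* 1 << 0x07 = 128-entry SDO memory */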
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 0d1aa6592484..77de5a07639a 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1162,7 +1162,8 @@ static void bcm2835_spi_cleanup(struct spi_device *spi)
sizeof(u32),
DMA_TO_DEVICE);
- gpiod_put(bs->cs_gpio);
+ if (!IS_ERR(bs->cs_gpio))
+ gpiod_put(bs->cs_gpio);
spi_set_csgpiod(spi, 0, NULL);
kfree(target);
@@ -1225,7 +1226,12 @@ static int bcm2835_spi_setup(struct spi_device *spi)
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct bcm2835_spidev *target = spi_get_ctldata(spi);
struct gpiod_lookup_table *lookup __free(kfree) = NULL;
- int ret;
+ const char *pinctrl_compats[] = {
+ "brcm,bcm2835-gpio",
+ "brcm,bcm2711-gpio",
+ "brcm,bcm7211-gpio",
+ };
+ int ret, i;
u32 cs;
if (!target) {
@@ -1290,6 +1296,14 @@ static int bcm2835_spi_setup(struct spi_device *spi)
goto err_cleanup;
}
+ for (i = 0; i < ARRAY_SIZE(pinctrl_compats); i++) {
+ if (of_find_compatible_node(NULL, NULL, pinctrl_compats[i]))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(pinctrl_compats))
+ return 0;
+
/*
* TODO: The code below is a slightly better alternative to the utter
* abuse of the GPIO API that I found here before. It creates a
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index a031ecb358e0..fe0f122f07b0 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -43,10 +43,13 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);
#define CQSPI_SLOW_SRAM BIT(4)
#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5)
#define CQSPI_RD_NO_IRQ BIT(6)
-#define CQSPI_DISABLE_STIG_MODE BIT(7)
+#define CQSPI_DMA_SET_MASK BIT(7)
+#define CQSPI_SUPPORT_DEVICE_RESET BIT(8)
+#define CQSPI_DISABLE_STIG_MODE BIT(9)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
+#define CQSPI_SUPPORTS_QUAD BIT(1)
#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
@@ -111,7 +114,7 @@ struct cqspi_st {
struct cqspi_driver_platdata {
u32 hwcaps_mask;
- u8 quirks;
+ u16 quirks;
int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
u_char *rxbuf, loff_t from_addr, size_t n_rx);
u32 (*get_dma_status)(struct cqspi_st *cqspi);
@@ -146,6 +149,8 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
+#define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK BIT(5)
+#define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK BIT(6)
#define CQSPI_REG_RD_INSTR 0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
@@ -832,6 +837,25 @@ failrd:
return ret;
}
+static void cqspi_device_reset(struct cqspi_st *cqspi)
+{
+ u32 reg;
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+ /*
+ * NOTE: Delay timing implementation is derived from
+ * spi_nor_hw_reset()
+ */
+ writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
+ usleep_range(1, 5);
+ writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
+ usleep_range(100, 150);
+ writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
+ usleep_range(1000, 1200);
+}
+
static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
void __iomem *reg_base = cqspi->iobase;
@@ -1409,7 +1433,7 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
struct cqspi_flash_pdata *f_pdata;
f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)];
- cqspi_configure(f_pdata, mem->spi->max_speed_hz);
+ cqspi_configure(f_pdata, op->max_freq);
if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
/*
@@ -1634,6 +1658,12 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
int ret = PTR_ERR(cqspi->rx_chan);
cqspi->rx_chan = NULL;
+ if (ret == -ENODEV) {
+ /* DMA support is not mandatory */
+ dev_info(&cqspi->pdev->dev, "No Rx DMA available\n");
+ return 0;
+ }
+
return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
}
init_completion(&cqspi->rx_dma_complete);
@@ -1658,6 +1688,7 @@ static const struct spi_controller_mem_ops cqspi_mem_ops = {
static const struct spi_controller_mem_caps cqspi_mem_caps = {
.dtr = true,
+ .per_op_freq = true,
};
static int cqspi_setup_flash(struct cqspi_st *cqspi)
@@ -1865,6 +1896,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->master_ref_clk_hz);
if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
+ if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD)
+ host->mode_bits |= SPI_TX_QUAD;
if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
cqspi->use_direct_mode = true;
cqspi->use_direct_mode_wr = true;
@@ -1886,8 +1919,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (ddata->quirks & CQSPI_DISABLE_STIG_MODE)
cqspi->disable_stig_mode = true;
- if (of_device_is_compatible(pdev->dev.of_node,
- "xlnx,versal-ospi-1.0")) {
+ if (ddata->quirks & CQSPI_DMA_SET_MASK) {
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
goto probe_reset_failed;
@@ -1917,6 +1949,9 @@ static int cqspi_probe(struct platform_device *pdev)
host->num_chipselect = cqspi->num_chipselect;
+ if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET))
+ cqspi_device_reset(cqspi);
+
if (cqspi->use_direct_mode) {
ret = cqspi_request_mmap_dma(cqspi);
if (ret == -EPROBE_DEFER)
@@ -2037,7 +2072,7 @@ static const struct cqspi_driver_platdata k2g_qspi = {
};
static const struct cqspi_driver_platdata am654_ospi = {
- .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
+ .hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD,
.quirks = CQSPI_NEEDS_WR_DELAY,
};
@@ -2054,7 +2089,17 @@ static const struct cqspi_driver_platdata socfpga_qspi = {
static const struct cqspi_driver_platdata versal_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
- .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
+ | CQSPI_DMA_SET_MASK,
+ .indirect_read_dma = cqspi_versal_indirect_read_dma,
+ .get_dma_status = cqspi_get_versal_dma_status,
+};
+
+static const struct cqspi_driver_platdata versal2_ospi = {
+ .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
+ | CQSPI_DMA_SET_MASK
+ | CQSPI_SUPPORT_DEVICE_RESET,
.indirect_read_dma = cqspi_versal_indirect_read_dma,
.get_dma_status = cqspi_get_versal_dma_status,
};
@@ -2111,6 +2156,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "mobileye,eyeq5-ospi",
.data = &mobileye_eyeq5_ospi,
},
+ {
+ .compatible = "amd,versal2-ospi",
+ .data = &versal2_ospi,
+ },
{ /* end of table */ }
};
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index aed98ab14334..6dcba0e0ddaa 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -432,7 +432,7 @@ static bool cdns_mrvl_xspi_setup_clock(struct cdns_xspi_dev *cdns_xspi,
u32 clk_reg;
bool update_clk = false;
- while (i < ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list)) {
+ while (i < (ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list) - 1)) {
clk_val = MRVL_XSPI_CLOCK_DIVIDED(
cdns_mrvl_xspi_clk_div_list[i]);
if (clk_val <= requested_clk)
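The loop-bound fix above makes the final list entry the implicit fallback when no divider can honor the request: stopping the scan at ARRAY_SIZE - 1 leaves i pointing at the last (slowest) divider instead of running off the end of the list. The pattern in isolation, as a hypothetical helper rather than the driver's code:

/* Pick the first divider that brings the clock at or below the request;
 * capping the scan at n - 1 makes the last entry the fallback. */
static u32 pick_div(const u32 *divs, size_t n, u32 base_hz, u32 req_hz)
{
	size_t i = 0;

	while (i < n - 1 && base_hz / divs[i] > req_hz)
		i++;

	return divs[i];
}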
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
index 337aef12abcc..367ae7120bb3 100644
--- a/drivers/spi/spi-cavium-thunderx.c
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -34,7 +34,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
if (ret)
goto error;
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_request_all_regions(pdev, DRV_NAME);
if (ret)
goto error;
@@ -78,7 +78,6 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
return 0;
error:
- pci_release_regions(pdev);
spi_controller_put(host);
return ret;
}
@@ -92,7 +91,6 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
if (!p)
return;
- pci_release_regions(pdev);
/* Put everything in a known state. */
writeq(0, p->register_base + OCTEON_SPI_CFG(p));
}
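pcim_request_all_regions() ties the BAR claim to the device's lifetime, which is why both pci_release_regions() calls can simply go: devres releases the regions on driver detach or probe failure. The general shape of the managed pattern, with hypothetical names:

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);			/* managed enable */
	if (ret)
		return ret;

	ret = pcim_request_all_regions(pdev, "foo");	/* managed BARs */
	if (ret)
		return ret;

	/* no pci_release_regions() in remove() or on error paths */
	return 0;
}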
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index ceefc253c549..b28a840b3b04 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -237,7 +237,9 @@ static int cs42l43_get_speaker_id_gpios(struct cs42l43_spi *priv, int *result)
int i, ret;
descs = gpiod_get_array_optional(priv->dev, "spk-id", GPIOD_IN);
- if (IS_ERR_OR_NULL(descs))
+ if (!descs)
+ return 0;
+ else if (IS_ERR(descs))
return PTR_ERR(descs);
spkid = 0;
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index ea517af9435f..b3b883cb9541 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -423,7 +423,7 @@ static int dw_spi_transfer_one(struct spi_controller *host,
int ret;
dws->dma_mapped = 0;
- dws->n_bytes = roundup_pow_of_two(BITS_TO_BYTES(transfer->bits_per_word));
+ dws->n_bytes = spi_bpw_to_bytes(transfer->bits_per_word);
dws->tx = (void *)transfer->tx_buf;
dws->tx_len = transfer->len / dws->n_bytes;
dws->rx = transfer->rx_buf;
@@ -677,7 +677,7 @@ static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
* operation. Transmit-only mode is suitable for the rest of them.
*/
cfg.dfs = 8;
- cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
+ cfg.freq = clamp(op->max_freq, 0U, dws->max_mem_freq);
if (op->data.dir == SPI_MEM_DATA_IN) {
cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
cfg.ndf = op->data.nbytes;
@@ -894,6 +894,10 @@ static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}
+static const struct spi_controller_mem_caps dw_spi_mem_caps = {
+ .per_op_freq = true,
+};
+
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
struct spi_controller *host;
@@ -941,8 +945,10 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
host->set_cs = dw_spi_set_cs;
host->transfer_one = dw_spi_transfer_one;
host->handle_err = dw_spi_handle_err;
- if (dws->mem_ops.exec_op)
+ if (dws->mem_ops.exec_op) {
host->mem_ops = &dws->mem_ops;
+ host->mem_caps = &dw_spi_mem_caps;
+ }
host->max_speed_hz = dws->max_freq;
host->flags = SPI_CONTROLLER_GPIO_SS;
host->auto_runtime_pm = true;
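spi_bpw_to_bytes() rounds a word size up to whole bytes rather than to a power of two, which changes the result for sizes like 24 bits (assuming the helper's usual DIV_ROUND_UP(bpw, 8) definition):

/*
 * bits_per_word = 24:
 *   roundup_pow_of_two(BITS_TO_BYTES(24)) -> roundup_pow_of_two(3) -> 4
 *   spi_bpw_to_bytes(24)                  -> DIV_ROUND_UP(24, 8)   -> 3
 *
 * Three bytes per word is what the FIFO bookkeeping actually needs.
 */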
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index fc9e33be1e0e..e01c63d23b64 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -479,6 +479,19 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
fsi_spi_sequence_add(&seq, shift);
+ } else if (next->tx_buf) {
+ if ((next->len + transfer->len) > (SPI_FSI_MAX_TX_SIZE + 8)) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ len = next->len;
+ while (len > 8) {
+ fsi_spi_sequence_add(&seq,
+ SPI_FSI_SEQUENCE_SHIFT_OUT(8));
+ len -= 8;
+ }
+ fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
} else {
next = NULL;
}
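The added tx_buf branch splits a follow-on write into 8-byte shift-outs plus a tail. Worked through for next->len = 20:

/* len = 20: 20 > 8 -> SHIFT_OUT(8), len = 12
 *           12 > 8 -> SHIFT_OUT(8), len = 4
 *           loop ends -> SHIFT_OUT(4) emitted for the tail
 * (len = 8 skips the loop and emits a single SHIFT_OUT(8)) */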
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 067c954cb6ea..863781ba6c16 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
-// Copyright 2020 NXP
+// Copyright 2020-2025 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI
@@ -62,6 +62,7 @@
#define SPI_SR_TFIWF BIT(18)
#define SPI_SR_RFDF BIT(17)
#define SPI_SR_CMDFFF BIT(16)
+#define SPI_SR_TXRXS BIT(30)
#define SPI_SR_CLEAR (SPI_SR_TCFQF | \
SPI_SR_TFUF | SPI_SR_TFFF | \
SPI_SR_CMDTCF | SPI_SR_SPEF | \
@@ -921,9 +922,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *transfer;
bool cs = false;
int status = 0;
+ u32 val = 0;
+ bool cs_change = false;
message->actual_length = 0;
+ /* Put DSPI in running mode if halted. */
+ regmap_read(dspi->regmap, SPI_MCR, &val);
+ if (val & SPI_MCR_HALT) {
+ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0);
+ while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
+ !(val & SPI_SR_TXRXS))
+ ;
+ }
+
list_for_each_entry(transfer, &message->transfers, transfer_list) {
dspi->cur_transfer = transfer;
dspi->cur_msg = message;
@@ -953,6 +965,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
}
+ cs_change = transfer->cs_change;
dspi->tx = transfer->tx_buf;
dspi->rx = transfer->rx_buf;
dspi->len = transfer->len;
@@ -962,6 +975,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
dspi->progress, !dspi->irq);
@@ -988,6 +1003,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi_deassert_cs(spi, &cs);
}
+ if (status || !cs_change) {
+ /* Put DSPI in stop mode */
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_HALT, SPI_MCR_HALT);
+ while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
+ val & SPI_SR_TXRXS)
+ ;
+ }
+
message->status = status;
spi_finalize_current_message(ctlr);
@@ -1167,6 +1191,20 @@ static int dspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+static const struct regmap_range dspi_yes_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_MCR),
+ regmap_reg_range(SPI_TCR, SPI_CTAR(3)),
+ regmap_reg_range(SPI_SR, SPI_TXFR3),
+ regmap_reg_range(SPI_RXFR0, SPI_RXFR3),
+ regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)),
+ regmap_reg_range(SPI_SREX, SPI_SREX),
+};
+
+static const struct regmap_access_table dspi_access_table = {
+ .yes_ranges = dspi_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges),
+};
+
static const struct regmap_range dspi_volatile_ranges[] = {
regmap_reg_range(SPI_MCR, SPI_TCR),
regmap_reg_range(SPI_SR, SPI_SR),
@@ -1184,6 +1222,8 @@ static const struct regmap_config dspi_regmap_config = {
.reg_stride = 4,
.max_register = 0x88,
.volatile_table = &dspi_volatile_table,
+ .rd_table = &dspi_access_table,
+ .wr_table = &dspi_access_table,
};
static const struct regmap_range dspi_xspi_volatile_ranges[] = {
@@ -1205,6 +1245,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = {
.reg_stride = 4,
.max_register = 0x13c,
.volatile_table = &dspi_xspi_volatile_table,
+ .rd_table = &dspi_access_table,
+ .wr_table = &dspi_access_table,
},
{
.name = "pushr",
@@ -1227,6 +1269,8 @@ static int dspi_init(struct fsl_dspi *dspi)
if (!spi_controller_is_target(dspi->ctlr))
mcr |= SPI_MCR_HOST;
+ mcr |= SPI_MCR_HALT;
+
regmap_write(dspi->regmap, SPI_MCR, mcr);
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
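The TXRXS handshakes added above poll without a bound. A defensive variant could lean on regmap_read_poll_timeout(); this is a sketch of an alternative, not what the driver does:

/* bounded wait for the module to report the running state */
ret = regmap_read_poll_timeout(dspi->regmap, SPI_SR, val,
			       val & SPI_SR_TXRXS, 0, 1000);
if (ret)
	return ret;	/* hardware never reached the running state */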
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 40f5c8fdba76..5e3818445234 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -572,7 +572,7 @@ static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
timeout += 1;
/* Double calculated timeout */
- return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+ return secs_to_jiffies(2 * timeout);
}
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
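The timeout conversion is arithmetic-neutral: for a whole number of seconds, msecs_to_jiffies(t * MSEC_PER_SEC) and secs_to_jiffies(t) yield the same jiffies count, so the doubled timeout converts directly:

/* identical results for an integral number of seconds */
unsigned long a = msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
unsigned long b = secs_to_jiffies(2 * timeout);	/* a == b */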
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 9ec53bf0dda8..c887abb028d7 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -264,14 +264,14 @@ static const struct fsl_qspi_devtype_data ls2080a_data = {
struct fsl_qspi {
void __iomem *iobase;
void __iomem *ahb_addr;
- u32 memmap_phy;
- struct clk *clk, *clk_en;
- struct device *dev;
- struct completion c;
const struct fsl_qspi_devtype_data *devtype_data;
struct mutex lock;
+ struct completion c;
+ struct clk *clk, *clk_en;
struct pm_qos_request pm_qos_req;
+ struct device *dev;
int selected;
+ u32 memmap_phy;
};
static inline int needs_swap_endian(struct fsl_qspi *q)
@@ -522,9 +522,10 @@ static void fsl_qspi_invalidate(struct fsl_qspi *q)
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}
-static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
+static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
- unsigned long rate = spi->max_speed_hz;
+ unsigned long rate = op->max_freq;
int ret;
if (q->selected == spi_get_chipselect(spi, 0))
@@ -652,7 +653,7 @@ static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
- fsl_qspi_select_mem(q, mem->spi);
+ fsl_qspi_select_mem(q, mem->spi, op);
if (needs_amba_base_offset(q))
addr_offset = q->memmap_phy;
@@ -839,6 +840,28 @@ static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
.get_name = fsl_qspi_get_name,
};
+static const struct spi_controller_mem_caps fsl_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
+static void fsl_qspi_disable(void *data)
+{
+ struct fsl_qspi *q = data;
+
+ /* disable the hardware */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
+}
+
+static void fsl_qspi_cleanup(void *data)
+{
+ struct fsl_qspi *q = data;
+
+ fsl_qspi_clk_disable_unprep(q);
+
+ mutex_destroy(&q->lock);
+}
+
static int fsl_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -848,7 +871,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
struct fsl_qspi *q;
int ret;
- ctlr = spi_alloc_host(&pdev->dev, sizeof(*q));
+ ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*q));
if (!ctlr)
return -ENOMEM;
@@ -858,106 +881,78 @@ static int fsl_qspi_probe(struct platform_device *pdev)
q = spi_controller_get_devdata(ctlr);
q->dev = dev;
q->devtype_data = of_device_get_match_data(dev);
- if (!q->devtype_data) {
- ret = -ENODEV;
- goto err_put_ctrl;
- }
+ if (!q->devtype_data)
+ return -ENODEV;
platform_set_drvdata(pdev, q);
/* find the resources */
q->iobase = devm_platform_ioremap_resource_byname(pdev, "QuadSPI");
- if (IS_ERR(q->iobase)) {
- ret = PTR_ERR(q->iobase);
- goto err_put_ctrl;
- }
+ if (IS_ERR(q->iobase))
+ return PTR_ERR(q->iobase);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
- if (!res) {
- ret = -EINVAL;
- goto err_put_ctrl;
- }
+ if (!res)
+ return -EINVAL;
q->memmap_phy = res->start;
/* Since there are 4 cs, map size required is 4 times ahb_buf_size */
q->ahb_addr = devm_ioremap(dev, q->memmap_phy,
(q->devtype_data->ahb_buf_size * 4));
- if (!q->ahb_addr) {
- ret = -ENOMEM;
- goto err_put_ctrl;
- }
+ if (!q->ahb_addr)
+ return -ENOMEM;
/* find the clocks */
q->clk_en = devm_clk_get(dev, "qspi_en");
- if (IS_ERR(q->clk_en)) {
- ret = PTR_ERR(q->clk_en);
- goto err_put_ctrl;
- }
+ if (IS_ERR(q->clk_en))
+ return PTR_ERR(q->clk_en);
q->clk = devm_clk_get(dev, "qspi");
- if (IS_ERR(q->clk)) {
- ret = PTR_ERR(q->clk);
- goto err_put_ctrl;
- }
+ if (IS_ERR(q->clk))
+ return PTR_ERR(q->clk);
+
+ mutex_init(&q->lock);
ret = fsl_qspi_clk_prep_enable(q);
if (ret) {
dev_err(dev, "can not enable the clock\n");
- goto err_put_ctrl;
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q);
+ if (ret)
+ return ret;
+
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto err_disable_clk;
+ return ret;
ret = devm_request_irq(dev, ret,
fsl_qspi_irq_handler, 0, pdev->name, q);
if (ret) {
dev_err(dev, "failed to request irq: %d\n", ret);
- goto err_disable_clk;
+ return ret;
}
- mutex_init(&q->lock);
-
ctlr->bus_num = -1;
ctlr->num_chipselect = 4;
ctlr->mem_ops = &fsl_qspi_mem_ops;
+ ctlr->mem_caps = &fsl_qspi_mem_caps;
fsl_qspi_default_setup(q);
ctlr->dev.of_node = np;
+ ret = devm_add_action_or_reset(dev, fsl_qspi_disable, q);
+ if (ret)
+ return ret;
+
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
- goto err_destroy_mutex;
+ return ret;
return 0;
-
-err_destroy_mutex:
- mutex_destroy(&q->lock);
-
-err_disable_clk:
- fsl_qspi_clk_disable_unprep(q);
-
-err_put_ctrl:
- spi_controller_put(ctlr);
-
- dev_err(dev, "Freescale QuadSPI probe failed\n");
- return ret;
-}
-
-static void fsl_qspi_remove(struct platform_device *pdev)
-{
- struct fsl_qspi *q = platform_get_drvdata(pdev);
-
- /* disable the hardware */
- qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
- qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
-
- fsl_qspi_clk_disable_unprep(q);
-
- mutex_destroy(&q->lock);
}
static int fsl_qspi_suspend(struct device *dev)
@@ -997,7 +992,6 @@ static struct platform_driver fsl_qspi_driver = {
.pm = &fsl_qspi_pm_ops,
},
.probe = fsl_qspi_probe,
- .remove = fsl_qspi_remove,
};
module_platform_driver(fsl_qspi_driver);
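Because devres actions run in reverse registration order, the managed rework above gives this teardown sequence on remove or probe failure (a summary of the ordering, not code from the patch):

/*
 * Registered in probe:                  Runs on teardown (reverse):
 *   1. fsl_qspi_cleanup                    4th: clocks off, mutex destroyed
 *   2. devm_request_irq                    3rd: IRQ freed
 *   3. fsl_qspi_disable                    2nd: MCR_MDIS set, RSER cleared
 *   4. devm_spi_register_controller        1st: controller unregistered
 */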
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 856a4a9def66..2f2082652a1a 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -618,7 +618,7 @@ static struct spi_controller *fsl_spi_probe(struct device *dev,
if (ret < 0)
goto err_probe;
- dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
+ dev_info(dev, "at MMIO %pa (irq = %d), %s mode\n", &mem->start,
mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
return host;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 4f192e013cd6..ea5f1b10b79e 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -39,42 +39,14 @@ struct spi_gpio {
/*----------------------------------------------------------------------*/
-/*
- * Because the overhead of going through four GPIO procedure calls
- * per transferred bit can make performance a problem, this code
- * is set up so that you can use it in either of two ways:
- *
- * - The slow generic way: set up platform_data to hold the GPIO
- * numbers used for MISO/MOSI/SCK, and issue procedure calls for
- * each of them. This driver can handle several such busses.
- *
- * - The quicker inlined way: only helps with platform GPIO code
- * that inlines operations for constant GPIOs. This can give
- * you tight (fast!) inner loops, but each such bus needs a
- * new driver. You'll define a new C file, with Makefile and
- * Kconfig support; the C code can be a total of six lines:
- *
- * #define DRIVER_NAME "myboard_spi2"
- * #define SPI_MISO_GPIO 119
- * #define SPI_MOSI_GPIO 120
- * #define SPI_SCK_GPIO 121
- * #define SPI_N_CHIPSEL 4
- * #include "spi-gpio.c"
- */
-
-#ifndef DRIVER_NAME
#define DRIVER_NAME "spi_gpio"
-#define GENERIC_BITBANG /* vs tight inlines */
-
-#endif
-
/*----------------------------------------------------------------------*/
static inline struct spi_gpio *__pure
spi_to_spi_gpio(const struct spi_device *spi)
{
- const struct spi_bitbang *bang;
+ struct spi_bitbang *bang;
struct spi_gpio *spi_gpio;
bang = spi_controller_get_devdata(spi->controller);
@@ -341,16 +313,14 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev,
struct spi_gpio *spi_gpio = spi_controller_get_devdata(host);
int i;
-#ifdef GENERIC_BITBANG
- if (!pdata || !pdata->num_chipselect)
+ if (!pdata)
return -ENODEV;
-#endif
- /*
- * The host needs to think there is a chipselect even if not
- * connected
- */
- host->num_chipselect = pdata->num_chipselect ?: 1;
+ /* It's just one always-selected device, fine to continue */
+ if (!pdata->num_chipselect)
+ return 0;
+
+ host->num_chipselect = pdata->num_chipselect;
spi_gpio->cs_gpios = devm_kcalloc(dev, host->num_chipselect,
sizeof(*spi_gpio->cs_gpios),
GFP_KERNEL);
@@ -445,8 +415,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
return devm_spi_register_controller(&pdev->dev, host);
}
-MODULE_ALIAS("platform:" DRIVER_NAME);
-
static const struct of_device_id spi_gpio_dt_ids[] = {
{ .compatible = "spi-gpio" },
{}
@@ -465,3 +433,4 @@ module_platform_driver(spi_gpio_driver);
MODULE_DESCRIPTION("SPI host driver using generic bitbanged GPIO ");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
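After this change a platform device must supply platform_data, and a zero num_chipselect means one always-selected device rather than an error. A minimal board-file sketch, assuming the current spi_gpio_platform_data layout (names hypothetical):

static const struct spi_gpio_platform_data board_spi_pdata = {
	.num_chipselect = 0,	/* single device, CS permanently asserted */
};

static struct platform_device board_spi_device = {
	.name	= "spi_gpio",
	.id	= 1,
	.dev	= {
		.platform_data = (void *)&board_spi_pdata,
	},
};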
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eeb7d082c247..c93d80a4d734 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1449,7 +1449,7 @@ static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
timeout += 1;
/* Double calculated timeout */
- return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+ return secs_to_jiffies(2 * timeout);
}
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
@@ -1695,9 +1695,12 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
+ int ret;
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
- spi_imx_setupxfer(spi, transfer);
+ ret = spi_imx_setupxfer(spi, transfer);
+ if (ret < 0)
+ return ret;
transfer->effective_speed_hz = spi_imx->spi_bus_clk;
/* flush rxfifo before transfer */
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index 4d9ffec900bb..4b63cb98df9c 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -44,6 +44,7 @@ static int intel_spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct intel_spi_boardinfo *info;
+ void __iomem *base;
int ret;
ret = pcim_enable_device(pdev);
@@ -56,7 +57,12 @@ static int intel_spi_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
info->data = pdev;
- return intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
+
+ base = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ return intel_spi_probe(&pdev->dev, base, info);
}
static const struct pci_device_id intel_spi_pci_ids[] = {
diff --git a/drivers/spi/spi-intel-platform.c b/drivers/spi/spi-intel-platform.c
index 0974cca83a5d..6cbed0b2e063 100644
--- a/drivers/spi/spi-intel-platform.c
+++ b/drivers/spi/spi-intel-platform.c
@@ -14,14 +14,17 @@
static int intel_spi_platform_probe(struct platform_device *pdev)
{
struct intel_spi_boardinfo *info;
- struct resource *mem;
+ void __iomem *base;
info = dev_get_platdata(&pdev->dev);
if (!info)
return -EINVAL;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- return intel_spi_probe(&pdev->dev, mem, info);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ return intel_spi_probe(&pdev->dev, base, info);
}
static struct platform_driver intel_spi_platform_driver = {
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index b0dcdb6fb8fa..5d5a546c62ea 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -1467,13 +1467,13 @@ EXPORT_SYMBOL_GPL(intel_spi_groups);
/**
* intel_spi_probe() - Probe the Intel SPI flash controller
* @dev: Pointer to the parent device
- * @mem: MMIO resource
+ * @base: iomapped MMIO resource
* @info: Platform specific information
*
* Probes Intel SPI flash controller and creates the flash chip device.
* Returns %0 on success and negative errno in case of failure.
*/
-int intel_spi_probe(struct device *dev, struct resource *mem,
+int intel_spi_probe(struct device *dev, void __iomem *base,
const struct intel_spi_boardinfo *info)
{
struct spi_controller *host;
@@ -1488,10 +1488,7 @@ int intel_spi_probe(struct device *dev, struct resource *mem,
ispi = spi_controller_get_devdata(host);
- ispi->base = devm_ioremap_resource(dev, mem);
- if (IS_ERR(ispi->base))
- return PTR_ERR(ispi->base);
-
+ ispi->base = base;
ispi->dev = dev;
ispi->host = host;
ispi->info = info;
diff --git a/drivers/spi/spi-intel.h b/drivers/spi/spi-intel.h
index c5f35060dd63..53b292c6ea0c 100644
--- a/drivers/spi/spi-intel.h
+++ b/drivers/spi/spi-intel.h
@@ -11,11 +11,9 @@
#include <linux/platform_data/x86/spi-intel.h>
-struct resource;
-
extern const struct attribute_group *intel_spi_groups[];
-int intel_spi_probe(struct device *dev, struct resource *mem,
+int intel_spi_probe(struct device *dev, void __iomem *base,
const struct intel_spi_boardinfo *info);
#endif /* SPI_INTEL_H */
diff --git a/drivers/spi/spi-kspi2.c b/drivers/spi/spi-kspi2.c
new file mode 100644
index 000000000000..ca73ec52ce63
--- /dev/null
+++ b/drivers/spi/spi-kspi2.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) KEBA Industrial Automation Gmbh 2024
+ *
+ * Driver for KEBA SPI host controller type 2 FPGA IP core
+ */
+
+#include <linux/iopoll.h>
+#include <linux/misc/keba.h>
+#include <linux/spi/spi.h>
+
+#define KSPI2 "kspi2"
+
+#define KSPI2_CLK_FREQ_REG 0x03
+#define KSPI2_CLK_FREQ_MASK 0x0f
+#define KSPI2_CLK_FREQ_62_5M 0x0
+#define KSPI2_CLK_FREQ_33_3M 0x1
+#define KSPI2_CLK_FREQ_125M 0x2
+#define KSPI2_CLK_FREQ_50M 0x3
+#define KSPI2_CLK_FREQ_100M 0x4
+
+#define KSPI2_CONTROL_REG 0x04
+#define KSPI2_CONTROL_CLK_DIV_MAX 0x0f
+#define KSPI2_CONTROL_CLK_DIV_MASK 0x0f
+#define KSPI2_CONTROL_CPHA 0x10
+#define KSPI2_CONTROL_CPOL 0x20
+#define KSPI2_CONTROL_CLK_MODE_MASK 0x30
+#define KSPI2_CONTROL_INIT KSPI2_CONTROL_CLK_DIV_MAX
+
+#define KSPI2_STATUS_REG 0x08
+#define KSPI2_STATUS_IN_USE 0x01
+#define KSPI2_STATUS_BUSY 0x02
+
+#define KSPI2_DATA_REG 0x0c
+
+#define KSPI2_CS_NR_REG 0x10
+#define KSPI2_CS_NR_NONE 0xff
+
+#define KSPI2_MODE_BITS (SPI_CPHA | SPI_CPOL)
+#define KSPI2_NUM_CS 255
+
+#define KSPI2_SPEED_HZ_MIN(kspi) (kspi->base_speed_hz / 65536)
+#define KSPI2_SPEED_HZ_MAX(kspi) (kspi->base_speed_hz / 2)
+
+/* timeout is 10 times the time to transfer one byte at slowest clock */
+#define KSPI2_XFER_TIMEOUT_US(kspi) (USEC_PER_SEC / \
+ KSPI2_SPEED_HZ_MIN(kspi) * 8 * 10)
+
+#define KSPI2_INUSE_SLEEP_US (2 * USEC_PER_MSEC)
+#define KSPI2_INUSE_TIMEOUT_US (10 * USEC_PER_SEC)
+
+struct kspi2 {
+ struct keba_spi_auxdev *auxdev;
+ void __iomem *base;
+ struct spi_controller *host;
+
+ u32 base_speed_hz; /* SPI base clock frequency in Hz */
+ u8 control_shadow;
+
+ struct spi_device **device;
+ int device_size;
+};
+
+static int kspi2_inuse_lock(struct kspi2 *kspi)
+{
+ u8 sts;
+ int ret;
+
+ /*
+ * The SPI controller has an IN_USE bit for locking access to the
+ * controller. This enables the use of the SPI controller by other none
+ * Linux processors.
+ *
+ * If the SPI controller is free, then the first read returns
+ * IN_USE == 0. After that the SPI controller is locked and further
+ * reads of IN_USE return 1.
+ *
+ * The SPI controller is unlocked by writing 1 into IN_USE.
+ *
+ * The IN_USE bit acts as a hardware semaphore for the SPI controller.
+ * Poll for semaphore, but sleep while polling to free the CPU.
+ */
+ ret = readb_poll_timeout(kspi->base + KSPI2_STATUS_REG,
+ sts, (sts & KSPI2_STATUS_IN_USE) == 0,
+ KSPI2_INUSE_SLEEP_US, KSPI2_INUSE_TIMEOUT_US);
+ if (ret != 0)
+ dev_warn(&kspi->auxdev->auxdev.dev, "%s timed out!\n", __func__);
+
+ return ret;
+}
+
+static void kspi2_inuse_unlock(struct kspi2 *kspi)
+{
+ /* unlock the controller by writing 1 into IN_USE */
+ iowrite8(KSPI2_STATUS_IN_USE, kspi->base + KSPI2_STATUS_REG);
+}
+
+static int kspi2_prepare_hardware(struct spi_controller *host)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+
+ /* lock hardware semaphore before actual use of controller */
+ return kspi2_inuse_lock(kspi);
+}
+
+static int kspi2_unprepare_hardware(struct spi_controller *host)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+
+ /* unlock hardware semaphore after actual use of controller */
+ kspi2_inuse_unlock(kspi);
+
+ return 0;
+}
+
+static u8 kspi2_calc_minimal_divider(struct kspi2 *kspi, u32 max_speed_hz)
+{
+ u8 div;
+
+ /*
+ * Divider values 2, 4, 8, 16, ..., 65536 are possible. They are coded
+	 * as 0, 1, 2, 3, ..., 15 in the CONTROL_CLK_DIV field.
+ */
+ for (div = 0; div < KSPI2_CONTROL_CLK_DIV_MAX; div++) {
+ if ((kspi->base_speed_hz >> (div + 1)) <= max_speed_hz)
+ return div;
+ }
+
+ /* return divider for slowest clock if loop fails to find one */
+ return KSPI2_CONTROL_CLK_DIV_MAX;
+}
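To illustrate the search, assuming a 50 MHz base clock and a device limited to 1 MHz:

/*
 * Worked example (base_speed_hz = 50 MHz, max_speed_hz = 1 MHz):
 *   div = 0 -> 50 MHz / 2  = 25 MHz       too fast
 *   div = 4 -> 50 MHz / 32 = 1.5625 MHz   still too fast
 *   div = 5 -> 50 MHz / 64 = 781.25 kHz   <= 1 MHz, returned
 * The transfer then runs at the fastest rate not above the requested limit.
 */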
+
+static void kspi2_write_control_reg(struct kspi2 *kspi, u8 val, u8 mask)
+{
+ /* write control register only when necessary to improve performance */
+ if (val != (kspi->control_shadow & mask)) {
+ kspi->control_shadow = (kspi->control_shadow & ~mask) | val;
+ iowrite8(kspi->control_shadow, kspi->base + KSPI2_CONTROL_REG);
+ }
+}
+
+static int kspi2_txrx_byte(struct kspi2 *kspi, u8 tx, u8 *rx)
+{
+ u8 sts;
+ int ret;
+
+ /* start transfer by writing TX byte */
+ iowrite8(tx, kspi->base + KSPI2_DATA_REG);
+
+ /* wait till finished (BUSY == 0) */
+ ret = readb_poll_timeout(kspi->base + KSPI2_STATUS_REG,
+ sts, (sts & KSPI2_STATUS_BUSY) == 0,
+ 0, KSPI2_XFER_TIMEOUT_US(kspi));
+ if (ret != 0)
+ return ret;
+
+ /* read RX byte */
+ if (rx)
+ *rx = ioread8(kspi->base + KSPI2_DATA_REG);
+
+ return 0;
+}
+
+static int kspi2_process_transfer(struct kspi2 *kspi, struct spi_transfer *t)
+{
+ u8 tx = 0;
+ u8 rx;
+ int i;
+ int ret;
+
+ for (i = 0; i < t->len; i++) {
+ if (t->tx_buf)
+ tx = ((const u8 *)t->tx_buf)[i];
+
+ ret = kspi2_txrx_byte(kspi, tx, &rx);
+ if (ret)
+ return ret;
+
+ if (t->rx_buf)
+ ((u8 *)t->rx_buf)[i] = rx;
+ }
+
+ return 0;
+}
+
+static int kspi2_setup_transfer(struct kspi2 *kspi,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ u32 max_speed_hz = spi->max_speed_hz;
+ u8 clk_div;
+
+ /*
+ * spi_device (spi) has default parameters. Some of these can be
+ * overwritten by parameters in spi_transfer (t).
+ */
+ if (t->bits_per_word && ((t->bits_per_word % 8) != 0)) {
+ dev_err(&spi->dev, "Word width %d not supported!\n",
+ t->bits_per_word);
+
+ return -EINVAL;
+ }
+
+ if (t->speed_hz && (t->speed_hz < max_speed_hz))
+ max_speed_hz = t->speed_hz;
+
+ clk_div = kspi2_calc_minimal_divider(kspi, max_speed_hz);
+ kspi2_write_control_reg(kspi, clk_div, KSPI2_CONTROL_CLK_DIV_MASK);
+
+ return 0;
+}
+
+static int kspi2_transfer_one(struct spi_controller *host,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+ int ret;
+
+ ret = kspi2_setup_transfer(kspi, spi, t);
+ if (ret != 0)
+ return ret;
+
+ if (t->len) {
+ ret = kspi2_process_transfer(kspi, t);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kspi2_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_controller *host = spi->controller;
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+
+ /* controller is using active low chip select signals by design */
+ if (!enable)
+ iowrite8(spi_get_chipselect(spi, 0), kspi->base + KSPI2_CS_NR_REG);
+ else
+ iowrite8(KSPI2_CS_NR_NONE, kspi->base + KSPI2_CS_NR_REG);
+}
+
+static int kspi2_prepare_message(struct spi_controller *host,
+ struct spi_message *msg)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+ struct spi_device *spi = msg->spi;
+ u8 mode = 0;
+
+ /* setup SPI clock phase and polarity */
+ if (spi->mode & SPI_CPHA)
+ mode |= KSPI2_CONTROL_CPHA;
+ if (spi->mode & SPI_CPOL)
+ mode |= KSPI2_CONTROL_CPOL;
+ kspi2_write_control_reg(kspi, mode, KSPI2_CONTROL_CLK_MODE_MASK);
+
+ return 0;
+}
+
+static int kspi2_setup(struct spi_device *spi)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(spi->controller);
+
+ /*
+ * Check only parameters. Actual setup is done in kspi2_prepare_message
+ * and directly before the SPI transfer starts.
+ */
+
+ if (spi->mode & ~KSPI2_MODE_BITS) {
+ dev_err(&spi->dev, "Mode %d not supported!\n", spi->mode);
+
+ return -EINVAL;
+ }
+
+ if ((spi->bits_per_word % 8) != 0) {
+ dev_err(&spi->dev, "Word width %d not supported!\n",
+ spi->bits_per_word);
+
+ return -EINVAL;
+ }
+
+ if ((spi->max_speed_hz == 0) ||
+ (spi->max_speed_hz > KSPI2_SPEED_HZ_MAX(kspi)))
+ spi->max_speed_hz = KSPI2_SPEED_HZ_MAX(kspi);
+
+ if (spi->max_speed_hz < KSPI2_SPEED_HZ_MIN(kspi)) {
+ dev_err(&spi->dev, "Requested speed of %d Hz is too low!\n",
+ spi->max_speed_hz);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void kspi2_unregister_devices(struct kspi2 *kspi)
+{
+ int i;
+
+ for (i = 0; i < kspi->device_size; i++) {
+ struct spi_device *device = kspi->device[i];
+
+ if (device)
+ spi_unregister_device(device);
+ }
+}
+
+static int kspi2_register_devices(struct kspi2 *kspi)
+{
+ struct spi_board_info *info = kspi->auxdev->info;
+ int i;
+
+ /* register all known SPI devices */
+ for (i = 0; i < kspi->auxdev->info_size; i++) {
+ struct spi_device *device = spi_new_device(kspi->host, &info[i]);
+
+ if (!device) {
+ kspi2_unregister_devices(kspi);
+
+ return -ENODEV;
+ }
+ kspi->device[i] = device;
+ }
+
+ return 0;
+}
+
+static void kspi2_init(struct kspi2 *kspi)
+{
+ iowrite8(KSPI2_CONTROL_INIT, kspi->base + KSPI2_CONTROL_REG);
+ kspi->control_shadow = KSPI2_CONTROL_INIT;
+
+ iowrite8(KSPI2_CS_NR_NONE, kspi->base + KSPI2_CS_NR_REG);
+}
+
+static int kspi2_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &auxdev->dev;
+ struct spi_controller *host;
+ struct kspi2 *kspi;
+ u8 clk_reg;
+ int ret;
+
+ host = devm_spi_alloc_host(dev, sizeof(struct kspi2));
+ if (!host)
+ return -ENOMEM;
+ kspi = spi_controller_get_devdata(host);
+ kspi->auxdev = container_of(auxdev, struct keba_spi_auxdev, auxdev);
+ kspi->host = host;
+ kspi->device = devm_kcalloc(dev, kspi->auxdev->info_size,
+ sizeof(*kspi->device), GFP_KERNEL);
+ if (!kspi->device)
+ return -ENOMEM;
+ kspi->device_size = kspi->auxdev->info_size;
+ auxiliary_set_drvdata(auxdev, kspi);
+
+ kspi->base = devm_ioremap_resource(dev, &kspi->auxdev->io);
+ if (IS_ERR(kspi->base))
+ return PTR_ERR(kspi->base);
+
+ /* read the SPI base clock frequency */
+ clk_reg = ioread8(kspi->base + KSPI2_CLK_FREQ_REG);
+ switch (clk_reg & KSPI2_CLK_FREQ_MASK) {
+ case KSPI2_CLK_FREQ_62_5M:
+ kspi->base_speed_hz = 62500000; break;
+ case KSPI2_CLK_FREQ_33_3M:
+ kspi->base_speed_hz = 33333333; break;
+ case KSPI2_CLK_FREQ_125M:
+ kspi->base_speed_hz = 125000000; break;
+ case KSPI2_CLK_FREQ_50M:
+ kspi->base_speed_hz = 50000000; break;
+ case KSPI2_CLK_FREQ_100M:
+ kspi->base_speed_hz = 100000000; break;
+ default:
+ dev_err(dev, "Undefined SPI base clock frequency!\n");
+ return -ENODEV;
+ }
+
+ kspi2_init(kspi);
+
+ host->bus_num = -1;
+ host->num_chipselect = KSPI2_NUM_CS;
+ host->mode_bits = KSPI2_MODE_BITS;
+ host->setup = kspi2_setup;
+ host->prepare_transfer_hardware = kspi2_prepare_hardware;
+ host->unprepare_transfer_hardware = kspi2_unprepare_hardware;
+ host->prepare_message = kspi2_prepare_message;
+ host->set_cs = kspi2_set_cs;
+ host->transfer_one = kspi2_transfer_one;
+ ret = devm_spi_register_controller(dev, host);
+ if (ret) {
+ dev_err(dev, "Failed to register host (%d)!\n", ret);
+ return ret;
+ }
+
+ ret = kspi2_register_devices(kspi);
+ if (ret) {
+ dev_err(dev, "Failed to register devices (%d)!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kspi2_remove(struct auxiliary_device *auxdev)
+{
+ struct kspi2 *kspi = auxiliary_get_drvdata(auxdev);
+
+ kspi2_unregister_devices(kspi);
+}
+
+static const struct auxiliary_device_id kspi2_devtype_aux[] = {
+ { .name = "keba.spi" },
+ { },
+};
+MODULE_DEVICE_TABLE(auxiliary, kspi2_devtype_aux);
+
+static struct auxiliary_driver kspi2_driver_aux = {
+ .name = KSPI2,
+ .id_table = kspi2_devtype_aux,
+ .probe = kspi2_probe,
+ .remove = kspi2_remove,
+};
+module_auxiliary_driver(kspi2_driver_aux);
+
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA SPI host controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 31a878d9458d..7dd92deffe3f 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -420,7 +420,7 @@ MODULE_LICENSE("GPL");
static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len)
{
/* limit the hex_dump */
- if (len < 1024) {
+ if (len <= 1024) {
print_hex_dump(KERN_INFO, pre,
DUMP_PREFIX_OFFSET, 16, 1,
ptr, len, 0);
@@ -494,8 +494,8 @@ struct rx_ranges {
static int rx_ranges_cmp(void *priv, const struct list_head *a,
const struct list_head *b)
{
- struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list);
- struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list);
+ const struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list);
+ const struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list);
if (rx_a->start > rx_b->start)
return 1;
@@ -635,8 +635,8 @@ static int spi_test_check_loopback_result(struct spi_device *spi,
} else {
/* first byte received */
txb = ((u8 *)xfer->rx_buf)[0];
- /* first byte may be 0 or xff */
- if (!((txb == 0) || (txb == 0xff))) {
+ /* first byte may be 0 or 0xff */
+ if (txb != 0 && txb != 0xff) {
dev_err(&spi->dev,
"loopback strangeness - we expect 0x00 or 0xff, but not 0x%02x\n",
txb);
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index abc6792e738c..5db0639d3b01 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -187,6 +187,16 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
return false;
}
+ if (op->max_freq && mem->spi->controller->min_speed_hz &&
+ op->max_freq < mem->spi->controller->min_speed_hz)
+ return false;
+
+ if (op->max_freq &&
+ op->max_freq < mem->spi->max_speed_hz) {
+ if (!spi_mem_controller_is_capable(ctlr, per_op_freq))
+ return false;
+ }
+
return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
@@ -364,6 +374,20 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
u8 *tmpbuf;
int ret;
+	/* Make sure the operation frequency is correct before going further */
+ spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);
+
+ dev_vdbg(&mem->spi->dev, "[cmd: 0x%02x][%dB addr: %#8llx][%2dB dummy][%4dB data %s] %d%c-%d%c-%d%c-%d%c @ %uHz\n",
+ op->cmd.opcode,
+ op->addr.nbytes, (op->addr.nbytes ? op->addr.val : 0),
+ op->dummy.nbytes,
+ op->data.nbytes, (op->data.nbytes ? (op->data.dir == SPI_MEM_DATA_IN ? " read" : "write") : " "),
+ op->cmd.buswidth, op->cmd.dtr ? 'D' : 'S',
+ op->addr.buswidth, op->addr.dtr ? 'D' : 'S',
+ op->dummy.buswidth, op->dummy.dtr ? 'D' : 'S',
+ op->data.buswidth, op->data.dtr ? 'D' : 'S',
+ op->max_freq ? op->max_freq : mem->spi->max_speed_hz);
+
ret = spi_mem_check_op(op);
if (ret)
return ret;
@@ -410,6 +434,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_buf = tmpbuf;
xfers[xferpos].len = op->cmd.nbytes;
xfers[xferpos].tx_nbits = op->cmd.buswidth;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen++;
@@ -424,6 +449,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_buf = tmpbuf + 1;
xfers[xferpos].len = op->addr.nbytes;
xfers[xferpos].tx_nbits = op->addr.buswidth;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->addr.nbytes;
@@ -435,6 +461,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].len = op->dummy.nbytes;
xfers[xferpos].tx_nbits = op->dummy.buswidth;
xfers[xferpos].dummy_data = 1;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->dummy.nbytes;
@@ -450,6 +477,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
}
xfers[xferpos].len = op->data.nbytes;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->data.nbytes;
@@ -528,6 +556,57 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
+/**
+ * spi_mem_adjust_op_freq() - Adjust the frequency of a SPI mem operation to
+ * match controller, PCB and chip limitations
+ * @mem: the SPI memory
+ * @op: the operation to adjust
+ *
+ * Some chips have per-op frequency limitations and must adapt the maximum
+ * speed. This function allows SPI mem drivers to set @op->max_freq to the
+ * maximum supported value.
+ */
+void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ if (!op->max_freq || op->max_freq > mem->spi->max_speed_hz)
+ op->max_freq = mem->spi->max_speed_hz;
+}
+EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq);
+
+/**
+ * spi_mem_calc_op_duration() - Derives the theoretical length (in ns) of an
+ *                              operation. This helps find the best variant
+ * among a list of possible choices.
+ * @op: the operation to benchmark
+ *
+ * Some chips have per-op frequency limitations, PCBs usually have their own
+ * limitations as well, and controllers can support dual, quad or even octal
+ * modes, sometimes in DTR. All these combinations make it impossible to
+ * statically list the best combination for all situations. If we want something
+ * accurate, all these combinations should be rated (e.g. with a time estimate)
+ * and the best pick should be taken based on these calculations.
+ *
+ * Returns a ns estimate for the time this op would take.
+ */
+u64 spi_mem_calc_op_duration(struct spi_mem_op *op)
+{
+ u64 ncycles = 0;
+ u32 ns_per_cycles;
+
+ ns_per_cycles = 1000000000 / op->max_freq;
+ ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1);
+ ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1);
+
+ /* Dummy bytes are optional for some SPI flash memory operations */
+ if (op->dummy.nbytes)
+ ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+
+ ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1);
+
+ return ncycles * ns_per_cycles;
+}
+EXPORT_SYMBOL_GPL(spi_mem_calc_op_duration);
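As a hedged usage sketch of the two helpers above (the wrapper function and the op pair are hypothetical; only spi_mem_adjust_op_freq() and spi_mem_calc_op_duration() come from this patch), a spi-mem driver could rate candidate operations like so:

/* Hypothetical helper: pick the quicker of two candidate operations */
static const struct spi_mem_op *
foo_pick_faster_op(struct spi_mem *mem, struct spi_mem_op *a,
		   struct spi_mem_op *b)
{
	/* clamp both candidates to what the controller and device allow */
	spi_mem_adjust_op_freq(mem, a);
	spi_mem_adjust_op_freq(mem, b);

	/* a slower-clocked octal op may still beat a faster quad op */
	if (spi_mem_calc_op_duration(a) <= spi_mem_calc_op_duration(b))
		return a;
	return b;
}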
+
static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index df74ad5060f8..6b9137307533 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -21,18 +21,26 @@
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/dma-mapping.h>
/*
- * The Meson SPICC controller could support DMA based transfers, but is not
- * implemented by the vendor code, and while having the registers documentation
- * it has never worked on the GXL Hardware.
- * The PIO mode is the only mode implemented, and due to badly designed HW :
- * - all transfers are cutted in 16 words burst because the FIFO hangs on
- * TX underflow, and there is no TX "Half-Empty" interrupt, so we go by
- * FIFO max size chunk only
- * - CS management is dumb, and goes UP between every burst, so is really a
- * "Data Valid" signal than a Chip Select, GPIO link should be used instead
- * to have a CS go down over the full transfer
+ * There are two modes for data transmission: PIO and DMA.
+ * When bits_per_word is 8, 16, 24, or 32, data is transferred using PIO mode.
+ * When bits_per_word is 64, DMA mode is used by default.
+ *
+ * DMA achieves a transfer with one or more SPI bursts, and each SPI burst
+ * is made up of one or more DMA bursts. The DMA burst mechanism works as
+ * follows. For TX, when the number of words in the TXFIFO is less than the
+ * preset reading threshold, SPICC starts a reading DMA burst, which reads
+ * the preset number of words from the TX buffer and writes them into the
+ * TXFIFO.
+ * For RX, when the number of words in the RXFIFO is greater than the preset
+ * writing threshold, SPICC starts a writing DMA burst, which reads the
+ * preset number of words from the RXFIFO and writes them into the RX buffer.
+ * DMA is used if the transfer meets the following conditions:
+ * - 64 bits per word
+ * - The transfer length in words is a multiple of dma_burst_len, where
+ *   dma_burst_len is one of 8, 7, ..., 2; otherwise the transfer is split
+ *   into several SPI bursts by this driver
*/
#define SPICC_MAX_BURST 128
@@ -128,6 +136,23 @@
#define SPICC_DWADDR 0x24 /* Write Address of DMA */
+#define SPICC_LD_CNTL0 0x28
+#define VSYNC_IRQ_SRC_SELECT BIT(0)
+#define DMA_EN_SET_BY_VSYNC BIT(2)
+#define XCH_EN_SET_BY_VSYNC BIT(3)
+#define DMA_READ_COUNTER_EN BIT(4)
+#define DMA_WRITE_COUNTER_EN BIT(5)
+#define DMA_RADDR_LOAD_BY_VSYNC BIT(6)
+#define DMA_WADDR_LOAD_BY_VSYNC BIT(7)
+#define DMA_ADDR_LOAD_FROM_LD_ADDR BIT(8)
+
+#define SPICC_LD_CNTL1 0x2c
+#define DMA_READ_COUNTER GENMASK(15, 0)
+#define DMA_WRITE_COUNTER GENMASK(31, 16)
+#define DMA_BURST_LEN_DEFAULT 8
+#define DMA_BURST_COUNT_MAX 0xffff
+#define SPI_BURST_LEN_MAX (DMA_BURST_LEN_DEFAULT * DMA_BURST_COUNT_MAX)
+
#define SPICC_ENH_CTL0 0x38 /* Enhanced Feature */
#define SPICC_ENH_CLK_CS_DELAY_MASK GENMASK(15, 0)
#define SPICC_ENH_DATARATE_MASK GENMASK(23, 16)
@@ -171,6 +196,9 @@ struct meson_spicc_device {
struct pinctrl *pinctrl;
struct pinctrl_state *pins_idle_high;
struct pinctrl_state *pins_idle_low;
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
+ bool using_dma;
};
#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
@@ -202,6 +230,148 @@ static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
writel_relaxed(conf, spicc->base + SPICC_ENH_CTL0);
}
+static int meson_spicc_dma_map(struct meson_spicc_device *spicc,
+ struct spi_transfer *t)
+{
+ struct device *dev = spicc->host->dev.parent;
+
+ if (!(t->tx_buf && t->rx_buf))
+ return -EINVAL;
+
+ t->tx_dma = dma_map_single(dev, (void *)t->tx_buf, t->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, t->tx_dma))
+ return -ENOMEM;
+
+ t->rx_dma = dma_map_single(dev, t->rx_buf, t->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, t->rx_dma))
+ return -ENOMEM;
+
+ spicc->tx_dma = t->tx_dma;
+ spicc->rx_dma = t->rx_dma;
+
+ return 0;
+}
+
+static void meson_spicc_dma_unmap(struct meson_spicc_device *spicc,
+ struct spi_transfer *t)
+{
+ struct device *dev = spicc->host->dev.parent;
+
+ if (t->tx_dma)
+ dma_unmap_single(dev, t->tx_dma, t->len, DMA_TO_DEVICE);
+ if (t->rx_dma)
+ dma_unmap_single(dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
+}
+
+/*
+ * According to the remaining word count, calculate a suitable SPI burst
+ * length and a DMA burst length for the current SPI burst
+ */
+static u32 meson_spicc_calc_dma_len(struct meson_spicc_device *spicc,
+ u32 len, u32 *dma_burst_len)
+{
+ u32 i;
+
+ if (len <= spicc->data->fifo_size) {
+ *dma_burst_len = len;
+ return len;
+ }
+
+ *dma_burst_len = DMA_BURST_LEN_DEFAULT;
+
+ if (len == (SPI_BURST_LEN_MAX + 1))
+ return SPI_BURST_LEN_MAX - DMA_BURST_LEN_DEFAULT;
+
+ if (len >= SPI_BURST_LEN_MAX)
+ return SPI_BURST_LEN_MAX;
+
+ for (i = DMA_BURST_LEN_DEFAULT; i > 1; i--)
+ if ((len % i) == 0) {
+ *dma_burst_len = i;
+ return len;
+ }
+
+ i = len % DMA_BURST_LEN_DEFAULT;
+ len -= i;
+
+ if (i == 1)
+ len -= DMA_BURST_LEN_DEFAULT;
+
+ return len;
+}
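Concretely (a 16-word FIFO is assumed here for illustration):

/*
 * Worked examples, fifo_size = 16 words:
 *   len = 30: 30 % 6 == 0, so one SPI burst of 30 words made of five
 *             6-word DMA bursts (dma_burst_len = 6).
 *   len = 43: no divisor in 8..2 fits, 43 % 8 == 3, so this SPI burst
 *             covers 40 words with dma_burst_len = 8 and 3 words are left
 *             for the next round. A remainder of 1 would drop a further
 *             8 words, since a 1-word DMA burst cannot trigger.
 */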
+
+static void meson_spicc_setup_dma(struct meson_spicc_device *spicc)
+{
+ unsigned int len;
+ unsigned int dma_burst_len, dma_burst_count;
+ unsigned int count_en = 0;
+ unsigned int txfifo_thres = 0;
+ unsigned int read_req = 0;
+ unsigned int rxfifo_thres = 31;
+ unsigned int write_req = 0;
+ unsigned int ld_ctr1 = 0;
+
+ writel_relaxed(spicc->tx_dma, spicc->base + SPICC_DRADDR);
+ writel_relaxed(spicc->rx_dma, spicc->base + SPICC_DWADDR);
+
+	/* Set the max burst length to support a transmission with a length of
+	 * no more than 1024 bytes (128 words), which must use CS management
+	 * because of some strict timing requirements
+ */
+ writel_bits_relaxed(SPICC_BURSTLENGTH_MASK, SPICC_BURSTLENGTH_MASK,
+ spicc->base + SPICC_CONREG);
+
+ len = meson_spicc_calc_dma_len(spicc, spicc->xfer_remain,
+ &dma_burst_len);
+ spicc->xfer_remain -= len;
+ dma_burst_count = DIV_ROUND_UP(len, dma_burst_len);
+ dma_burst_len--;
+
+ if (spicc->tx_dma) {
+ spicc->tx_dma += len;
+ count_en |= DMA_READ_COUNTER_EN;
+ txfifo_thres = spicc->data->fifo_size - dma_burst_len;
+ read_req = dma_burst_len;
+ ld_ctr1 |= FIELD_PREP(DMA_READ_COUNTER, dma_burst_count);
+ }
+
+ if (spicc->rx_dma) {
+ spicc->rx_dma += len;
+ count_en |= DMA_WRITE_COUNTER_EN;
+ rxfifo_thres = dma_burst_len;
+ write_req = dma_burst_len;
+ ld_ctr1 |= FIELD_PREP(DMA_WRITE_COUNTER, dma_burst_count);
+ }
+
+ writel_relaxed(count_en, spicc->base + SPICC_LD_CNTL0);
+ writel_relaxed(ld_ctr1, spicc->base + SPICC_LD_CNTL1);
+ writel_relaxed(SPICC_DMA_ENABLE
+ | SPICC_DMA_URGENT
+ | FIELD_PREP(SPICC_TXFIFO_THRESHOLD_MASK, txfifo_thres)
+ | FIELD_PREP(SPICC_READ_BURST_MASK, read_req)
+ | FIELD_PREP(SPICC_RXFIFO_THRESHOLD_MASK, rxfifo_thres)
+ | FIELD_PREP(SPICC_WRITE_BURST_MASK, write_req),
+ spicc->base + SPICC_DMAREG);
+}
+
+static irqreturn_t meson_spicc_dma_irq(struct meson_spicc_device *spicc)
+{
+ if (readl_relaxed(spicc->base + SPICC_DMAREG) & SPICC_DMA_ENABLE)
+ return IRQ_HANDLED;
+
+ if (spicc->xfer_remain) {
+ meson_spicc_setup_dma(spicc);
+ } else {
+ writel_bits_relaxed(SPICC_SMC, 0, spicc->base + SPICC_CONREG);
+ writel_relaxed(0, spicc->base + SPICC_INTREG);
+ writel_relaxed(0, spicc->base + SPICC_DMAREG);
+ meson_spicc_dma_unmap(spicc, spicc->xfer);
+ complete(&spicc->done);
+ }
+
+ return IRQ_HANDLED;
+}
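In other words, the handler implements this small state machine (sketch of the logic above):

/*
 * IRQ flow: while SPICC_DMA_ENABLE is still set, the current SPI burst
 * has not drained and the interrupt is acknowledged without action. Once
 * it clears, either the next SPI burst is programmed (xfer_remain != 0)
 * or SMC, interrupts and DMA are torn down and the transfer completes.
 */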
+
static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
{
return !!FIELD_GET(SPICC_TF,
@@ -293,6 +463,9 @@ static irqreturn_t meson_spicc_irq(int irq, void *data)
writel_bits_relaxed(SPICC_TC, SPICC_TC, spicc->base + SPICC_STATREG);
+ if (spicc->using_dma)
+ return meson_spicc_dma_irq(spicc);
+
/* Empty RX FIFO */
meson_spicc_rx(spicc);
@@ -426,9 +599,6 @@ static int meson_spicc_transfer_one(struct spi_controller *host,
meson_spicc_reset_fifo(spicc);
- /* Setup burst */
- meson_spicc_setup_burst(spicc);
-
/* Setup wait for completion */
reinit_completion(&spicc->done);
@@ -442,11 +612,36 @@ static int meson_spicc_transfer_one(struct spi_controller *host,
/* Increase it twice and add 200 ms tolerance */
timeout += timeout + 200;
- /* Start burst */
- writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
+ if (xfer->bits_per_word == 64) {
+ int ret;
+
+ /* dma_burst_len 1 can't trigger a dma burst */
+ if (xfer->len < 16)
+ return -EINVAL;
+
+ ret = meson_spicc_dma_map(spicc, xfer);
+ if (ret) {
+ meson_spicc_dma_unmap(spicc, xfer);
+ dev_err(host->dev.parent, "dma map failed\n");
+ return ret;
+ }
+
+ spicc->using_dma = true;
+ spicc->xfer_remain = DIV_ROUND_UP(xfer->len, spicc->bytes_per_word);
+ meson_spicc_setup_dma(spicc);
+ writel_relaxed(SPICC_TE_EN, spicc->base + SPICC_INTREG);
+ writel_bits_relaxed(SPICC_SMC, SPICC_SMC, spicc->base + SPICC_CONREG);
+ } else {
+ spicc->using_dma = false;
+ /* Setup burst */
+ meson_spicc_setup_burst(spicc);
- /* Enable interrupts */
- writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG);
+ /* Start burst */
+ writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
+
+ /* Enable interrupts */
+ writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG);
+ }
if (!wait_for_completion_timeout(&spicc->done, msecs_to_jiffies(timeout)))
return -ETIMEDOUT;
@@ -545,6 +740,14 @@ static int meson_spicc_setup(struct spi_device *spi)
if (!spi->controller_state)
spi->controller_state = spi_controller_get_devdata(spi->controller);
+	/* DMA is used at 64 bits per word; the other word sizes use PIO */
+ if (spi->bits_per_word != 8 &&
+ spi->bits_per_word != 16 &&
+ spi->bits_per_word != 24 &&
+ spi->bits_per_word != 32 &&
+ spi->bits_per_word != 64)
+ return -EINVAL;
+
return 0;
}
@@ -853,10 +1056,6 @@ static int meson_spicc_probe(struct platform_device *pdev)
host->num_chipselect = 4;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LOOP;
- host->bits_per_word_mask = SPI_BPW_MASK(32) |
- SPI_BPW_MASK(24) |
- SPI_BPW_MASK(16) |
- SPI_BPW_MASK(8);
host->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX);
host->min_speed_hz = spicc->data->min_speed_hz;
host->max_speed_hz = spicc->data->max_speed_hz;
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
index ad2b5ffa6153..fa828fcaaef2 100644
--- a/drivers/spi/spi-microchip-core-qspi.c
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -265,7 +265,8 @@ static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
return ret;
}
-static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi)
+static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
unsigned long clk_hz;
u32 control, baud_rate_val = 0;
@@ -274,11 +275,11 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi
if (!clk_hz)
return -EINVAL;
- baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz);
+ baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
dev_err(&spi->dev,
"could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
- spi->max_speed_hz, clk_hz);
+ op->max_freq, clk_hz);
return -EINVAL;
}
@@ -399,7 +400,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
if (err)
goto error;
- err = mchp_coreqspi_setup_clock(qspi, mem->spi);
+ err = mchp_coreqspi_setup_clock(qspi, mem->spi, op);
if (err)
goto error;
@@ -457,6 +458,10 @@ error:
static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->controller);
+ unsigned long clk_hz;
+ u32 baud_rate_val;
+
if (!spi_mem_default_supports_op(mem, op))
return false;
@@ -479,6 +484,14 @@ static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_
return false;
}
+ clk_hz = clk_get_rate(qspi->clk);
+ if (!clk_hz)
+ return false;
+
+ baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
+ if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER)
+ return false;
+
return true;
}
@@ -498,6 +511,10 @@ static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
.exec_op = mchp_coreqspi_exec_op,
};
+static const struct spi_controller_mem_caps mchp_coreqspi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int mchp_coreqspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -540,6 +557,7 @@ static int mchp_coreqspi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mem_ops = &mchp_coreqspi_mem_ops;
+ ctlr->mem_caps = &mchp_coreqspi_mem_caps;
ctlr->setup = mchp_coreqspi_setup_op;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index 5b6af55855ef..62ba0bd9cbb7 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -70,8 +70,7 @@
#define INT_RX_CHANNEL_OVERFLOW BIT(2)
#define INT_TX_CHANNEL_UNDERRUN BIT(3)
-#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \
- CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
+#define INT_ENABLE_MASK (CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
#define REG_CONTROL (0x00)
#define REG_FRAME_SIZE (0x04)
@@ -133,10 +132,15 @@ static inline void mchp_corespi_disable(struct mchp_corespi *spi)
mchp_corespi_write(spi, REG_CONTROL, control);
}
-static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
+static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max)
{
- while (spi->rx_len >= spi->n_bytes && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
- u32 data = mchp_corespi_read(spi, REG_RX_DATA);
+ for (int i = 0; i < fifo_max; i++) {
+ u32 data;
+
+ while (mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)
+ ;
+
+ data = mchp_corespi_read(spi, REG_RX_DATA);
spi->rx_len -= spi->n_bytes;
@@ -211,11 +215,10 @@ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
mchp_corespi_write(spi, REG_FRAMESUP, len);
}
-static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_max)
{
- int fifo_max, i = 0;
+ int i = 0;
- fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
mchp_corespi_set_xfer_size(spi, fifo_max);
while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
@@ -413,19 +416,6 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
if (intfield == 0)
return IRQ_NONE;
- if (intfield & INT_TXDONE)
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
-
- if (intfield & INT_RXRDY) {
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
-
- if (spi->rx_len)
- mchp_corespi_read_fifo(spi);
- }
-
- if (!spi->rx_len && !spi->tx_len)
- finalise = true;
-
if (intfield & INT_RX_CHANNEL_OVERFLOW) {
mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
finalise = true;
@@ -512,9 +502,14 @@ static int mchp_corespi_transfer_one(struct spi_controller *host,
mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
- while (spi->tx_len)
- mchp_corespi_write_fifo(spi);
+ while (spi->tx_len) {
+ int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
+
+ mchp_corespi_write_fifo(spi, fifo_max);
+ mchp_corespi_read_fifo(spi, fifo_max);
+ }
+ spi_finalize_current_transfer(host);
return 1;
}
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 85f3bafc975d..4b0a1c0db041 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_qos.h>
#define SPI_CFG0_REG 0x0000
#define SPI_CFG1_REG 0x0004
@@ -147,6 +148,7 @@ struct mtk_spi_compatible {
* @tx_sgl_len: Size of TX DMA transfer
* @rx_sgl_len: Size of RX DMA transfer
* @dev_comp: Device data structure
+ * @qos_request: QoS request
* @spi_clk_hz: Current SPI clock in Hz
* @spimem_done: SPI-MEM operation completion
* @use_spimem: Enables SPI-MEM
@@ -166,6 +168,7 @@ struct mtk_spi {
struct scatterlist *tx_sgl, *rx_sgl;
u32 tx_sgl_len, rx_sgl_len;
const struct mtk_spi_compatible *dev_comp;
+ struct pm_qos_request qos_request;
u32 spi_clk_hz;
struct completion spimem_done;
bool use_spimem;
@@ -356,6 +359,7 @@ static int mtk_spi_hw_init(struct spi_controller *host,
struct mtk_chip_config *chip_config = spi->controller_data;
struct mtk_spi *mdata = spi_controller_get_devdata(host);
+ cpu_latency_qos_update_request(&mdata->qos_request, 500);
cpha = spi->mode & SPI_CPHA ? 1 : 0;
cpol = spi->mode & SPI_CPOL ? 1 : 0;
@@ -459,6 +463,15 @@ static int mtk_spi_prepare_message(struct spi_controller *host,
return mtk_spi_hw_init(host, msg->spi);
}
+static int mtk_spi_unprepare_message(struct spi_controller *host,
+ struct spi_message *message)
+{
+ struct mtk_spi *mdata = spi_controller_get_devdata(host);
+
+ cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
+ return 0;
+}
+
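Together with the probe and remove hunks further down, the QoS request follows this lifecycle (sketch; the 500 us bound is the value used by this driver):

/* cpu latency QoS lifecycle used by this driver */
cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);    /* probe */
cpu_latency_qos_update_request(&mdata->qos_request, 500);                  /* prepare_message */
cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE); /* unprepare_message */
cpu_latency_qos_remove_request(&mdata->qos_request);                       /* remove */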
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 reg_val;
@@ -961,7 +974,7 @@ static int mtk_spi_mem_exec_op(struct spi_mem *mem,
mtk_spi_reset(mdata);
mtk_spi_hw_init(mem->spi->controller, mem->spi);
- mtk_spi_prepare_transfer(mem->spi->controller, mem->spi->max_speed_hz);
+ mtk_spi_prepare_transfer(mem->spi->controller, op->max_freq);
reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
/* opcode byte len */
@@ -1122,6 +1135,10 @@ static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
.exec_op = mtk_spi_mem_exec_op,
};
+static const struct spi_controller_mem_caps mtk_spi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int mtk_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1139,6 +1156,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
host->set_cs = mtk_spi_set_cs;
host->prepare_message = mtk_spi_prepare_message;
+ host->unprepare_message = mtk_spi_unprepare_message;
host->transfer_one = mtk_spi_transfer_one;
host->can_dma = mtk_spi_can_dma;
host->setup = mtk_spi_setup;
@@ -1160,6 +1178,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
if (mdata->dev_comp->ipm_design) {
mdata->dev = dev;
host->mem_ops = &mtk_spi_mem_ops;
+ host->mem_caps = &mtk_spi_mem_caps;
init_completion(&mdata->spimem_done);
}
@@ -1244,6 +1263,8 @@ static int mtk_spi_probe(struct platform_device *pdev)
clk_disable_unprepare(mdata->spi_hclk);
}
+ cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
+
if (mdata->dev_comp->need_pad_sel) {
if (mdata->pad_num != host->num_chipselect)
return dev_err_probe(dev, -EINVAL,
@@ -1287,6 +1308,7 @@ static void mtk_spi_remove(struct platform_device *pdev)
struct mtk_spi *mdata = spi_controller_get_devdata(host);
int ret;
+ cpu_latency_qos_remove_request(&mdata->qos_request);
if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
complete(&mdata->spimem_done);
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
index fdbea9dffb62..e82ee6dcf498 100644
--- a/drivers/spi/spi-mtk-snfi.c
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -1284,9 +1284,6 @@ static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->controller);
- dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
- op->addr.val, op->addr.buswidth, op->addr.nbytes,
- op->data.buswidth, op->data.nbytes);
if (mtk_snand_is_page_ops(op)) {
if (op->data.dir == SPI_MEM_DATA_IN)
return mtk_snand_read_page_cache(ms, op);
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index c02c4204442f..0eb35c4e3987 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -68,9 +68,7 @@ static int spi_mux_select(struct spi_device *spi)
priv->current_cs = spi_get_chipselect(spi, 0);
- spi_setup(priv->spi);
-
- return 0;
+ return spi_setup(priv->spi);
}
static int spi_mux_setup(struct spi_device *spi)
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index 809767d3145c..eeaea6a5e310 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -522,7 +522,7 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
int i, ret;
u8 addr[8], cmd[2];
- ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
+ ret = mxic_spi_set_freq(mxic, op->max_freq);
if (ret)
return ret;
@@ -582,6 +582,7 @@ static const struct spi_controller_mem_caps mxic_spi_mem_caps = {
.dtr = true,
.ecc = true,
.swap16 = true,
+ .per_op_freq = true,
};
static void mxic_spi_set_cs(struct spi_device *spi, bool lvl)
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index e6d955d964f4..43455305fdf4 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -381,6 +381,8 @@ static int mxs_spi_transfer_one(struct spi_controller *host,
if (status)
break;
+ t->effective_speed_hz = ssp->clk_rate;
+
/* De-assert on last transfer, inverted by cs_change flag */
flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
TXRX_DEASSERT_CS : 0;
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 958bab27a081..67cc1d86de42 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -550,11 +550,6 @@ static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
int ret = 0;
u8 *buf;
- dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth, op->addr.val,
- op->data.nbytes);
-
if (fiu->spix_mode || op->addr.nbytes > 4)
return -EOPNOTSUPP;
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 1161b9e5a4dc..e63c77e41823 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -48,6 +48,8 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
@@ -57,6 +59,9 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
+/* runtime pm timeout */
+#define FSPI_RPM_TIMEOUT 50 /* 50ms */
+
/* Registers used by the driver */
#define FSPI_MCR0 0x00
#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24)
@@ -394,6 +399,8 @@ struct nxp_fspi {
struct mutex lock;
struct pm_qos_request pm_qos_req;
int selected;
+#define FSPI_NEED_INIT (1 << 0)
+ int flags;
};
static inline int needs_ip_only(struct nxp_fspi *f)
@@ -627,15 +634,15 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
return 0;
}
-static int nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
+static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
{
if (is_acpi_node(dev_fwnode(f->dev)))
- return 0;
+ return;
clk_disable_unprepare(f->clk);
clk_disable_unprepare(f->clk_en);
- return 0;
+ return;
}
static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
@@ -705,9 +712,10 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
* Value for rest of the CS FLSHxxCR0 register would be zero.
*
*/
-static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi)
+static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
- unsigned long rate = spi->max_speed_hz;
+ unsigned long rate = op->max_freq;
int ret;
uint64_t size_kb;
@@ -924,14 +932,20 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->controller);
int err = 0;
- mutex_lock(&f->lock);
+ guard(mutex)(&f->lock);
+
+ err = pm_runtime_get_sync(f->dev);
+ if (err < 0) {
+ dev_err(f->dev, "Failed to enable clock %d\n", __LINE__);
+ return err;
+ }
/* Wait for controller being ready. */
err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0,
FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true);
WARN_ON(err);
- nxp_fspi_select_mem(f, mem->spi);
+ nxp_fspi_select_mem(f, mem->spi, op);
nxp_fspi_prepare_lut(f, op);
/*
@@ -954,7 +968,8 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
/* Invalidate the data in the AHB buffer. */
nxp_fspi_invalid(f);
- mutex_unlock(&f->lock);
+ pm_runtime_mark_last_busy(f->dev);
+ pm_runtime_put_autosuspend(f->dev);
return err;
}
@@ -1149,6 +1164,28 @@ static const struct spi_controller_mem_ops nxp_fspi_mem_ops = {
.get_name = nxp_fspi_get_name,
};
+static const struct spi_controller_mem_caps nxp_fspi_mem_caps = {
+ .per_op_freq = true,
+};
+
+static void nxp_fspi_cleanup(void *data)
+{
+ struct nxp_fspi *f = data;
+
+ /* enable clock first since there is register access */
+ pm_runtime_get_sync(f->dev);
+
+ /* disable the hardware */
+ fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0);
+
+ pm_runtime_disable(f->dev);
+ pm_runtime_put_noidle(f->dev);
+ nxp_fspi_clk_disable_unprep(f);
+
+ if (f->ahb_addr)
+ iounmap(f->ahb_addr);
+}
+
static int nxp_fspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -1156,10 +1193,10 @@ static int nxp_fspi_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct resource *res;
struct nxp_fspi *f;
- int ret;
+ int ret, irq;
u32 reg;
- ctlr = spi_alloc_host(&pdev->dev, sizeof(*f));
+ ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*f));
if (!ctlr)
return -ENOMEM;
@@ -1169,10 +1206,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
f = spi_controller_get_devdata(ctlr);
f->dev = dev;
f->devtype_data = (struct nxp_fspi_devtype_data *)device_get_match_data(dev);
- if (!f->devtype_data) {
- ret = -ENODEV;
- goto err_put_ctrl;
- }
+ if (!f->devtype_data)
+ return -ENODEV;
platform_set_drvdata(pdev, f);
@@ -1181,11 +1216,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
f->iobase = devm_platform_ioremap_resource(pdev, 0);
else
f->iobase = devm_platform_ioremap_resource_byname(pdev, "fspi_base");
-
- if (IS_ERR(f->iobase)) {
- ret = PTR_ERR(f->iobase);
- goto err_put_ctrl;
- }
+ if (IS_ERR(f->iobase))
+ return PTR_ERR(f->iobase);
/* find the resources - controller memory mapped space */
if (is_acpi_node(dev_fwnode(f->dev)))
@@ -1193,11 +1225,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
else
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "fspi_mmap");
-
- if (!res) {
- ret = -ENODEV;
- goto err_put_ctrl;
- }
+ if (!res)
+ return -ENODEV;
/* assign memory mapped starting address and mapped size. */
f->memmap_phy = res->start;
@@ -1206,99 +1235,109 @@ static int nxp_fspi_probe(struct platform_device *pdev)
/* find the clocks */
if (dev_of_node(&pdev->dev)) {
f->clk_en = devm_clk_get(dev, "fspi_en");
- if (IS_ERR(f->clk_en)) {
- ret = PTR_ERR(f->clk_en);
- goto err_put_ctrl;
- }
+ if (IS_ERR(f->clk_en))
+ return PTR_ERR(f->clk_en);
f->clk = devm_clk_get(dev, "fspi");
- if (IS_ERR(f->clk)) {
- ret = PTR_ERR(f->clk);
- goto err_put_ctrl;
- }
-
- ret = nxp_fspi_clk_prep_enable(f);
- if (ret) {
- dev_err(dev, "can not enable the clock\n");
- goto err_put_ctrl;
- }
+ if (IS_ERR(f->clk))
+ return PTR_ERR(f->clk);
}
+ /* find the irq */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get irq source");
+
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, FSPI_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+
+ /* enable clock */
+ ret = pm_runtime_get_sync(f->dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to enable clock");
+
/* Clear potential interrupts */
reg = fspi_readl(f, f->iobase + FSPI_INTR);
if (reg)
fspi_writel(f, reg, f->iobase + FSPI_INTR);
- /* find the irq */
- ret = platform_get_irq(pdev, 0);
+ nxp_fspi_default_setup(f);
+
+ ret = pm_runtime_put_sync(dev);
if (ret < 0)
- goto err_disable_clk;
+ return dev_err_probe(dev, ret, "Failed to disable clock");
- ret = devm_request_irq(dev, ret,
+ ret = devm_request_irq(dev, irq,
nxp_fspi_irq_handler, 0, pdev->name, f);
- if (ret) {
- dev_err(dev, "failed to request irq: %d\n", ret);
- goto err_disable_clk;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
- mutex_init(&f->lock);
+ devm_mutex_init(dev, &f->lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
ctlr->mem_ops = &nxp_fspi_mem_ops;
-
- nxp_fspi_default_setup(f);
-
+ ctlr->mem_caps = &nxp_fspi_mem_caps;
ctlr->dev.of_node = np;
- ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ ret = devm_add_action_or_reset(dev, nxp_fspi_cleanup, f);
if (ret)
- goto err_destroy_mutex;
+ return dev_err_probe(dev, ret, "Failed to register nxp_fspi_cleanup\n");
- return 0;
+ return devm_spi_register_controller(&pdev->dev, ctlr);
+}
-err_destroy_mutex:
- mutex_destroy(&f->lock);
+static int nxp_fspi_runtime_suspend(struct device *dev)
+{
+ struct nxp_fspi *f = dev_get_drvdata(dev);
-err_disable_clk:
nxp_fspi_clk_disable_unprep(f);
-err_put_ctrl:
- spi_controller_put(ctlr);
-
- dev_err(dev, "NXP FSPI probe failed\n");
- return ret;
+ return 0;
}
-static void nxp_fspi_remove(struct platform_device *pdev)
+static int nxp_fspi_runtime_resume(struct device *dev)
{
- struct nxp_fspi *f = platform_get_drvdata(pdev);
-
- /* disable the hardware */
- fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0);
+ struct nxp_fspi *f = dev_get_drvdata(dev);
+ int ret;
- nxp_fspi_clk_disable_unprep(f);
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret)
+ return ret;
- mutex_destroy(&f->lock);
+ if (f->flags & FSPI_NEED_INIT) {
+ nxp_fspi_default_setup(f);
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ dev_err(dev, "select flexspi default pinctrl failed!\n");
+ f->flags &= ~FSPI_NEED_INIT;
+ }
- if (f->ahb_addr)
- iounmap(f->ahb_addr);
+ return ret;
}
static int nxp_fspi_suspend(struct device *dev)
{
- return 0;
-}
-
-static int nxp_fspi_resume(struct device *dev)
-{
struct nxp_fspi *f = dev_get_drvdata(dev);
+ int ret;
- nxp_fspi_default_setup(f);
+ ret = pinctrl_pm_select_sleep_state(dev);
+ if (ret) {
+ dev_err(dev, "select flexspi sleep pinctrl failed!\n");
+ return ret;
+ }
- return 0;
+ f->flags |= FSPI_NEED_INIT;
+
+ return pm_runtime_force_suspend(dev);
}
+static const struct dev_pm_ops nxp_fspi_pm_ops = {
+ RUNTIME_PM_OPS(nxp_fspi_runtime_suspend, nxp_fspi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(nxp_fspi_suspend, pm_runtime_force_resume)
+};
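The net power-management flow of these ops, as a sketch derived from the hunks above:

/*
 * system suspend: pinctrl sleep state -> FSPI_NEED_INIT set
 *                 -> pm_runtime_force_suspend() -> clocks gated
 * system resume:  pm_runtime_force_resume() -> nxp_fspi_runtime_resume()
 *                 -> clocks ungated -> nxp_fspi_default_setup()
 *                 -> pinctrl default state, flag cleared
 * idle:           autosuspend gates the clocks after FSPI_RPM_TIMEOUT ms
 */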
+
static const struct of_device_id nxp_fspi_dt_ids[] = {
{ .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
{ .compatible = "nxp,imx8mm-fspi", .data = (void *)&imx8mm_data, },
@@ -1318,20 +1357,14 @@ static const struct acpi_device_id nxp_fspi_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, nxp_fspi_acpi_ids);
#endif
-static const struct dev_pm_ops nxp_fspi_pm_ops = {
- .suspend = nxp_fspi_suspend,
- .resume = nxp_fspi_resume,
-};
-
static struct platform_driver nxp_fspi_driver = {
.driver = {
.name = "nxp-fspi",
.of_match_table = nxp_fspi_dt_ids,
.acpi_match_table = ACPI_PTR(nxp_fspi_acpi_ids),
- .pm = &nxp_fspi_pm_ops,
+ .pm = pm_ptr(&nxp_fspi_pm_ops),
},
.probe = nxp_fspi_probe,
- .remove = nxp_fspi_remove,
};
module_platform_driver(nxp_fspi_driver);
diff --git a/drivers/spi/spi-offload-trigger-pwm.c b/drivers/spi/spi-offload-trigger-pwm.c
new file mode 100644
index 000000000000..805ed41560df
--- /dev/null
+++ b/drivers/spi/spi-offload-trigger-pwm.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ *
+ * Generic PWM trigger for SPI offload.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pwm.h>
+#include <linux/spi/offload/provider.h>
+#include <linux/spi/offload/types.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+struct spi_offload_trigger_pwm_state {
+ struct device *dev;
+ struct pwm_device *pwm;
+};
+
+static bool spi_offload_trigger_pwm_match(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ if (nargs)
+ return false;
+
+ return type == SPI_OFFLOAD_TRIGGER_PERIODIC;
+}
+
+static int spi_offload_trigger_pwm_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct spi_offload_trigger_periodic *periodic = &config->periodic;
+ struct pwm_waveform wf = { };
+ int ret;
+
+ if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC)
+ return -EINVAL;
+
+ if (!periodic->frequency_hz)
+ return -EINVAL;
+
+ wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz);
+ /* REVISIT: 50% duty-cycle for now - may add config parameter later */
+ wf.duty_length_ns = wf.period_length_ns / 2;
+
+ ret = pwm_round_waveform_might_sleep(st->pwm, &wf);
+ if (ret < 0)
+ return ret;
+
+ periodic->frequency_hz = DIV_ROUND_UP_ULL(NSEC_PER_SEC, wf.period_length_ns);
+
+ return 0;
+}
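For example (the hardware period granularity here is hypothetical), requesting a 300 kHz trigger:

/*
 * Worked example: periodic->frequency_hz = 300000
 *   period_length_ns = DIV_ROUND_UP(1000000000, 300000) = 3334 ns
 *   duty_length_ns   = 3334 / 2 = 1667 ns
 * If the PWM rounds the period up to, say, 3340 ns, the reported rate
 * becomes DIV_ROUND_UP(1000000000, 3340) = 299402 Hz, which the caller
 * must then re-validate against its own constraints.
 */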
+
+static int spi_offload_trigger_pwm_enable(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct spi_offload_trigger_periodic *periodic = &config->periodic;
+ struct pwm_waveform wf = { };
+
+ if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC)
+ return -EINVAL;
+
+ if (!periodic->frequency_hz)
+ return -EINVAL;
+
+ wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz);
+ /* REVISIT: 50% duty-cycle for now - may add config parameter later */
+ wf.duty_length_ns = wf.period_length_ns / 2;
+
+ return pwm_set_waveform_might_sleep(st->pwm, &wf, false);
+}
+
+static void spi_offload_trigger_pwm_disable(struct spi_offload_trigger *trigger)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct pwm_waveform wf;
+ int ret;
+
+ ret = pwm_get_waveform_might_sleep(st->pwm, &wf);
+ if (ret < 0) {
+ dev_err(st->dev, "failed to get waveform: %d\n", ret);
+ return;
+ }
+
+ wf.duty_length_ns = 0;
+
+ ret = pwm_set_waveform_might_sleep(st->pwm, &wf, false);
+ if (ret < 0)
+ dev_err(st->dev, "failed to disable PWM: %d\n", ret);
+}
+
+static const struct spi_offload_trigger_ops spi_offload_trigger_pwm_ops = {
+ .match = spi_offload_trigger_pwm_match,
+ .validate = spi_offload_trigger_pwm_validate,
+ .enable = spi_offload_trigger_pwm_enable,
+ .disable = spi_offload_trigger_pwm_disable,
+};
+
+static void spi_offload_trigger_pwm_release(void *data)
+{
+ pwm_disable(data);
+}
+
+static int spi_offload_trigger_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_offload_trigger_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &spi_offload_trigger_pwm_ops,
+ };
+ struct spi_offload_trigger_pwm_state *st;
+ struct pwm_state state;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ info.priv = st;
+ st->dev = dev;
+
+ st->pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(st->pwm))
+ return dev_err_probe(dev, PTR_ERR(st->pwm), "failed to get PWM\n");
+
+ /* init with duty_cycle = 0, output enabled to ensure trigger off */
+ pwm_init_state(st->pwm, &state);
+ state.enabled = true;
+
+ ret = pwm_apply_might_sleep(st->pwm, &state);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to apply PWM state\n");
+
+ ret = devm_add_action_or_reset(dev, spi_offload_trigger_pwm_release, st->pwm);
+ if (ret)
+ return ret;
+
+ return devm_spi_offload_trigger_register(dev, &info);
+}
+
+static const struct of_device_id spi_offload_trigger_pwm_of_match_table[] = {
+ { .compatible = "pwm-trigger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_offload_trigger_pwm_of_match_table);
+
+static struct platform_driver spi_offload_trigger_pwm_driver = {
+ .driver = {
+ .name = "pwm-trigger",
+ .of_match_table = spi_offload_trigger_pwm_of_match_table,
+ },
+ .probe = spi_offload_trigger_pwm_probe,
+};
+module_platform_driver(spi_offload_trigger_pwm_driver);
+
+MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
+MODULE_DESCRIPTION("Generic PWM trigger");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c
new file mode 100644
index 000000000000..e674097bf3be
--- /dev/null
+++ b/drivers/spi/spi-offload.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+/*
+ * SPI Offloading support.
+ *
+ * Some SPI controllers support offloading of SPI transfers. Essentially, this
+ * is the ability for a SPI controller to perform SPI transfers with minimal
+ * or even no CPU intervention, e.g. via a specialized SPI controller with a
+ * hardware trigger or via a conventional SPI controller using a non-Linux MCU
+ * processor core to offload the work.
+ */
+
+#define DEFAULT_SYMBOL_NAMESPACE "SPI_OFFLOAD"
+
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/export.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/offload/provider.h>
+#include <linux/spi/offload/types.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+struct spi_controller_and_offload {
+ struct spi_controller *controller;
+ struct spi_offload *offload;
+};
+
+struct spi_offload_trigger {
+ struct list_head list;
+ struct kref ref;
+ struct fwnode_handle *fwnode;
+ /* synchronizes calling ops and driver registration */
+ struct mutex lock;
+ /*
+ * If the provider goes away while the consumer still has a reference,
+ * ops and priv will be set to NULL and all calls will fail with -ENODEV.
+ */
+ const struct spi_offload_trigger_ops *ops;
+ void *priv;
+};
+
+static LIST_HEAD(spi_offload_triggers);
+static DEFINE_MUTEX(spi_offload_triggers_lock);
+
+/**
+ * devm_spi_offload_alloc() - Allocate offload instance
+ * @dev: Device for devm purposes and assigned to &struct spi_offload.provider_dev
+ * @priv_size: Size of private data to allocate
+ *
+ * Offload providers should use this to allocate offload instances.
+ *
+ * Return: Pointer to new offload instance or error on failure.
+ */
+struct spi_offload *devm_spi_offload_alloc(struct device *dev,
+ size_t priv_size)
+{
+ struct spi_offload *offload;
+ void *priv;
+
+ offload = devm_kzalloc(dev, sizeof(*offload), GFP_KERNEL);
+ if (!offload)
+ return ERR_PTR(-ENOMEM);
+
+ priv = devm_kzalloc(dev, priv_size, GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ offload->provider_dev = dev;
+ offload->priv = priv;
+
+ return offload;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_alloc);
+
+static void spi_offload_put(void *data)
+{
+ struct spi_controller_and_offload *resource = data;
+
+ resource->controller->put_offload(resource->offload);
+ kfree(resource);
+}
+
+/**
+ * devm_spi_offload_get() - Get an offload instance
+ * @dev: Device for devm purposes
+ * @spi: SPI device to use for the transfers
+ * @config: Offload configuration
+ *
+ * Peripheral drivers call this function to get an offload instance that meets
+ * the requirements specified in @config. If no suitable offload instance is
+ * available, -ENODEV is returned.
+ *
+ * Return: Offload instance or error on failure.
+ */
+struct spi_offload *devm_spi_offload_get(struct device *dev,
+ struct spi_device *spi,
+ const struct spi_offload_config *config)
+{
+ struct spi_controller_and_offload *resource;
+ struct spi_offload *offload;
+ int ret;
+
+ if (!spi || !config)
+ return ERR_PTR(-EINVAL);
+
+ if (!spi->controller->get_offload)
+ return ERR_PTR(-ENODEV);
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return ERR_PTR(-ENOMEM);
+
+ offload = spi->controller->get_offload(spi, config);
+ if (IS_ERR(offload)) {
+ kfree(resource);
+ return offload;
+ }
+
+ resource->controller = spi->controller;
+ resource->offload = offload;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_put, resource);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return offload;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_get);
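As a hedged consumer-side sketch (the function, struct foo_state and its fields are hypothetical; the capability flags come from the offload types header added by this series):

/* Hypothetical peripheral driver requesting a triggered RX-stream offload */
static int foo_get_offload(struct device *dev, struct spi_device *spi,
			   struct foo_state *st)
{
	static const struct spi_offload_config config = {
		.capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
				    SPI_OFFLOAD_CAP_RX_STREAM_DMA,
	};

	st->offload = devm_spi_offload_get(dev, spi, &config);
	if (IS_ERR(st->offload))
		return PTR_ERR(st->offload);

	st->trigger = devm_spi_offload_trigger_get(dev, st->offload,
						   SPI_OFFLOAD_TRIGGER_PERIODIC);
	return PTR_ERR_OR_ZERO(st->trigger);
}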
+
+static void spi_offload_trigger_free(struct kref *ref)
+{
+ struct spi_offload_trigger *trigger =
+ container_of(ref, struct spi_offload_trigger, ref);
+
+ mutex_destroy(&trigger->lock);
+ fwnode_handle_put(trigger->fwnode);
+ kfree(trigger);
+}
+
+static void spi_offload_trigger_put(void *data)
+{
+ struct spi_offload_trigger *trigger = data;
+
+ scoped_guard(mutex, &trigger->lock)
+ if (trigger->ops && trigger->ops->release)
+ trigger->ops->release(trigger);
+
+ kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+static struct spi_offload_trigger
+*spi_offload_trigger_get(enum spi_offload_trigger_type type,
+ struct fwnode_reference_args *args)
+{
+ struct spi_offload_trigger *trigger;
+ bool match = false;
+ int ret;
+
+ guard(mutex)(&spi_offload_triggers_lock);
+
+ list_for_each_entry(trigger, &spi_offload_triggers, list) {
+ if (trigger->fwnode != args->fwnode)
+ continue;
+
+ match = trigger->ops->match(trigger, type, args->args, args->nargs);
+ if (match)
+ break;
+ }
+
+ if (!match)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ guard(mutex)(&trigger->lock);
+
+ if (trigger->ops->request) {
+ ret = trigger->ops->request(trigger, type, args->args, args->nargs);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ kref_get(&trigger->ref);
+
+ return trigger;
+}
+
+/**
+ * devm_spi_offload_trigger_get() - Get an offload trigger instance
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance connected to a trigger.
+ * @type: Trigger type to get.
+ *
+ * Return: Offload trigger instance or error on failure.
+ */
+struct spi_offload_trigger
+*devm_spi_offload_trigger_get(struct device *dev,
+ struct spi_offload *offload,
+ enum spi_offload_trigger_type type)
+{
+ struct spi_offload_trigger *trigger;
+ struct fwnode_reference_args args;
+ int ret;
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(offload->provider_dev),
+ "trigger-sources",
+ "#trigger-source-cells", 0, 0,
+ &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ trigger = spi_offload_trigger_get(type, &args);
+ fwnode_handle_put(args.fwnode);
+ if (IS_ERR(trigger))
+ return trigger;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_trigger_put, trigger);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return trigger;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_get);
+
+/**
+ * spi_offload_trigger_validate - Validate the requested trigger
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to validate
+ *
+ * On success, @config may be modified to reflect what the hardware can do.
+ * For example, the frequency of a periodic trigger may be adjusted to the
+ * nearest supported value.
+ *
+ * Callers will likely need to do additional validation of the modified trigger
+ * parameters.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int spi_offload_trigger_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return -ENODEV;
+
+ if (!trigger->ops->validate)
+ return -EOPNOTSUPP;
+
+ return trigger->ops->validate(trigger, config);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_validate);
+
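+/*
+ * Example: validating a periodic trigger rate (the config field names are
+ * assumptions based on the offload header). The provider may round the
+ * requested rate, so the caller re-checks the value it got back:
+ *
+ *    struct spi_offload_trigger_config cfg = {
+ *            .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ *            .periodic = { .frequency_hz = 1000000 },
+ *    };
+ *    int ret;
+ *
+ *    ret = spi_offload_trigger_validate(trigger, &cfg);
+ *    if (ret)
+ *            return ret;
+ *
+ * On return, cfg.periodic.frequency_hz holds the rate the hardware will
+ * actually use.
+ */
+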
+/**
+ * spi_offload_trigger_enable() - Enable the trigger for an offload instance
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to use when enabling the trigger
+ *
+ * There must be a message prepared with this offload instance (i.e.
+ * spi_optimize_message() was called with the same offload assigned to the
+ * message). This will also reserve the bus for exclusive use by the offload
+ * instance until the trigger is disabled. Any other attempts to send a
+ * transfer or lock the bus will fail with -EBUSY during this time.
+ *
+ * Calls must be balanced with spi_offload_trigger_disable().
+ *
+ * Context: can sleep
+ * Return: 0 on success, else a negative error code.
+ */
+int spi_offload_trigger_enable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ int ret;
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return -ENODEV;
+
+ if (offload->ops && offload->ops->trigger_enable) {
+ ret = offload->ops->trigger_enable(offload);
+ if (ret)
+ return ret;
+ }
+
+ if (trigger->ops->enable) {
+ ret = trigger->ops->enable(trigger, config);
+ if (ret) {
+ if (offload->ops && offload->ops->trigger_disable)
+ offload->ops->trigger_disable(offload);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_enable);
+
+/**
+ * spi_offload_trigger_disable() - Disable the hardware trigger for an offload
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ *
+ * Disables the hardware trigger for the offload instance and releases the
+ * bus for use by other clients.
+ *
+ * Context: can sleep
+ */
+void spi_offload_trigger_disable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger)
+{
+ if (offload->ops && offload->ops->trigger_disable)
+ offload->ops->trigger_disable(offload);
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return;
+
+ if (trigger->ops->disable)
+ trigger->ops->disable(trigger);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_disable);
+
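+/*
+ * Example: a balanced enable/disable pair around a capture run (sketch only;
+ * offload, trigger and cfg as in the sketches above, and the message was
+ * already optimized with this offload assigned):
+ *
+ *    ret = spi_offload_trigger_enable(offload, trigger, &cfg);
+ *    if (ret)
+ *            return ret;
+ *
+ *    ... transfers now run on each trigger event ...
+ *
+ *    spi_offload_trigger_disable(offload, trigger);
+ */
+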
+static void spi_offload_release_dma_chan(void *chan)
+{
+ dma_release_channel(chan);
+}
+
+/**
+ * devm_spi_offload_tx_stream_request_dma_chan() - Get the DMA channel for the TX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will provide data to transfers that use the
+ * %SPI_OFFLOAD_XFER_TX_STREAM offload flag.
+ *
+ * Return: Pointer to the DMA channel, or an error pointer on failure
+ */
+struct dma_chan
+*devm_spi_offload_tx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload)
+{
+ struct dma_chan *chan;
+ int ret;
+
+ if (!offload->ops || !offload->ops->tx_stream_request_dma_chan)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ chan = offload->ops->tx_stream_request_dma_chan(offload);
+ if (IS_ERR(chan))
+ return chan;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_tx_stream_request_dma_chan);
+
+/**
+ * devm_spi_offload_rx_stream_request_dma_chan() - Get the DMA channel for the RX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will receive data from transfers that use the
+ * %SPI_OFFLOAD_XFER_RX_STREAM offload flag.
+ *
+ * Return: Pointer to the DMA channel, or an error pointer on failure
+ */
+struct dma_chan
+*devm_spi_offload_rx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload)
+{
+ struct dma_chan *chan;
+ int ret;
+
+ if (!offload->ops || !offload->ops->rx_stream_request_dma_chan)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ chan = offload->ops->rx_stream_request_dma_chan(offload);
+ if (IS_ERR(chan))
+ return chan;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_rx_stream_request_dma_chan);
+
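+/*
+ * Example: requesting the RX stream channel (sketch; the TX variant is
+ * symmetrical). The returned channel can then back a dmaengine ring that
+ * drains transfers flagged with SPI_OFFLOAD_XFER_RX_STREAM:
+ *
+ *    struct dma_chan *rx;
+ *
+ *    rx = devm_spi_offload_rx_stream_request_dma_chan(&spi->dev, offload);
+ *    if (IS_ERR(rx))
+ *            return PTR_ERR(rx);
+ */
+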
+/* Trigger providers */
+
+static void spi_offload_trigger_unregister(void *data)
+{
+ struct spi_offload_trigger *trigger = data;
+
+ scoped_guard(mutex, &spi_offload_triggers_lock)
+ list_del(&trigger->list);
+
+ scoped_guard(mutex, &trigger->lock) {
+ trigger->priv = NULL;
+ trigger->ops = NULL;
+ }
+
+ kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+/**
+ * devm_spi_offload_trigger_register() - Allocate and register an offload trigger
+ * @dev: Device for devm purposes.
+ * @info: Provider-specific trigger info.
+ *
+ * Return: 0 on success, else a negative error code.
+ */
+int devm_spi_offload_trigger_register(struct device *dev,
+ struct spi_offload_trigger_info *info)
+{
+ struct spi_offload_trigger *trigger;
+
+ if (!info->fwnode || !info->ops || !info->ops->match)
+ return -EINVAL;
+
+ trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
+ if (!trigger)
+ return -ENOMEM;
+
+ kref_init(&trigger->ref);
+ mutex_init(&trigger->lock);
+ trigger->fwnode = fwnode_handle_get(info->fwnode);
+ trigger->ops = info->ops;
+ trigger->priv = info->priv;
+
+ scoped_guard(mutex, &spi_offload_triggers_lock)
+ list_add_tail(&trigger->list, &spi_offload_triggers);
+
+ return devm_add_action_or_reset(dev, spi_offload_trigger_unregister, trigger);
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_register);
+
+/**
+ * spi_offload_trigger_get_priv() - Get the private data for the trigger
+ * @trigger: Offload trigger instance.
+ *
+ * Return: Private data for the trigger.
+ */
+void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger)
+{
+ return trigger->priv;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_get_priv);
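+
+/*
+ * Example: a minimal trigger provider sketch (hypothetical driver; the ops
+ * signatures are assumptions based on the offload provider header):
+ *
+ *    static bool foo_match(struct spi_offload_trigger *trigger,
+ *                          enum spi_offload_trigger_type type,
+ *                          u64 *args, u32 nargs)
+ *    {
+ *            return type == SPI_OFFLOAD_TRIGGER_PERIODIC && nargs == 0;
+ *    }
+ *
+ *    static const struct spi_offload_trigger_ops foo_ops = {
+ *            .match = foo_match,
+ *    };
+ *
+ *    struct spi_offload_trigger_info info = {
+ *            .fwnode = dev_fwnode(&pdev->dev),
+ *            .ops = &foo_ops,
+ *            .priv = st,
+ *    };
+ *
+ *    ret = devm_spi_offload_trigger_register(&pdev->dev, &info);
+ *
+ * Callbacks later recover their state with spi_offload_trigger_get_priv().
+ */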
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index add6247d3481..29c616e2c408 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1561,10 +1561,15 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
}
mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
- if (IS_ERR(mcspi->ref_clk))
- mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
- else
+ if (IS_ERR(mcspi->ref_clk)) {
+ status = PTR_ERR(mcspi->ref_clk);
+ dev_err_probe(&pdev->dev, status, "Failed to get ref_clk\n");
+ goto free_ctlr;
+ }
+ if (mcspi->ref_clk)
mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
+ else
+ mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
ctlr->max_speed_hz = mcspi->ref_clk_hz;
ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index fc98979eba48..330078b1d50f 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -741,21 +741,19 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
if (ret)
return -ENOMEM;
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_request_all_regions(pdev, DRV_NAME);
if (ret)
return -ENOMEM;
spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
- if (!spi_bus->reg_base) {
- ret = -EINVAL;
- goto error;
- }
+ if (!spi_bus->reg_base)
+ return -EINVAL;
ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt,
PCI_IRQ_ALL_TYPES);
if (ret < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors\n");
- goto error;
+ return ret;
}
init_completion(&spi_sub_ptr->spi_xfer_done);
@@ -773,13 +771,12 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
if (ret < 0) {
dev_err(&pdev->dev, "Unable to request irq : %d",
spi_sub_ptr->irq);
- ret = -ENODEV;
- goto error;
+ return -ENODEV;
}
ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq);
if (ret && ret != -EOPNOTSUPP)
- goto error;
+ return ret;
/* This register is only applicable for 1st instance */
regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
@@ -808,8 +805,7 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
if (ret < 0) {
dev_err(&pdev->dev, "Unable to request irq : %d",
spi_sub_ptr->irq);
- ret = -ENODEV;
- goto error;
+ return -ENODEV;
}
}
@@ -828,15 +824,11 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
spi_controller_set_devdata(spi_host, spi_sub_ptr);
ret = devm_spi_register_controller(dev, spi_host);
if (ret)
- goto error;
+ return ret;
}
pci_set_drvdata(pdev, spi_bus);
return 0;
-
-error:
- pci_release_regions(pdev);
- return ret;
}
static void store_restore_config(struct pci1xxxx_spi *spi_ptr,
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 903d76145272..06711a62fa3d 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -73,8 +73,9 @@ struct chip_data {
#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
#define LPSS_PRIV_CLOCK_GATE 0x38
-#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
-#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_OFF 0x0
struct lpss_config {
/* LPSS offset from drv_data->ioaddr */
@@ -321,6 +322,20 @@ static void __lpss_ssp_write_priv(struct driver_data *drv_data,
writel(value, drv_data->lpss_base + offset);
}
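+/*
+ * Read-modify-write helper for the LPSS private registers. Returns true if
+ * the register contents actually changed, so callers can react only when an
+ * update took effect.
+ */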
+static bool __lpss_ssp_update_priv(struct driver_data *drv_data, unsigned int offset,
+ u32 mask, u32 value)
+{
+ u32 new, curr;
+
+ curr = __lpss_ssp_read_priv(drv_data, offset);
+ new = (curr & ~mask) | (value & mask);
+ if (new == curr)
+ return false;
+
+ __lpss_ssp_write_priv(drv_data, offset, new);
+ return true;
+}
+
/*
* lpss_ssp_setup - perform LPSS SSP specific setup
* @drv_data: pointer to the driver private data
@@ -337,21 +352,16 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
drv_data->lpss_base = drv_data->ssp->mmio_base + config->offset;
/* Enable software chip select control */
- value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
- value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH);
- value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
- __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+ value = LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, value, value);
/* Enable multiblock DMA transfers */
if (drv_data->controller_info->enable_dma) {
- __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
+ __lpss_ssp_update_priv(drv_data, config->reg_ssp, BIT(0), BIT(0));
if (config->reg_general >= 0) {
- value = __lpss_ssp_read_priv(drv_data,
- config->reg_general);
- value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
- __lpss_ssp_write_priv(drv_data,
- config->reg_general, value);
+ value = LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
+ __lpss_ssp_update_priv(drv_data, config->reg_general, value, value);
}
}
}
@@ -361,30 +371,19 @@ static void lpss_ssp_select_cs(struct spi_device *spi,
{
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
- u32 value, cs;
+ u32 cs;
- if (!config->cs_sel_mask)
+ cs = spi_get_chipselect(spi, 0) << config->cs_sel_shift;
+ if (!__lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, config->cs_sel_mask, cs))
return;
- value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
-
- cs = spi_get_chipselect(spi, 0);
- cs <<= config->cs_sel_shift;
- if (cs != (value & config->cs_sel_mask)) {
- /*
- * When switching another chip select output active the
- * output must be selected first and wait 2 ssp_clk cycles
- * before changing state to active. Otherwise a short
- * glitch will occur on the previous chip select since
- * output select is latched but state control is not.
- */
- value &= ~config->cs_sel_mask;
- value |= cs;
- __lpss_ssp_write_priv(drv_data,
- config->reg_cs_ctrl, value);
- ndelay(1000000000 /
- (drv_data->controller->max_speed_hz / 2));
- }
+ /*
+ * When switching another chip select output active the output must be
+ * selected first and wait 2 ssp_clk cycles before changing state to
+ * active. Otherwise a short glitch will occur on the previous chip
+ * select since output select is latched but state control is not.
+ */
+ ndelay(1000000000 / (drv_data->controller->max_speed_hz / 2));
}
static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
@@ -392,34 +391,27 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
const struct lpss_config *config;
- u32 value;
+ u32 mask;
config = lpss_get_config(drv_data);
if (enable)
lpss_ssp_select_cs(spi, config);
- value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
- if (enable)
- value &= ~LPSS_CS_CONTROL_CS_HIGH;
- else
- value |= LPSS_CS_CONTROL_CS_HIGH;
- __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+ mask = LPSS_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, mask, enable ? 0 : mask);
if (config->cs_clk_stays_gated) {
- u32 clkgate;
-
/*
* Changing CS alone when dynamic clock gating is on won't
* actually flip CS at that time. This ruins SPI transfers
* that specify delays, or have no data. Toggle the clock mode
* to force on briefly to poke the CS pin to move.
*/
- clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE);
- value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) |
- LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON;
-
- __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value);
- __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate);
+ mask = LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK;
+ if (__lpss_ssp_update_priv(drv_data, LPSS_PRIV_CLOCK_GATE, mask,
+ LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON))
+ __lpss_ssp_update_priv(drv_data, LPSS_PRIV_CLOCK_GATE, mask,
+ LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_OFF);
}
}
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
new file mode 100644
index 000000000000..fd129650434f
--- /dev/null
+++ b/drivers/spi/spi-qpic-snand.c
@@ -0,0 +1,1644 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Authors:
+ * Md Sadre Alam <quic_mdalam@quicinc.com>
+ * Sricharan R <quic_srichara@quicinc.com>
+ * Varadarajan Narayanan <quic_varada@quicinc.com>
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-qpic-common.h>
+#include <linux/mtd/spinand.h>
+#include <linux/bitfield.h>
+
+#define NAND_FLASH_SPI_CFG 0xc0
+#define NAND_NUM_ADDR_CYCLES 0xc4
+#define NAND_BUSY_CHECK_WAIT_CNT 0xc8
+#define NAND_FLASH_FEATURES 0xf64
+
+/* QSPI NAND config reg bits */
+#define LOAD_CLK_CNTR_INIT_EN BIT(28)
+#define CLK_CNTR_INIT_VAL_VEC 0x924
+#define CLK_CNTR_INIT_VAL_VEC_MASK GENMASK(27, 16)
+#define FEA_STATUS_DEV_ADDR 0xc0
+#define FEA_STATUS_DEV_ADDR_MASK GENMASK(15, 8)
+#define SPI_CFG BIT(0)
+#define SPI_NUM_ADDR 0xDA4DB
+#define SPI_WAIT_CNT 0x10
+#define QPIC_QSPI_NUM_CS 1
+#define SPI_TRANSFER_MODE_x1 BIT(29)
+#define SPI_TRANSFER_MODE_x4 (3 << 29)
+#define SPI_WP BIT(28)
+#define SPI_HOLD BIT(27)
+#define QPIC_SET_FEATURE BIT(31)
+
+#define SPINAND_RESET 0xff
+#define SPINAND_READID 0x9f
+#define SPINAND_GET_FEATURE 0x0f
+#define SPINAND_SET_FEATURE 0x1f
+#define SPINAND_READ 0x13
+#define SPINAND_ERASE 0xd8
+#define SPINAND_WRITE_EN 0x06
+#define SPINAND_PROGRAM_EXECUTE 0x10
+#define SPINAND_PROGRAM_LOAD 0x84
+
+#define ACC_FEATURE 0xe
+#define BAD_BLOCK_MARKER_SIZE 0x2
+#define OOB_BUF_SIZE 128
+#define ecceng_to_qspi(eng) container_of(eng, struct qpic_spi_nand, ecc_eng)
+
+struct qpic_snand_op {
+ u32 cmd_reg;
+ u32 addr1_reg;
+ u32 addr2_reg;
+};
+
+struct snandc_read_status {
+ __le32 snandc_flash;
+ __le32 snandc_buffer;
+ __le32 snandc_erased_cw;
+};
+
+/*
+ * ECC statistics
+ * @corrected: Total number of ECC-corrected bits
+ * @bitflips: Maximum number of bitflips in any single codeword
+ * @failed: Number of uncorrectable errors
+ */
+struct qcom_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct qpic_ecc {
+ struct device *dev;
+ int ecc_bytes_hw;
+ int spare_bytes;
+ int bbm_size;
+ int ecc_mode;
+ int bytes;
+ int steps;
+ int step_size;
+ int strength;
+ int cw_size;
+ int cw_data;
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg0_raw;
+ u32 cfg1_raw;
+ u32 ecc_buf_cfg;
+ u32 ecc_bch_cfg;
+ u32 clrflashstatus;
+ u32 clrreadstatus;
+ bool bch_enabled;
+};
+
+struct qpic_spi_nand {
+ struct qcom_nand_controller *snandc;
+ struct spi_controller *ctlr;
+ struct mtd_info *mtd;
+ struct clk *iomacro_clk;
+ struct qpic_ecc *ecc;
+ struct qcom_ecc_stats ecc_stats;
+ struct nand_ecc_engine ecc_eng;
+ u8 *data_buf;
+ u8 *oob_buf;
+ __le32 addr1;
+ __le32 addr2;
+ __le32 cmd;
+ u32 num_cw;
+ bool oob_rw;
+ bool page_rw;
+ bool raw_rw;
+};
+
+static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
+ int reg, int cw_offset, int read_size,
+ int is_last_read_loc)
+{
+ __le32 locreg_val;
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg == NAND_READ_LOCATION_0)
+ snandc->regs->read_location0 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_1)
+ snandc->regs->read_location1 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_2)
+ snandc->regs->read_location2 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_3)
+ snandc->regs->read_location3 = locreg_val;
+}
+
+static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc,
+ int reg, int cw_offset, int read_size,
+ int is_last_read_loc)
+{
+ __le32 locreg_val;
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg == NAND_READ_LOCATION_LAST_CW_0)
+ snandc->regs->read_location_last0 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_1)
+ snandc->regs->read_location_last1 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_2)
+ snandc->regs->read_location_last2 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_3)
+ snandc->regs->read_location_last3 = locreg_val;
+}
+
+static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+ struct qpic_spi_nand *qspi = ecceng_to_qspi(eng);
+
+ return qspi->snandc;
+}
+
+static int qcom_spi_init(struct qcom_nand_controller *snandc)
+{
+ u32 snand_cfg_val = 0x0;
+ int ret;
+
+ snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) |
+ FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) |
+ FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) |
+ FIELD_PREP(SPI_CFG, 0);
+
+ snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
+ snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR);
+ snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
+
+ snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN;
+ snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1,
+ NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure in submitting spi init descriptor\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *qecc = snandc->qspi->ecc;
+
+ if (section > 1)
+ return -ERANGE;
+
+ oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *qecc = snandc->qspi->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = qecc->steps * 4;
+ oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops qcom_spi_ooblayout = {
+ .ecc = qcom_spi_ooblayout_ecc,
+ .free = qcom_spi_ooblayout_free,
+};
+
+static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct nand_ecc_props *reqs = &nand->ecc.requirements;
+ struct nand_ecc_props *user = &nand->ecc.user_conf;
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int cwperpage, bad_block_byte, ret;
+ struct qpic_ecc *ecc_cfg;
+
+ cwperpage = mtd->writesize / NANDC_STEP_SIZE;
+ snandc->qspi->num_cw = cwperpage;
+
+ ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
+ if (!ecc_cfg)
+ return -ENOMEM;
+
+ if (user->step_size && user->strength) {
+ ecc_cfg->step_size = user->step_size;
+ ecc_cfg->strength = user->strength;
+ } else if (reqs->step_size && reqs->strength) {
+ ecc_cfg->step_size = reqs->step_size;
+ ecc_cfg->strength = reqs->strength;
+ } else {
+ /* use defaults */
+ ecc_cfg->step_size = NANDC_STEP_SIZE;
+ ecc_cfg->strength = 4;
+ }
+
+ if (ecc_cfg->step_size != NANDC_STEP_SIZE) {
+ dev_err(snandc->dev,
+ "only %u bytes ECC step size is supported\n",
+ NANDC_STEP_SIZE);
+ ret = -EOPNOTSUPP;
+ goto err_free_ecc_cfg;
+ }
+
+ if (ecc_cfg->strength != 4) {
+ dev_err(snandc->dev,
+ "only 4 bits ECC strength is supported\n");
+ ret = -EOPNOTSUPP;
+ goto err_free_ecc_cfg;
+ }
+
+ snandc->qspi->oob_buf = kmalloc(mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!snandc->qspi->oob_buf) {
+ ret = -ENOMEM;
+ goto err_free_ecc_cfg;
+ }
+
+ memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ nand->ecc.ctx.priv = ecc_cfg;
+ snandc->qspi->mtd = mtd;
+
+ ecc_cfg->ecc_bytes_hw = 7;
+ ecc_cfg->spare_bytes = 4;
+ ecc_cfg->bbm_size = 1;
+ ecc_cfg->bch_enabled = true;
+ ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;
+
+ ecc_cfg->steps = 4;
+ ecc_cfg->cw_data = 516;
+ ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
+ bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;
+
+ mtd_set_ooblayout(mtd, &qcom_spi_ooblayout);
+
+ ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) |
+ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) |
+ FIELD_PREP(STATUS_BFR_READ, 0) |
+ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes);
+
+ ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled);
+
+ ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+
+ ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) |
+ FIELD_PREP(ECC_SW_RESET, 0) |
+ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) |
+ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
+ FIELD_PREP(ECC_MODE_MASK, 0) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);
+
+ ecc_cfg->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203);
+ ecc_cfg->clrflashstatus = FS_READY_BSY_N;
+ ecc_cfg->clrreadstatus = 0xc0;
+
+ conf->step_size = ecc_cfg->step_size;
+ conf->strength = ecc_cfg->strength;
+
+ snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET);
+ snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET);
+
+ dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n",
+ ecc_cfg->strength, ecc_cfg->step_size);
+
+ return 0;
+
+err_free_ecc_cfg:
+ kfree(ecc_cfg);
+ return ret;
+}
+
+static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand)
+{
+ struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
+
+ kfree(ecc_cfg);
+}
+
+static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
+
+ snandc->qspi->ecc = ecc_cfg;
+ snandc->qspi->raw_rw = false;
+ snandc->qspi->oob_rw = false;
+ snandc->qspi->page_rw = false;
+
+ if (req->datalen)
+ snandc->qspi->page_rw = true;
+
+ if (req->ooblen)
+ snandc->qspi->oob_rw = true;
+
+ if (req->mode == MTD_OPS_RAW)
+ snandc->qspi->raw_rw = true;
+
+ return 0;
+}
+
+static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ)
+ return 0;
+
+ if (snandc->qspi->ecc_stats.failed)
+ mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed;
+ else
+ mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected;
+
+ if (snandc->qspi->ecc_stats.failed)
+ return -EBADMSG;
+ else
+ return snandc->qspi->ecc_stats.bitflips;
+}
+
+static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = {
+ .init_ctx = qcom_spi_ecc_init_ctx_pipelined,
+ .cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined,
+ .prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined,
+ .finish_io_req = qcom_spi_ecc_finish_io_req_pipelined,
+};
+
+/*
+ * Helper to configure the read-location registers; the last codeword is
+ * programmed through the dedicated LAST_CW register bank.
+ */
+static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg,
+ int cw_offset, int read_size, int is_last_read_loc)
+{
+ int reg_base = NAND_READ_LOCATION_0;
+ int num_cw = snandc->qspi->num_cw;
+
+ if (cw == (num_cw - 1))
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
+
+ reg_base += reg * 4;
+
+ if (cw == (num_cw - 1))
+ return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+ else
+ return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+}
+
+static void
+qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw)
+{
+ __le32 *reg = &snandc->regs->read_location0;
+ int num_cw = snandc->qspi->num_cw;
+
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+ if (cw == (num_cw - 1)) {
+ reg = &snandc->regs->read_location_last0;
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4,
+ NAND_BAM_NEXT_SGL);
+ }
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+}
+
+static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ int ret;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cfg0 = cpu_to_le32((ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, 0));
+ snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to erase block\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc,
+ bool use_ecc, int cw)
+{
+ __le32 *reg = &snandc->regs->read_location0;
+ int num_cw = snandc->qspi->num_cw;
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ if (cw == (num_cw - 1)) {
+ reg = &snandc->regs->read_location_last0;
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL);
+ }
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0);
+}
+
+static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
+{
+ int i;
+
+ qcom_nandc_dev_to_mem(snandc, true);
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ int size, ret = 0;
+ int col, bbpos;
+ u32 cfg0, cfg1, ecc_bch_cfg;
+ u32 num_cw = snandc->qspi->num_cw;
+
+ qcom_clear_bam_transaction(snandc);
+ qcom_clear_read_regs(snandc);
+
+ size = ecc_cfg->cw_size;
+ col = ecc_cfg->cw_size * (num_cw - 1);
+
+ memset(snandc->data_buffer, 0xff, size);
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, 0);
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1);
+
+ qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1);
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failed to read last cw\n");
+ return ret;
+ }
+
+ ret = qcom_spi_check_raw_flash_errors(snandc, 1);
+ if (ret)
+ return ret;
+
+ bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+
+ if (snandc->data_buffer[bbpos] == 0xff)
+ snandc->data_buffer[bbpos + 1] = 0xff;
+ else
+ snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos];
+
+ memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes);
+
+ return ret;
+}
+
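+/*
+ * Parse the per-codeword status words read back from the controller and
+ * update the ECC statistics: accumulate corrected bitflips, count a failure
+ * for uncorrectable codewords (unless the codeword was erased) and report
+ * flash operation errors with -EIO.
+ */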
+static int qcom_spi_check_error(struct qcom_nand_controller *snandc)
+{
+ struct snandc_read_status *buf;
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ int i, num_cw = snandc->qspi->num_cw;
+ bool flash_op_err = false, erased;
+ unsigned int max_bitflips = 0;
+ unsigned int uncorrectable_cws = 0;
+
+ snandc->qspi->ecc_stats.failed = 0;
+ snandc->qspi->ecc_stats.corrected = 0;
+
+ qcom_nandc_dev_to_mem(snandc, true);
+ buf = (struct snandc_read_status *)snandc->reg_read_buf;
+
+ for (i = 0; i < num_cw; i++, buf++) {
+ u32 flash, buffer, erased_cw;
+
+ flash = le32_to_cpu(buf->snandc_flash);
+ buffer = le32_to_cpu(buf->snandc_buffer);
+ erased_cw = le32_to_cpu(buf->snandc_erased_cw);
+
+ if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
+ if (ecc_cfg->bch_enabled)
+ erased = (erased_cw & ERASED_CW) == ERASED_CW;
+ else
+ erased = false;
+
+ if (!erased)
+ uncorrectable_cws |= BIT(i);
+
+ } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+ flash_op_err = true;
+ } else {
+ unsigned int stat;
+
+ stat = buffer & BS_CORRECTABLE_ERR_MSK;
+ snandc->qspi->ecc_stats.corrected += stat;
+ max_bitflips = max(max_bitflips, stat);
+ }
+ }
+
+ if (flash_op_err)
+ return -EIO;
+
+ if (!uncorrectable_cws)
+ snandc->qspi->ecc_stats.bitflips = max_bitflips;
+ else
+ snandc->qspi->ecc_stats.failed++;
+
+ return 0;
+}
+
+static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
+ u8 *oob_buf, int cw)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+ int raw_cw = cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+ int col;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ raw_cw = num_cw - 1;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, 0);
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ col = ecc_cfg->cw_size * cw;
+
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+ oob_size1 = ecc_cfg->bbm_size;
+
+ if (cw == (num_cw - 1)) {
+ data_size2 = NANDC_STEP_SIZE - data_size1 -
+ ((num_cw - 1) * 4);
+ oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size2 = ecc_cfg->cw_data - data_size1;
+ oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+ qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0);
+ read_loc += oob_size1;
+
+ qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0);
+ read_loc += data_size2;
+
+ qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1);
+
+ qcom_spi_config_cw_read(snandc, false, raw_cw);
+
+ qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0);
+ reg_off += data_size1;
+
+ qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0);
+ reg_off += oob_size1;
+
+ qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0);
+ reg_off += data_size2;
+
+ qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read raw cw %d\n", cw);
+ return ret;
+ }
+
+ return qcom_spi_check_raw_flash_errors(snandc, 1);
+}
+
+static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int ret, cw;
+ u32 num_cw = snandc->qspi->num_cw;
+
+ if (snandc->qspi->page_rw)
+ data_buf = op->data.buf.in;
+
+ oob_buf = snandc->qspi->oob_buf;
+ memset(oob_buf, 0xff, OOB_BUF_SIZE);
+
+ for (cw = 0; cw < num_cw; cw++) {
+ ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw);
+ if (ret)
+ return ret;
+
+ if (data_buf)
+ data_buf += ecc_cfg->cw_data;
+ if (oob_buf)
+ oob_buf += ecc_cfg->bytes;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int ret, i;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+
+ data_buf = op->data.buf.in;
+ oob_buf = snandc->qspi->oob_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+
+ cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
+
+ qcom_clear_bam_transaction(snandc);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ if (data_buf && oob_buf) {
+ qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0);
+ qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1);
+ } else if (data_buf) {
+ qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1);
+ } else {
+ qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
+ }
+
+ qcom_spi_config_cw_read(snandc, true, i);
+
+ if (data_buf)
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < ecc_cfg->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read page\n");
+ return ret;
+ }
+
+ return qcom_spi_check_error(snandc);
+}
+
+static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *oob_buf = NULL;
+ int ret, i;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+
+ oob_buf = op->data.buf.in;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
+
+ qcom_spi_config_cw_read(snandc, true, i);
+
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < ecc_cfg->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read oob\n");
+ return ret;
+ }
+
+ return qcom_spi_check_error(snandc);
+}
+
+static int qcom_spi_read_page(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
+ return qcom_spi_read_page_raw(snandc, op);
+
+ if (snandc->qspi->page_rw)
+ return qcom_spi_read_page_ecc(snandc, op);
+
+ if (snandc->qspi->oob_rw && snandc->qspi->raw_rw)
+ return qcom_spi_read_last_cw(snandc, op);
+
+ if (snandc->qspi->oob_rw)
+ return qcom_spi_read_page_oob(snandc, op);
+
+ return 0;
+}
+
+static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc)
+{
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG,
+ 1, NAND_BAM_NEXT_SGL);
+}
+
+static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc)
+{
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+}
+
+static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int i, ret;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ data_buf = snandc->qspi->data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+ memset(oob_buf, 0xff, OOB_BUF_SIZE);
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_config_page_write(snandc);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int reg_off = FLASH_BUF_ACC;
+
+ data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+ oob_size1 = ecc_cfg->bbm_size;
+
+ if (i == (num_cw - 1)) {
+ data_size2 = NANDC_STEP_SIZE - data_size1 -
+ ((num_cw - 1) << 2);
+ oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size2 = ecc_cfg->cw_data - data_size1;
+ oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_write_data_dma(snandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+ qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
+ oob_buf += oob_size1;
+ reg_off += oob_size1;
+
+ qcom_write_data_dma(snandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+ qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0);
+ oob_buf += oob_size2;
+
+ qcom_spi_config_cw_write(snandc);
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write raw page\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int i, ret;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
+
+ cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
+
+ if (snandc->qspi->data_buf)
+ data_buf = snandc->qspi->data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_config_page_write(snandc);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->bytes;
+ }
+
+ if (data_buf)
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0);
+
+ if (i == (num_cw - 1)) {
+ if (oob_buf) {
+ oob_buf += ecc_cfg->bbm_size;
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+ }
+
+ qcom_spi_config_cw_write(snandc);
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write page\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *oob_buf = NULL;
+ int ret, col, data_size, oob_size;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
+
+ cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
+
+ col = ecc_cfg->cw_size * (num_cw - 1);
+
+ oob_buf = snandc->qspi->data_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ /* calculate the data and oob size for the last codeword/step */
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = snandc->qspi->mtd->oobavail;
+
+ memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data);
+ /* write the new OOB content into the last codeword */
+ mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size,
+ oob_buf, 0, snandc->qspi->mtd->oobavail);
+ qcom_spi_config_page_write(snandc);
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0);
+ qcom_spi_config_cw_write(snandc);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write oob\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_execute(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
+ return qcom_spi_program_raw(snandc, op);
+
+ if (snandc->qspi->page_rw)
+ return qcom_spi_program_ecc(snandc, op);
+
+ if (snandc->qspi->oob_rw)
+ return qcom_spi_program_oob(snandc, op);
+
+ return 0;
+}
+
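+/* Translate a SPI NAND opcode into a QPIC NAND_FLASH_CMD register value. */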
+static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode, u32 *cmd)
+{
+ switch (opcode) {
+ case SPINAND_RESET:
+ *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE);
+ break;
+ case SPINAND_READID:
+ *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID);
+ break;
+ case SPINAND_GET_FEATURE:
+ *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE);
+ break;
+ case SPINAND_SET_FEATURE:
+ *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE |
+ QPIC_SET_FEATURE);
+ break;
+ case SPINAND_READ:
+ if (snandc->qspi->raw_rw) {
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PAGE_READ);
+ } else {
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC);
+ }
+
+ break;
+ case SPINAND_ERASE:
+ *cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP |
+ SPI_HOLD | SPI_TRANSFER_MODE_x1;
+ break;
+ case SPINAND_WRITE_EN:
+ *cmd = SPINAND_WRITE_EN;
+ break;
+ case SPINAND_PROGRAM_EXECUTE:
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE);
+ break;
+ case SPINAND_PROGRAM_LOAD:
+ *cmd = SPINAND_PROGRAM_LOAD;
+ break;
+ default:
+ dev_err(snandc->dev, "Opcode not supported: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_write_page(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ int ret;
+ u32 cmd;
+
+ ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
+ if (ret < 0)
+ return ret;
+
+ if (op->cmd.opcode == SPINAND_PROGRAM_LOAD)
+ snandc->qspi->data_buf = (u8 *)op->data.buf.out;
+
+ return 0;
+}
+
+static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_snand_op s_op = {};
+ u32 cmd;
+ int ret, opcode;
+
+ ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
+ if (ret < 0)
+ return ret;
+
+ s_op.cmd_reg = cmd;
+ s_op.addr1_reg = op->addr.val;
+ s_op.addr2_reg = 0;
+
+ opcode = op->cmd.opcode;
+
+ switch (opcode) {
+ case SPINAND_WRITE_EN:
+ return 0;
+ case SPINAND_PROGRAM_EXECUTE:
+ s_op.addr1_reg = op->addr.val << 16;
+ s_op.addr2_reg = op->addr.val >> 16 & 0xff;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ return qcom_spi_program_execute(snandc, op);
+ case SPINAND_READ:
+ s_op.addr1_reg = (op->addr.val << 16);
+ s_op.addr2_reg = op->addr.val >> 16 & 0xff;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ return 0;
+ case SPINAND_ERASE:
+ s_op.addr2_reg = (op->addr.val >> 16) & 0xffff;
+ s_op.addr1_reg = op->addr.val;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ return qcom_spi_block_erase(snandc);
+ default:
+ break;
+ }
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg);
+ snandc->regs->exec = cpu_to_le32(1);
+ snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg);
+ snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret)
+ dev_err(snandc->dev, "failure in submitting cmd descriptor\n");
+
+ return ret;
+}
+
+static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op)
+{
+ int ret, val, opcode;
+ bool copy = false, copy_ftr = false;
+
+ ret = qcom_spi_send_cmdaddr(snandc, op);
+ if (ret)
+ return ret;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ opcode = op->cmd.opcode;
+
+ switch (opcode) {
+ case SPINAND_READID:
+ snandc->buf_count = 4;
+ qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+ copy = true;
+ break;
+ case SPINAND_GET_FEATURE:
+ snandc->buf_count = 4;
+ qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
+ copy_ftr = true;
+ break;
+ case SPINAND_SET_FEATURE:
+ snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out);
+ qcom_write_reg_dma(snandc, &snandc->regs->flash_feature,
+ NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
+ break;
+ case SPINAND_PROGRAM_EXECUTE:
+ case SPINAND_WRITE_EN:
+ case SPINAND_RESET:
+ case SPINAND_ERASE:
+ case SPINAND_READ:
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode);
+ return ret;
+ }
+
+ if (copy) {
+ qcom_nandc_dev_to_mem(snandc, true);
+ memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count);
+ }
+
+ if (copy_ftr) {
+ qcom_nandc_dev_to_mem(snandc, true);
+ val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf);
+ val >>= 8;
+ memcpy(op->data.buf.in, &val, snandc->buf_count);
+ }
+
+ return 0;
+}
+
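+/*
+ * Page ops are routed through the codeword read/write paths: quad I/O
+ * transfers, or x1 transfers with a 2-byte address. Everything else is
+ * handled as a plain command by qcom_spi_io_op().
+ */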
+static bool qcom_spi_is_page_op(const struct spi_mem_op *op)
+{
+ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4)
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->addr.buswidth == 4 && op->data.buswidth == 4)
+ return true;
+
+ if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
+ return true;
+
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.buswidth == 4)
+ return true;
+ if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
+ return true;
+ }
+
+ return false;
+}
+
+static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
+ return false;
+
+ if (qcom_spi_is_page_op(op))
+ return true;
+
+ return ((!op->addr.nbytes || op->addr.buswidth == 1) &&
+ (!op->dummy.nbytes || op->dummy.buswidth == 1) &&
+ (!op->data.nbytes || op->data.buswidth == 1));
+}
+
+static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller);
+
+ dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
+ op->addr.val, op->addr.buswidth, op->addr.nbytes,
+ op->data.buswidth, op->data.nbytes);
+
+ if (qcom_spi_is_page_op(op)) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ return qcom_spi_read_page(snandc, op);
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ return qcom_spi_write_page(snandc, op);
+ } else {
+ return qcom_spi_io_op(snandc, op);
+ }
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops qcom_spi_mem_ops = {
+ .supports_op = qcom_spi_supports_op,
+ .exec_op = qcom_spi_exec_op,
+};
+
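+/* Advertise ECC support so the SPI-NAND core can use the pipelined QPIC engine. */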
+static const struct spi_controller_mem_caps qcom_spi_mem_caps = {
+ .ecc = true,
+};
+
+static int qcom_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctlr;
+ struct qcom_nand_controller *snandc;
+ struct qpic_spi_nand *qspi;
+ struct qpic_ecc *ecc;
+ struct resource *res;
+ const void *dev_data;
+ int ret;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
+ if (!qspi)
+ return -ENOMEM;
+
+ ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false);
+ if (!ctlr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ctlr);
+
+ snandc = spi_controller_get_devdata(ctlr);
+ qspi->snandc = snandc;
+
+ snandc->dev = dev;
+ snandc->qspi = qspi;
+ snandc->qspi->ctlr = ctlr;
+ snandc->qspi->ecc = ecc;
+
+ dev_data = of_device_get_match_data(dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "failed to get device data\n");
+ return -ENODEV;
+ }
+
+	snandc->props = dev_data;
+
+ snandc->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(snandc->core_clk))
+ return PTR_ERR(snandc->core_clk);
+
+ snandc->aon_clk = devm_clk_get(dev, "aon");
+ if (IS_ERR(snandc->aon_clk))
+ return PTR_ERR(snandc->aon_clk);
+
+ snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom");
+ if (IS_ERR(snandc->qspi->iomacro_clk))
+ return PTR_ERR(snandc->qspi->iomacro_clk);
+
+ snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(snandc->base))
+ return PTR_ERR(snandc->base);
+
+ snandc->base_phys = res->start;
+ snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, snandc->base_dma))
+ return -ENXIO;
+
+ ret = clk_prepare_enable(snandc->core_clk);
+ if (ret)
+ goto err_dis_core_clk;
+
+ ret = clk_prepare_enable(snandc->aon_clk);
+ if (ret)
+ goto err_dis_aon_clk;
+
+ ret = clk_prepare_enable(snandc->qspi->iomacro_clk);
+ if (ret)
+ goto err_dis_iom_clk;
+
+ ret = qcom_nandc_alloc(snandc);
+ if (ret)
+ goto err_snand_alloc;
+
+ ret = qcom_spi_init(snandc);
+ if (ret)
+ goto err_spi_init;
+
+ /* setup ECC engine */
+ snandc->qspi->ecc_eng.dev = &pdev->dev;
+ snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
+ snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined;
+ snandc->qspi->ecc_eng.priv = snandc;
+
+ ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret);
+ goto err_spi_init;
+ }
+
+ ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
+ ctlr->mem_ops = &qcom_spi_mem_ops;
+ ctlr->mem_caps = &qcom_spi_mem_caps;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+		dev_err(&pdev->dev, "spi_register_controller failed\n");
+ goto err_spi_init;
+ }
+
+ return 0;
+
+err_spi_init:
+ qcom_nandc_unalloc(snandc);
+err_snand_alloc:
+ clk_disable_unprepare(snandc->qspi->iomacro_clk);
+err_dis_iom_clk:
+ clk_disable_unprepare(snandc->aon_clk);
+err_dis_aon_clk:
+ clk_disable_unprepare(snandc->core_clk);
+err_dis_core_clk:
+ dma_unmap_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ return ret;
+}
+
+static void qcom_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ spi_unregister_controller(ctlr);
+
+ qcom_nandc_unalloc(snandc);
+
+ clk_disable_unprepare(snandc->aon_clk);
+ clk_disable_unprepare(snandc->core_clk);
+ clk_disable_unprepare(snandc->qspi->iomacro_clk);
+
+ dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+}
+
+static const struct qcom_nandc_props ipq9574_snandc_props = {
+ .dev_cmd_reg_start = 0x7000,
+ .supports_bam = true,
+};
+
+static const struct of_device_id qcom_snandc_of_match[] = {
+ {
+ .compatible = "qcom,ipq9574-snand",
+ .data = &ipq9574_snandc_props,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
+
+static struct platform_driver qcom_spi_driver = {
+ .driver = {
+ .name = "qcom_snand",
+ .of_match_table = qcom_snandc_of_match,
+ },
+ .probe = qcom_spi_probe,
+ .remove = qcom_spi_remove,
+};
+module_platform_driver(qcom_spi_driver);
+
+MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores");
+MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-realtek-rtl-snand.c b/drivers/spi/spi-realtek-rtl-snand.c
index cd0484041147..741cf2af3e91 100644
--- a/drivers/spi/spi-realtek-rtl-snand.c
+++ b/drivers/spi/spi-realtek-rtl-snand.c
@@ -364,7 +364,6 @@ static int rtl_snand_probe(struct platform_device *pdev)
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .cache_type = REGCACHE_NONE,
};
int irq, ret;
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index 70bbb459caa4..f3fe10eddb6a 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -13,12 +13,14 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
+#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <linux/interrupt.h>
#include <linux/spi/spi-mem.h>
/* System control */
@@ -110,6 +112,7 @@
#define SFC_VER_3 0x3
#define SFC_VER_4 0x4
#define SFC_VER_5 0x5
+#define SFC_VER_8 0x8
/* Delay line controller register */
#define SFC_DLL_CTRL0 0x3C
@@ -150,16 +153,13 @@
/* Data */
#define SFC_DATA 0x108
-/* The controller and documentation reports that it supports up to 4 CS
- * devices (0-3), however I have only been able to test a single CS (CS 0)
- * due to the configuration of my device.
- */
-#define SFC_MAX_CHIPSELECT_NUM 4
+#define SFC_CS1_REG_OFFSET 0x200
+
+#define SFC_MAX_CHIPSELECT_NUM 2
-/* The SFC can transfer max 16KB - 1 at one time
- * we set it to 15.5KB here for alignment.
- */
#define SFC_MAX_IOSIZE_VER3 (512 * 31)
+/* The controller can transfer up to 4 GiB at once, but 64 KiB suffices and keeps the reserved buffer small */
+#define SFC_MAX_IOSIZE_VER4 (0x10000U)
/* DMA is only enabled for large data transmission */
#define SFC_DMA_TRANS_THRETHOLD (0x40)
@@ -169,12 +169,14 @@
*/
#define SFC_MAX_SPEED (150 * 1000 * 1000)
+#define ROCKCHIP_AUTOSUSPEND_DELAY 2000
+
struct rockchip_sfc {
struct device *dev;
void __iomem *regbase;
struct clk *hclk;
struct clk *clk;
- u32 frequency;
+ u32 speed[SFC_MAX_CHIPSELECT_NUM];
/* virtual mapped addr for dma_buffer */
void *buffer;
dma_addr_t dma_buffer;
@@ -216,6 +218,22 @@ static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
return SFC_MAX_IOSIZE_VER3;
}
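+/* From v8 the SFC clocks the SPI bus at half its input clock, hence the 2x factor. */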
+static int rockchip_sfc_clk_set_rate(struct rockchip_sfc *sfc, unsigned long speed)
+{
+ if (sfc->version >= SFC_VER_8)
+ return clk_set_rate(sfc->clk, speed * 2);
+ else
+ return clk_set_rate(sfc->clk, speed);
+}
+
+static unsigned long rockchip_sfc_clk_get_rate(struct rockchip_sfc *sfc)
+{
+ if (sfc->version >= SFC_VER_8)
+ return clk_get_rate(sfc->clk) / 2;
+ else
+ return clk_get_rate(sfc->clk);
+}
+
static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask)
{
u32 reg;
@@ -302,6 +320,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
u32 len)
{
u32 ctrl = 0, cmd = 0;
+ u8 cs = spi_get_chipselect(mem->spi, 0);
/* set CMD */
cmd = op->cmd.opcode;
@@ -315,7 +334,8 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
} else {
cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
- writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT);
+ writel(op->addr.nbytes * 8 - 1,
+ sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_ABIT);
}
ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
@@ -347,7 +367,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
/* set the Controller */
ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
- cmd |= spi_get_chipselect(mem->spi, 0) << SFC_CMD_CS_SHIFT;
+ cmd |= cs << SFC_CMD_CS_SHIFT;
dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
op->addr.nbytes, op->addr.buswidth,
@@ -355,7 +375,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
ctrl, cmd, op->addr.val, len);
- writel(ctrl, sfc->regbase + SFC_CTRL);
+ writel(ctrl, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_CTRL);
writel(cmd, sfc->regbase + SFC_CMD);
if (op->addr.nbytes)
writel(op->addr.val, sfc->regbase + SFC_ADDR);
@@ -453,8 +473,10 @@ static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
- if (op->data.dir == SPI_MEM_DATA_OUT)
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
memcpy(sfc->buffer, op->data.buf.out, len);
+ dma_sync_single_for_device(sfc->dev, sfc->dma_buffer, len, DMA_TO_DEVICE);
+ }
ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len);
if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) {
@@ -462,8 +484,11 @@ static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
ret = -ETIMEDOUT;
}
rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA);
- if (op->data.dir == SPI_MEM_DATA_IN)
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_sync_single_for_cpu(sfc->dev, sfc->dma_buffer, len, DMA_FROM_DEVICE);
memcpy(op->data.buf.in, sfc->buffer, len);
+ }
return ret;
}
@@ -473,6 +498,16 @@ static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
int ret = 0;
u32 status;
+	/*
+	 * Usually only a little data remains in the FIFO, so the controller
+	 * goes idle quickly; poll briefly with no delay before falling back
+	 * to the longer timeout below.
+	 */
+ ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
+ !(status & SFC_SR_IS_BUSY),
+ 0, 10);
+ if (!ret)
+ return 0;
+
ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
!(status & SFC_SR_IS_BUSY),
20, timeout_us);
@@ -491,14 +526,22 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op
struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller);
u32 len = op->data.nbytes;
int ret;
+ u8 cs = spi_get_chipselect(mem->spi, 0);
- if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) {
- ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz);
+ ret = pm_runtime_get_sync(sfc->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(sfc->dev);
+ return ret;
+ }
+
+ if (unlikely(op->max_freq != sfc->speed[cs]) &&
+ !has_acpi_companion(sfc->dev)) {
+ ret = rockchip_sfc_clk_set_rate(sfc, op->max_freq);
if (ret)
- return ret;
- sfc->frequency = mem->spi->max_speed_hz;
+ goto out;
+ sfc->speed[cs] = op->max_freq;
dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n",
- sfc->frequency, clk_get_rate(sfc->clk));
+ sfc->speed[cs], rockchip_sfc_clk_get_rate(sfc));
}
rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
@@ -515,11 +558,17 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op
if (ret != len) {
dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
}
- return rockchip_sfc_xfer_done(sfc, 100000);
+ ret = rockchip_sfc_xfer_done(sfc, 100000);
+out:
+ pm_runtime_mark_last_busy(sfc->dev);
+ pm_runtime_put_autosuspend(sfc->dev);
+
+ return ret;
}
static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
@@ -536,6 +585,10 @@ static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
.adjust_op_size = rockchip_sfc_adjust_op_size,
};
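+/* Let the spi-mem core pass a per-operation maximum frequency via op->max_freq. */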
+static const struct spi_controller_mem_caps rockchip_sfc_mem_caps = {
+ .per_op_freq = true,
+};
+
static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id)
{
struct rockchip_sfc *sfc = dev_id;
@@ -561,6 +614,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
struct spi_controller *host;
struct rockchip_sfc *sfc;
int ret;
+ u32 i, val;
host = devm_spi_alloc_host(&pdev->dev, sizeof(*sfc));
if (!host)
@@ -568,6 +622,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->mem_ops = &rockchip_sfc_mem_ops;
+ host->mem_caps = &rockchip_sfc_mem_caps;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
host->max_speed_hz = SFC_MAX_SPEED;
@@ -581,31 +636,29 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
if (IS_ERR(sfc->regbase))
return PTR_ERR(sfc->regbase);
- sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
+ if (!has_acpi_companion(&pdev->dev))
+ sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
if (IS_ERR(sfc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(sfc->clk),
"Failed to get sfc interface clk\n");
- sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
+ if (!has_acpi_companion(&pdev->dev))
+ sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
if (IS_ERR(sfc->hclk))
return dev_err_probe(&pdev->dev, PTR_ERR(sfc->hclk),
"Failed to get sfc ahb clk\n");
- sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma");
-
- if (sfc->use_dma) {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_warn(dev, "Unable to set dma mask\n");
- return ret;
- }
-
- sfc->buffer = dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3,
- &sfc->dma_buffer, GFP_KERNEL);
- if (!sfc->buffer)
- return -ENOMEM;
+ if (has_acpi_companion(&pdev->dev)) {
+ ret = device_property_read_u32(&pdev->dev, "clock-frequency", &val);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to find clock-frequency in ACPI\n");
+ for (i = 0; i < SFC_MAX_CHIPSELECT_NUM; i++)
+ sfc->speed[i] = val;
}
+ sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma");
+
ret = clk_prepare_enable(sfc->hclk);
if (ret) {
dev_err(&pdev->dev, "Failed to enable ahb clk\n");
@@ -630,19 +683,47 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
goto err_irq;
}
+ platform_set_drvdata(pdev, sfc);
+
ret = rockchip_sfc_init(sfc);
if (ret)
goto err_irq;
- sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
sfc->version = rockchip_sfc_get_version(sfc);
+ sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
+
+ pm_runtime_set_autosuspend_delay(dev, ROCKCHIP_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_noresume(dev);
+
+ if (sfc->use_dma) {
+ sfc->buffer = (u8 *)__get_free_pages(GFP_KERNEL | GFP_DMA32,
+ get_order(sfc->max_iosize));
+ if (!sfc->buffer) {
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+ sfc->dma_buffer = virt_to_phys(sfc->buffer);
+ }
- ret = spi_register_controller(host);
+ ret = devm_spi_register_controller(dev, host);
if (ret)
- goto err_irq;
+ goto err_register;
- return 0;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+err_register:
+ free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
+err_dma:
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
err_irq:
clk_disable_unprepare(sfc->clk);
err_clk:
@@ -657,11 +738,80 @@ static void rockchip_sfc_remove(struct platform_device *pdev)
struct spi_controller *host = sfc->host;
spi_unregister_controller(host);
+ free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
clk_disable_unprepare(sfc->clk);
clk_disable_unprepare(sfc->hclk);
}
+#ifdef CONFIG_PM
+static int rockchip_sfc_runtime_suspend(struct device *dev)
+{
+ struct rockchip_sfc *sfc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(sfc->clk);
+ clk_disable_unprepare(sfc->hclk);
+
+ return 0;
+}
+
+static int rockchip_sfc_runtime_resume(struct device *dev)
+{
+ struct rockchip_sfc *sfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(sfc->hclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(sfc->clk);
+ if (ret < 0)
+ clk_disable_unprepare(sfc->hclk);
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_sfc_suspend(struct device *dev)
+{
+ pinctrl_pm_select_sleep_state(dev);
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int rockchip_sfc_resume(struct device *dev)
+{
+ struct rockchip_sfc *sfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ rockchip_sfc_init(sfc);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops rockchip_sfc_pm_ops = {
+ SET_RUNTIME_PM_OPS(rockchip_sfc_runtime_suspend,
+ rockchip_sfc_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(rockchip_sfc_suspend, rockchip_sfc_resume)
+};
+
static const struct of_device_id rockchip_sfc_dt_ids[] = {
{ .compatible = "rockchip,sfc"},
{ /* sentinel */ }
@@ -672,6 +822,7 @@ static struct platform_driver rockchip_sfc_driver = {
.driver = {
.name = "rockchip-sfc",
.of_match_table = rockchip_sfc_dt_ids,
+ .pm = &rockchip_sfc_pm_ops,
},
.probe = rockchip_sfc_probe,
.remove = rockchip_sfc_remove,
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 1bc012fce7cb..1a6381de6f33 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -547,7 +547,7 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
if (spi->mode & SPI_LSB_FIRST)
cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
- if (spi->mode & SPI_CS_HIGH)
+ if ((spi->mode & SPI_CS_HIGH) && !(spi_get_csgpiod(spi, 0)))
cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;
if (xfer->rx_buf && xfer->tx_buf)
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index e0c66a24a3cb..627cffea5d5c 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -75,6 +75,19 @@ static bool rpcif_spi_mem_supports_op(struct spi_mem *mem,
return true;
}
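+/* Direct-mapped writes are only reachable on xSPI (see dirmap_create below). */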
+static ssize_t xspi_spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf)
+{
+ struct rpcif *rpc = spi_controller_get_devdata(desc->mem->spi->controller);
+
+ if (offs + desc->info.offset + len > U32_MAX)
+ return -EINVAL;
+
+ rpcif_spi_mem_prepare(desc->mem->spi, &desc->info.op_tmpl, &offs, &len);
+
+ return xspi_dirmap_write(rpc->dev, offs, len, buf);
+}
+
static ssize_t rpcif_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
@@ -103,7 +116,7 @@ static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
if (!rpc->dirmap)
return -EOPNOTSUPP;
- if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
+ if (!rpc->xspi && desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
return -EOPNOTSUPP;
return 0;
@@ -125,6 +138,7 @@ static const struct spi_controller_mem_ops rpcif_spi_mem_ops = {
.exec_op = rpcif_spi_mem_exec_op,
.dirmap_create = rpcif_spi_mem_dirmap_create,
.dirmap_read = rpcif_spi_mem_dirmap_read,
+ .dirmap_write = xspi_spi_mem_dirmap_write,
};
static int rpcif_spi_probe(struct platform_device *pdev)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 389275dbc003..9c47f5741c5f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -139,7 +139,9 @@ struct s3c64xx_spi_dma_data {
* struct s3c64xx_spi_port_config - SPI Controller hardware info
* @fifo_lvl_mask: [DEPRECATED] use @{rx, tx}_fifomask instead.
* @rx_lvl_offset: [DEPRECATED] use @{rx,tx}_fifomask instead.
- * @fifo_depth: depth of the FIFO.
+ * @fifo_depth: depth of the FIFOs. Used by compatibles where all the instances
+ * of the IP define the same FIFO depth. It has higher precedence
+ * than the FIFO depth specified via DT.
* @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's
* length and position.
* @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. Shifted mask defining the field's
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index eecf9ea95ae3..1627aa66c965 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -7,13 +7,15 @@
#include <linux/kernel.h>
#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
-#include <linux/of.h>
#include <linux/platform_data/sc18is602.h>
+#include <linux/property.h>
+
#include <linux/gpio/consumer.h>
enum chips { sc18is602, sc18is602b, sc18is603 };
@@ -236,9 +238,7 @@ static int sc18is602_setup(struct spi_device *spi)
static int sc18is602_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
- struct device_node *np = dev->of_node;
struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
struct sc18is602 *hw;
struct spi_controller *host;
@@ -251,8 +251,9 @@ static int sc18is602_probe(struct i2c_client *client)
if (!host)
return -ENOMEM;
+ device_set_node(&host->dev, dev_fwnode(dev));
+
hw = spi_controller_get_devdata(host);
- i2c_set_clientdata(client, hw);
/* assert reset and then release */
hw->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
@@ -265,11 +266,7 @@ static int sc18is602_probe(struct i2c_client *client)
hw->dev = dev;
hw->ctrl = 0xff;
- if (client->dev.of_node)
- hw->id = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- hw->id = id->driver_data;
-
+ hw->id = (uintptr_t)i2c_get_match_data(client);
switch (hw->id) {
case sc18is602:
case sc18is602b:
@@ -278,28 +275,21 @@ static int sc18is602_probe(struct i2c_client *client)
break;
case sc18is603:
host->num_chipselect = 2;
- if (pdata) {
+ if (pdata)
hw->freq = pdata->clock_frequency;
- } else {
- const __be32 *val;
- int len;
-
- val = of_get_property(np, "clock-frequency", &len);
- if (val && len >= sizeof(__be32))
- hw->freq = be32_to_cpup(val);
- }
+ else
+ device_property_read_u32(dev, "clock-frequency", &hw->freq);
if (!hw->freq)
hw->freq = SC18IS602_CLOCK;
break;
}
- host->bus_num = np ? -1 : client->adapter->nr;
+ host->bus_num = dev_fwnode(dev) ? -1 : client->adapter->nr;
host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->setup = sc18is602_setup;
host->transfer_one_message = sc18is602_transfer_one;
host->max_transfer_size = sc18is602_max_transfer_size;
host->max_message_size = sc18is602_max_transfer_size;
- host->dev.of_node = np;
host->min_speed_hz = hw->freq / 128;
host->max_speed_hz = hw->freq / 4;
@@ -314,7 +304,7 @@ static const struct i2c_device_id sc18is602_id[] = {
};
MODULE_DEVICE_TABLE(i2c, sc18is602_id);
-static const struct of_device_id sc18is602_of_match[] __maybe_unused = {
+static const struct of_device_id sc18is602_of_match[] = {
{
.compatible = "nxp,sc18is602",
.data = (void *)sc18is602
@@ -334,7 +324,7 @@ MODULE_DEVICE_TABLE(of, sc18is602_of_match);
static struct i2c_driver sc18is602_driver = {
.driver = {
.name = "sc18is602",
- .of_match_table = of_match_ptr(sc18is602_of_match),
+ .of_match_table = sc18is602_of_match,
},
.probe = sc18is602_probe,
.id_table = sc18is602_id,
diff --git a/drivers/spi/spi-sg2044-nor.c b/drivers/spi/spi-sg2044-nor.c
new file mode 100644
index 000000000000..a59aa3fc55d2
--- /dev/null
+++ b/drivers/spi/spi-sg2044-nor.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SG2044 SPI NOR controller driver
+ *
+ * Copyright (c) 2025 Longbin Li <looong.bin@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi-mem.h>
+
+/* Hardware register definitions */
+#define SPIFMC_CTRL 0x00
+#define SPIFMC_CTRL_CPHA BIT(12)
+#define SPIFMC_CTRL_CPOL BIT(13)
+#define SPIFMC_CTRL_HOLD_OL BIT(14)
+#define SPIFMC_CTRL_WP_OL BIT(15)
+#define SPIFMC_CTRL_LSBF BIT(20)
+#define SPIFMC_CTRL_SRST BIT(21)
+#define SPIFMC_CTRL_SCK_DIV_SHIFT 0
+#define SPIFMC_CTRL_FRAME_LEN_SHIFT 16
+#define SPIFMC_CTRL_SCK_DIV_MASK 0x7FF
+
+#define SPIFMC_CE_CTRL 0x04
+#define SPIFMC_CE_CTRL_CEMANUAL BIT(0)
+#define SPIFMC_CE_CTRL_CEMANUAL_EN BIT(1)
+
+#define SPIFMC_DLY_CTRL 0x08
+#define SPIFMC_CTRL_FM_INTVL_MASK 0x000f
+#define SPIFMC_CTRL_FM_INTVL BIT(0)
+#define SPIFMC_CTRL_CET_MASK 0x0f00
+#define SPIFMC_CTRL_CET BIT(8)
+
+#define SPIFMC_DMMR 0x0c
+
+#define SPIFMC_TRAN_CSR 0x10
+#define SPIFMC_TRAN_CSR_TRAN_MODE_MASK GENMASK(1, 0)
+#define SPIFMC_TRAN_CSR_TRAN_MODE_RX BIT(0)
+#define SPIFMC_TRAN_CSR_TRAN_MODE_TX BIT(1)
+#define SPIFMC_TRAN_CSR_FAST_MODE BIT(3)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT (0x00 << 4)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT (0x01 << 4)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT (0x02 << 4)
+#define SPIFMC_TRAN_CSR_DMA_EN BIT(6)
+#define SPIFMC_TRAN_CSR_MISO_LEVEL BIT(7)
+#define SPIFMC_TRAN_CSR_ADDR_BYTES_MASK GENMASK(10, 8)
+#define SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT 8
+#define SPIFMC_TRAN_CSR_WITH_CMD BIT(11)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK GENMASK(13, 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE (0x00 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_2_BYTE (0x01 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE (0x02 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE (0x03 << 12)
+#define SPIFMC_TRAN_CSR_GO_BUSY BIT(15)
+#define SPIFMC_TRAN_CSR_ADDR4B_SHIFT 20
+#define SPIFMC_TRAN_CSR_CMD4B_SHIFT 21
+
+#define SPIFMC_TRAN_NUM 0x14
+#define SPIFMC_FIFO_PORT 0x18
+#define SPIFMC_FIFO_PT 0x20
+
+#define SPIFMC_INT_STS 0x28
+#define SPIFMC_INT_TRAN_DONE BIT(0)
+#define SPIFMC_INT_RD_FIFO BIT(2)
+#define SPIFMC_INT_WR_FIFO BIT(3)
+#define SPIFMC_INT_RX_FRAME BIT(4)
+#define SPIFMC_INT_TX_FRAME BIT(5)
+
+#define SPIFMC_INT_EN 0x2c
+#define SPIFMC_INT_TRAN_DONE_EN BIT(0)
+#define SPIFMC_INT_RD_FIFO_EN BIT(2)
+#define SPIFMC_INT_WR_FIFO_EN BIT(3)
+#define SPIFMC_INT_RX_FRAME_EN BIT(4)
+#define SPIFMC_INT_TX_FRAME_EN BIT(5)
+
+#define SPIFMC_OPT 0x030
+#define SPIFMC_OPT_DISABLE_FIFO_FLUSH BIT(1)
+
+#define SPIFMC_MAX_FIFO_DEPTH 8
+
+#define SPIFMC_MAX_READ_SIZE 0x10000
+
+struct sg2044_spifmc {
+ struct spi_controller *ctrl;
+ void __iomem *io_base;
+ struct device *dev;
+ struct mutex lock;
+ struct clk *clk;
+};
+
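+/* Poll INT_STS until any bit in @int_type is set (up to 1 s). */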
+static int sg2044_spifmc_wait_int(struct sg2044_spifmc *spifmc, u8 int_type)
+{
+ u32 stat;
+
+ return readl_poll_timeout(spifmc->io_base + SPIFMC_INT_STS, stat,
+ (stat & int_type), 0, 1000000);
+}
+
+static int sg2044_spifmc_wait_xfer_size(struct sg2044_spifmc *spifmc,
+ int xfer_size)
+{
+ u8 stat;
+
+ return readl_poll_timeout(spifmc->io_base + SPIFMC_FIFO_PT, stat,
+ ((stat & 0xf) == xfer_size), 1, 1000000);
+}
+
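+/*
+ * Clear the transfer-control fields of TRAN_CSR and return the cleared
+ * value for the caller to build its own configuration on.
+ */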
+static u32 sg2044_spifmc_init_reg(struct sg2044_spifmc *spifmc)
+{
+ u32 reg;
+
+ reg = readl(spifmc->io_base + SPIFMC_TRAN_CSR);
+ reg &= ~(SPIFMC_TRAN_CSR_TRAN_MODE_MASK |
+ SPIFMC_TRAN_CSR_FAST_MODE |
+ SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT |
+ SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT |
+ SPIFMC_TRAN_CSR_DMA_EN |
+ SPIFMC_TRAN_CSR_ADDR_BYTES_MASK |
+ SPIFMC_TRAN_CSR_WITH_CMD |
+ SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK);
+
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ return reg;
+}
+
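+/* Read one chunk of up to 64 KiB, draining the 8-byte RX FIFO as it fills. */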
+static ssize_t sg2044_spifmc_read_64k(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op, loff_t from,
+ size_t len, u_char *buf)
+{
+ int xfer_size, offset;
+ u32 reg;
+ int ret;
+ int i;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((from >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_RD_FIFO);
+ if (ret < 0)
+ return ret;
+
+ offset = 0;
+ while (offset < len) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, len - offset);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, xfer_size);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < xfer_size; i++)
+ buf[i + offset] = readb(spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ offset += xfer_size;
+ }
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return len;
+}
+
+static ssize_t sg2044_spifmc_read(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ size_t xfer_size;
+ size_t offset;
+ loff_t from = op->addr.val;
+ size_t len = op->data.nbytes;
+ int ret;
+ u8 *din = op->data.buf.in;
+
+ offset = 0;
+ while (offset < len) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_READ_SIZE, len - offset);
+
+ ret = sg2044_spifmc_read_64k(spifmc, op, from, xfer_size, din);
+ if (ret < 0)
+ return ret;
+
+ offset += xfer_size;
+ din += xfer_size;
+ from += xfer_size;
+ }
+
+ return 0;
+}
+
+static ssize_t sg2044_spifmc_write(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ size_t xfer_size;
+ const u8 *dout = op->data.buf.out;
+ int i, offset;
+ int ret;
+ u32 reg;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ writel(op->data.nbytes, spifmc->io_base + SPIFMC_TRAN_NUM);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, 0);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ offset = 0;
+ while (offset < op->data.nbytes) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, op->data.nbytes - offset);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < xfer_size; i++)
+ writeb(dout[i + offset], spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ offset += xfer_size;
+ }
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static ssize_t sg2044_spifmc_tran_cmd(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ int i, ret;
+ u32 reg;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static ssize_t sg2044_spifmc_trans(struct sg2044_spifmc *spifmc,
+				   const struct spi_mem_op *op)
+{
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		return sg2044_spifmc_read(spifmc, op);
+	else if (op->data.dir == SPI_MEM_DATA_OUT)
+		return sg2044_spifmc_write(spifmc, op);
+	else
+		return sg2044_spifmc_tran_cmd(spifmc, op);
+}
+
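+/* Address-less register ops: clock the opcode and data bytes through the FIFO. */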
+static ssize_t sg2044_spifmc_trans_reg(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ const u8 *dout = NULL;
+ u8 *din = NULL;
+ size_t len = op->data.nbytes;
+ int ret, i;
+ u32 reg;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ din = op->data.buf.in;
+ else
+ dout = op->data.buf.out;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+
+ if (din) {
+ reg |= SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+
+ writel(SPIFMC_OPT_DISABLE_FIFO_FLUSH, spifmc->io_base + SPIFMC_OPT);
+ } else {
+		/*
+		 * When writing the Status Register (opcode 0x01), configure
+		 * TRAN_CSR the same way as for a register read.
+		 */
+ if (op->cmd.opcode == 0x01) {
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ }
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < len; i++) {
+ if (din)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+ else
+ writeb(dout[i], spifmc->io_base + SPIFMC_FIFO_PORT);
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ if (din) {
+ while (len--)
+ *din++ = readb(spifmc->io_base + SPIFMC_FIFO_PORT);
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static int sg2044_spifmc_exec_op(struct spi_mem *mem,
+				 const struct spi_mem_op *op)
+{
+	struct sg2044_spifmc *spifmc;
+	ssize_t ret;
+
+	spifmc = spi_controller_get_devdata(mem->spi->controller);
+
+	mutex_lock(&spifmc->lock);
+
+	if (op->addr.nbytes == 0)
+		ret = sg2044_spifmc_trans_reg(spifmc, op);
+	else
+		ret = sg2044_spifmc_trans(spifmc, op);
+
+	mutex_unlock(&spifmc->lock);
+
+	return ret < 0 ? ret : 0;
+}
+
+static const struct spi_controller_mem_ops sg2044_spifmc_mem_ops = {
+ .exec_op = sg2044_spifmc_exec_op,
+};
+
+static void sg2044_spifmc_init(struct sg2044_spifmc *spifmc)
+{
+ u32 tran_csr;
+ u32 reg;
+
+ writel(0, spifmc->io_base + SPIFMC_DMMR);
+
+ reg = readl(spifmc->io_base + SPIFMC_CTRL);
+ reg |= SPIFMC_CTRL_SRST;
+ reg &= ~(SPIFMC_CTRL_SCK_DIV_MASK);
+ reg |= 1;
+ writel(reg, spifmc->io_base + SPIFMC_CTRL);
+
+ writel(0, spifmc->io_base + SPIFMC_CE_CTRL);
+
+ tran_csr = readl(spifmc->io_base + SPIFMC_TRAN_CSR);
+ tran_csr |= (0 << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT);
+ tran_csr |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE;
+ tran_csr |= SPIFMC_TRAN_CSR_WITH_CMD;
+ writel(tran_csr, spifmc->io_base + SPIFMC_TRAN_CSR);
+}
+
+static int sg2044_spifmc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct sg2044_spifmc *spifmc;
+ int ret;
+
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*spifmc));
+ if (!ctrl)
+ return -ENOMEM;
+
+ spifmc = spi_controller_get_devdata(ctrl);
+
+ spifmc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(spifmc->clk))
+ return dev_err_probe(dev, PTR_ERR(spifmc->clk), "Cannot get and enable AHB clock\n");
+
+ spifmc->dev = &pdev->dev;
+ spifmc->ctrl = ctrl;
+
+ spifmc->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spifmc->io_base))
+ return PTR_ERR(spifmc->io_base);
+
+ ctrl->num_chipselect = 1;
+ ctrl->dev.of_node = pdev->dev.of_node;
+ ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctrl->auto_runtime_pm = false;
+ ctrl->mem_ops = &sg2044_spifmc_mem_ops;
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD;
+
+ ret = devm_mutex_init(dev, &spifmc->lock);
+ if (ret)
+ return ret;
+
+ sg2044_spifmc_init(spifmc);
+ sg2044_spifmc_init_reg(spifmc);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "spi_register_controller failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id sg2044_spifmc_match[] = {
+ { .compatible = "sophgo,sg2044-spifmc-nor" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_spifmc_match);
+
+static struct platform_driver sg2044_nor_driver = {
+ .driver = {
+ .name = "sg2044,spifmc-nor",
+ .of_match_table = sg2044_spifmc_match,
+ },
+ .probe = sg2044_spifmc_probe,
+};
+module_platform_driver(sg2044_nor_driver);
+
+MODULE_DESCRIPTION("SG2044 SPI NOR controller driver");
+MODULE_AUTHOR("Longbin Li <looong.bin@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 8a98c313548e..94a867967e02 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -62,135 +63,6 @@ struct sh_msiof_spi_priv {
#define MAX_SS 3 /* Maximum number of native chip selects */
-#define SITMDR1 0x00 /* Transmit Mode Register 1 */
-#define SITMDR2 0x04 /* Transmit Mode Register 2 */
-#define SITMDR3 0x08 /* Transmit Mode Register 3 */
-#define SIRMDR1 0x10 /* Receive Mode Register 1 */
-#define SIRMDR2 0x14 /* Receive Mode Register 2 */
-#define SIRMDR3 0x18 /* Receive Mode Register 3 */
-#define SITSCR 0x20 /* Transmit Clock Select Register */
-#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
-#define SICTR 0x28 /* Control Register */
-#define SIFCTR 0x30 /* FIFO Control Register */
-#define SISTR 0x40 /* Status Register */
-#define SIIER 0x44 /* Interrupt Enable Register */
-#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
-#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
-#define SITFDR 0x50 /* Transmit FIFO Data Register */
-#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
-#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
-#define SIRFDR 0x60 /* Receive FIFO Data Register */
-
-/* SITMDR1 and SIRMDR1 */
-#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
-#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
-#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */
-#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */
-#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
-#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
-#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
-#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
-#define SIMDR1_FLD_SHIFT 2
-#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
-/* SITMDR1 */
-#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
-#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
-#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
-
-/* SITMDR2 and SIRMDR2 */
-#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
-#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
-#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
-
-/* SITSCR and SIRSCR */
-#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
-#define SISCR_BRPS(i) (((i) - 1) << 8)
-#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
-#define SISCR_BRDV_DIV_2 0
-#define SISCR_BRDV_DIV_4 1
-#define SISCR_BRDV_DIV_8 2
-#define SISCR_BRDV_DIV_16 3
-#define SISCR_BRDV_DIV_32 4
-#define SISCR_BRDV_DIV_1 7
-
-/* SICTR */
-#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
-#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
-#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
-#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
-#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
-#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
-#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
-#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
-#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
-#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */
-#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */
-#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
-#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
-#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
-#define SICTR_TXE BIT(9) /* Transmit Enable */
-#define SICTR_RXE BIT(8) /* Receive Enable */
-#define SICTR_TXRST BIT(1) /* Transmit Reset */
-#define SICTR_RXRST BIT(0) /* Receive Reset */
-
-/* SIFCTR */
-#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define SIFCTR_TFWM_64 (0UL << 29) /* Transfer Request when 64 empty stages */
-#define SIFCTR_TFWM_32 (1UL << 29) /* Transfer Request when 32 empty stages */
-#define SIFCTR_TFWM_24 (2UL << 29) /* Transfer Request when 24 empty stages */
-#define SIFCTR_TFWM_16 (3UL << 29) /* Transfer Request when 16 empty stages */
-#define SIFCTR_TFWM_12 (4UL << 29) /* Transfer Request when 12 empty stages */
-#define SIFCTR_TFWM_8 (5UL << 29) /* Transfer Request when 8 empty stages */
-#define SIFCTR_TFWM_4 (6UL << 29) /* Transfer Request when 4 empty stages */
-#define SIFCTR_TFWM_1 (7UL << 29) /* Transfer Request when 1 empty stage */
-#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
-#define SIFCTR_TFUA_SHIFT 20
-#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
-#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
-#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
-#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
-#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
-#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
-#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
-#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
-#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
-#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
-#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
-#define SIFCTR_RFUA_SHIFT 4
-#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT)
-
-/* SISTR */
-#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
-#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
-#define SISTR_TEOF BIT(23) /* Frame Transmission End */
-#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
-#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
-#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
-#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
-#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
-#define SISTR_REOF BIT(7) /* Frame Reception End */
-#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
-#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
-#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
-
-/* SIIER */
-#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
-#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
-#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
-#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
-#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
-#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
-#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
-#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
-#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
-#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
-#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
-#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
-#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
-#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
-
-
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
switch (reg_offs) {
@@ -255,11 +127,6 @@ static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
100);
}
-static const u32 sh_msiof_spi_div_array[] = {
- SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
- SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
-};
-
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
struct spi_transfer *t)
{
@@ -298,7 +165,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
t->effective_speed_hz = parent_rate / (brps << div_pow);
- scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
+ /* div_pow == 0 maps to SISCR_BRDV_DIV_1 == all ones */
+ scr = FIELD_PREP(SISCR_BRDV, div_pow - 1) |
+ FIELD_PREP(SISCR_BRPS, brps - 1);
sh_msiof_write(p, SITSCR, scr);
if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, SIRSCR, scr);
@@ -340,18 +209,19 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return 0;
}
- val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
- val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
+ val = FIELD_PREP(SIMDR1_DTDL, sh_msiof_get_delay_bit(p->info->dtdl)) |
+ FIELD_PREP(SIMDR1_SYNCDL,
+ sh_msiof_get_delay_bit(p->info->syncdl));
return val;
}
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
- u32 cpol, u32 cpha,
- u32 tx_hi_z, u32 lsb_first, u32 cs_high)
+ bool cpol, bool cpha, bool tx_hi_z,
+ bool lsb_first, bool cs_high)
{
+ bool edge;
u32 tmp;
- int edge;
/*
* CPOL CPHA TSCKIZ RSCKIZ TEDG REDG
@@ -360,16 +230,18 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
* 1 0 11 11 0 0
* 1 1 11 11 1 1
*/
- tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
- tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
- tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
+ tmp = FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_SPI) |
+ FIELD_PREP(SIMDR1_FLD, 1) | SIMDR1_XXSTP |
+ FIELD_PREP(SIMDR1_SYNCAC, !cs_high) |
+ FIELD_PREP(SIMDR1_BITLSB, lsb_first);
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
if (spi_controller_is_target(p->ctlr)) {
sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
} else {
sh_msiof_write(p, SITMDR1,
tmp | SIMDR1_TRMD | SITMDR1_PCON |
- (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
+ FIELD_PREP(SITMDR1_SYNCCH,
+ ss < MAX_SS ? ss : 0));
}
if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
@@ -378,30 +250,42 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
sh_msiof_write(p, SIRMDR1, tmp);
tmp = 0;
- tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
- tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
+ tmp |= SICTR_TSCKIZ_SCK | FIELD_PREP(SICTR_TSCKIZ_POL, cpol);
+ tmp |= SICTR_RSCKIZ_SCK | FIELD_PREP(SICTR_RSCKIZ_POL, cpol);
edge = cpol ^ !cpha;
- tmp |= edge << SICTR_TEDG_SHIFT;
- tmp |= edge << SICTR_REDG_SHIFT;
- tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
+ tmp |= FIELD_PREP(SICTR_TEDG, edge);
+ tmp |= FIELD_PREP(SICTR_REDG, edge);
+ tmp |= FIELD_PREP(SICTR_TXDIZ,
+ tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW);
sh_msiof_write(p, SICTR, tmp);
}
static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
- u32 bits, u32 words)
+ u32 bits, u32 words1, u32 words2)
{
- u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
+ u32 dr2 = FIELD_PREP(SIMDR2_GRP, words2 ? 1 : 0) |
+ FIELD_PREP(SIMDR2_BITLEN1, bits - 1) |
+ FIELD_PREP(SIMDR2_WDLEN1, words1 - 1);
if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, SITMDR2, dr2);
else
- sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
+ sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK);
if (rx_buf)
sh_msiof_write(p, SIRMDR2, dr2);
+
+ if (words2) {
+ u32 dr3 = FIELD_PREP(SIMDR3_BITLEN2, bits - 1) |
+ FIELD_PREP(SIMDR3_WDLEN2, words2 - 1);
+
+ sh_msiof_write(p, SITMDR3, dr3);
+ if (rx_buf)
+ sh_msiof_write(p, SIRMDR3, dr3);
+ }
}
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
@@ -411,140 +295,154 @@ static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u8 *buf_8 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, buf_8[k] << fs);
}
static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u16 *buf_16 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, buf_16[k] << fs);
}
static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u16 *buf_16 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
}
static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u32 *buf_32 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, buf_32[k] << fs);
}
static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u32 *buf_32 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
}
static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u32 *buf_32 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
}
static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf,
+ unsigned int words, unsigned int fs)
{
const u32 *buf_32 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}
static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u8 *buf_8 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u16 *buf_16 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u16 *buf_16 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
}
static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u32 *buf_32 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u32 *buf_32 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
}
static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u32 *buf_32 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
}
static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u32 *buf_32 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
@@ -564,12 +462,12 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
/* Configure native chip select mode/polarity early */
- clr = SIMDR1_SYNCMD_MASK;
- set = SIMDR1_SYNCMD_SPI;
+ clr = SIMDR1_SYNCMD;
+ set = FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_SPI);
if (spi->mode & SPI_CS_HIGH)
- clr |= BIT(SIMDR1_SYNCAC_SHIFT);
+ clr |= SIMDR1_SYNCAC;
else
- set |= BIT(SIMDR1_SYNCAC_SHIFT);
+ set |= SIMDR1_SYNCAC;
pm_runtime_get_sync(&p->pdev->dev);
tmp = sh_msiof_read(p, SITMDR1) & ~clr;
sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
@@ -586,7 +484,8 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
{
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
const struct spi_device *spi = msg->spi;
- u32 ss, cs_high;
+ bool cs_high;
+ u32 ss;
/* Configure pins before asserting CS */
if (spi_get_csgpiod(spi, 0)) {
@@ -594,12 +493,11 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
cs_high = p->native_cs_high;
} else {
ss = spi_get_chipselect(spi, 0);
- cs_high = !!(spi->mode & SPI_CS_HIGH);
+ cs_high = spi->mode & SPI_CS_HIGH;
}
- sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST), cs_high);
+ sh_msiof_spi_set_pin_regs(p, ss, spi->mode & SPI_CPOL,
+ spi->mode & SPI_CPHA, spi->mode & SPI_3WIRE,
+ spi->mode & SPI_LSB_FIRST, cs_high);
return 0;
}
@@ -672,20 +570,22 @@ static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
void (*tx_fifo)(struct sh_msiof_spi_priv *,
- const void *, int, int),
+ const void *, unsigned int,
+ unsigned int),
void (*rx_fifo)(struct sh_msiof_spi_priv *,
- void *, int, int),
+ void *, unsigned int,
+ unsigned int),
const void *tx_buf, void *rx_buf,
- int words, int bits)
+ unsigned int words, unsigned int bits)
{
- int fifo_shift;
+ unsigned int fifo_shift;
int ret;
/* limit maximum word transfer to rx/tx fifo size */
if (tx_buf)
- words = min_t(int, words, p->tx_fifo_size);
+ words = min(words, p->tx_fifo_size);
if (rx_buf)
- words = min_t(int, words, p->rx_fifo_size);
+ words = min(words, p->rx_fifo_size);
/* the fifo contents need shifting */
fifo_shift = 32 - bits;
@@ -694,7 +594,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
sh_msiof_write(p, SIFCTR, 0);
/* setup msiof transfer mode registers */
- sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
+ sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words, 0);
sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
/* write tx fifo */
@@ -744,10 +644,12 @@ static void sh_msiof_dma_complete(void *arg)
}
static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
- void *rx, unsigned int len)
+ void *rx, unsigned int len,
+ unsigned int max_wdlen)
{
u32 ier_bits = 0;
struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ unsigned int words1, words2;
dma_cookie_t cookie;
int ret;
@@ -789,10 +691,14 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
/* 1 stage FIFO watermarks for DMA */
- sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
+ sh_msiof_write(p, SIFCTR,
+ FIELD_PREP(SIFCTR_TFWM, SIFCTR_TFWM_1) |
+ FIELD_PREP(SIFCTR_RFWM, SIFCTR_RFWM_1));
/* setup msiof transfer mode registers (32-bit words) */
- sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
+ words1 = min(len / 4, max_wdlen);
+ words2 = len / 4 - words1;
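+	/* Words beyond one group's WDLEN1 limit are carried by group 2 (WDLEN2). */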
+ sh_msiof_spi_set_mode_regs(p, tx, rx, 32, words1, words2);
sh_msiof_write(p, SIIER, ier_bits);
@@ -911,9 +817,12 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
struct spi_transfer *t)
{
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
+ unsigned int max_wdlen = FIELD_MAX(SIMDR2_WDLEN1) + 1;
void (*copy32)(u32 *, const u32 *, unsigned int);
- void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
- void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
+ void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, unsigned int,
+ unsigned int);
+ void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, unsigned int,
+ unsigned int);
const void *tx_buf = t->tx_buf;
void *rx_buf = t->rx_buf;
unsigned int len = t->len;
@@ -931,17 +840,17 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
if (!spi_controller_is_target(p->ctlr))
sh_msiof_spi_set_clk_regs(p, t);
+ if (tx_buf)
+ max_wdlen = min(max_wdlen, p->tx_fifo_size);
+ if (rx_buf)
+ max_wdlen = min(max_wdlen, p->rx_fifo_size);
+
while (ctlr->dma_tx && len > 15) {
/*
* DMA supports 32-bit words only, hence pack 8-bit and 16-bit
* words, with byte resp. word swapping.
*/
- unsigned int l = 0;
-
- if (tx_buf)
- l = min(round_down(len, 4), p->tx_fifo_size * 4);
- if (rx_buf)
- l = min(round_down(len, 4), p->rx_fifo_size * 4);
+ unsigned int l = min(round_down(len, 4), 2 * max_wdlen * 4);
if (bits <= 8) {
copy32 = copy_bswap32;
@@ -954,7 +863,7 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
if (tx_buf)
copy32(p->tx_dma_page, tx_buf, l / 4);
- ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
+ ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l, max_wdlen);
if (ret == -EAGAIN) {
dev_warn_once(&p->pdev->dev,
"DMA not available, falling back to PIO\n");
@@ -1061,7 +970,7 @@ static const struct sh_msiof_chipdata rcar_gen2_data = {
.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
- .rx_fifo_size = 64,
+ .rx_fifo_size = 128,
.ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 0,
};
@@ -1070,7 +979,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
- .rx_fifo_size = 64,
+ .rx_fifo_size = 256,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
+ .min_div_pow = 1,
+};
+
+static const struct sh_msiof_chipdata rcar_gen4_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
+ .tx_fifo_size = 256,
+ .rx_fifo_size = 256,
.ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 1,
};
@@ -1079,7 +997,7 @@ static const struct sh_msiof_chipdata rcar_r8a7795_data = {
.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
- .rx_fifo_size = 64,
+ .rx_fifo_size = 256,
.ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 1,
.flags = SH_MSIOF_FLAG_FIXED_DTDL_200,
@@ -1087,20 +1005,14 @@ static const struct sh_msiof_chipdata rcar_r8a7795_data = {
static const struct of_device_id sh_msiof_match[] __maybe_unused = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
- { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data },
- { .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
- { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
+ { .compatible = "renesas,msiof-r8a779a0", .data = &rcar_gen3_data },
+ { .compatible = "renesas,msiof-r8a779f0", .data = &rcar_gen3_data },
+ { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen4_data },
{ .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
@@ -1276,20 +1188,26 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
+ struct device *dev = &pdev->dev;
unsigned long clksrc;
int i;
int ret;
- chipdata = of_device_get_match_data(&pdev->dev);
+	/* Check whether MSIOF is used in I2S or SPI mode by looking for a "port" node */
+ struct device_node *port __free(device_node) = of_graph_get_next_port(dev->of_node, NULL);
+ if (port) /* It was MSIOF-I2S */
+ return -ENODEV;
+
+ chipdata = of_device_get_match_data(dev);
if (chipdata) {
- info = sh_msiof_spi_parse_dt(&pdev->dev);
+ info = sh_msiof_spi_parse_dt(dev);
} else {
chipdata = (const void *)pdev->id_entry->driver_data;
- info = dev_get_platdata(&pdev->dev);
+ info = dev_get_platdata(dev);
}
if (!info) {
- dev_err(&pdev->dev, "failed to obtain device info\n");
+ dev_err(dev, "failed to obtain device info\n");
return -ENXIO;
}
@@ -1297,11 +1215,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
info->dtdl = 200;
if (info->mode == MSIOF_SPI_TARGET)
- ctlr = spi_alloc_target(&pdev->dev,
- sizeof(struct sh_msiof_spi_priv));
+ ctlr = spi_alloc_target(dev, sizeof(struct sh_msiof_spi_priv));
else
- ctlr = spi_alloc_host(&pdev->dev,
- sizeof(struct sh_msiof_spi_priv));
+ ctlr = spi_alloc_host(dev, sizeof(struct sh_msiof_spi_priv));
if (ctlr == NULL)
return -ENOMEM;
@@ -1315,9 +1231,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
init_completion(&p->done);
init_completion(&p->done_txdma);
- p->clk = devm_clk_get(&pdev->dev, NULL);
+ p->clk = devm_clk_get(dev, NULL);
if (IS_ERR(p->clk)) {
- dev_err(&pdev->dev, "cannot get clock\n");
+ dev_err(dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
@@ -1334,15 +1250,14 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
goto err1;
}
- ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
- dev_name(&pdev->dev), p);
+ ret = devm_request_irq(dev, i, sh_msiof_spi_irq, 0, dev_name(dev), p);
if (ret) {
- dev_err(&pdev->dev, "unable to request irq\n");
+ dev_err(dev, "unable to request irq\n");
goto err1;
}
p->pdev = pdev;
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(dev);
/* Platform data may override FIFO sizes */
p->tx_fifo_size = chipdata->tx_fifo_size;
@@ -1361,7 +1276,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ctlr->flags = chipdata->ctlr_flags;
ctlr->bus_num = pdev->id;
ctlr->num_chipselect = p->info->num_chipselect;
- ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->dev.of_node = dev->of_node;
ctlr->setup = sh_msiof_spi_setup;
ctlr->prepare_message = sh_msiof_prepare_message;
ctlr->target_abort = sh_msiof_target_abort;
@@ -1373,11 +1288,11 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ret = sh_msiof_request_dma(p);
if (ret < 0)
- dev_warn(&pdev->dev, "DMA not available, using PIO\n");
+ dev_warn(dev, "DMA not available, using PIO\n");
- ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ ret = devm_spi_register_controller(dev, ctlr);
if (ret < 0) {
- dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
+ dev_err(dev, "devm_spi_register_controller error.\n");
goto err2;
}
@@ -1385,7 +1300,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
err2:
sh_msiof_release_dma(p);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
err1:
spi_controller_put(ctlr);
return ret;
diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
index adac645732fe..c4969f66a0ba 100644
--- a/drivers/spi/spi-sn-f-ospi.c
+++ b/drivers/spi/spi-sn-f-ospi.c
@@ -116,6 +116,9 @@ struct f_ospi {
static u32 f_ospi_get_dummy_cycle(const struct spi_mem_op *op)
{
+ if (!op->dummy.nbytes)
+ return 0;
+
return (op->dummy.nbytes * 8) / op->dummy.buswidth;
}
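
The dummy-cycle count is simply the number of dummy bits divided by the bus width; the new early return guards the case where there is no dummy phase at all (nbytes and buswidth are then both zero, so the division would be undefined). A small sketch of the same arithmetic:

#include <stdio.h>

/* Same arithmetic as f_ospi_get_dummy_cycle(): nbytes * 8 / buswidth */
static unsigned int dummy_cycles(unsigned int nbytes, unsigned int buswidth)
{
	if (!nbytes)
		return 0;	/* the new guard: avoids dividing by a zero buswidth */
	return nbytes * 8 / buswidth;
}

int main(void)
{
	/* 1 dummy byte on a quad bus = 2 cycles; 3 bytes on x8 = 3 cycles */
	printf("%u %u\n", dummy_cycles(1, 4), dummy_cycles(3, 8));
	return 0;
}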
@@ -335,7 +338,6 @@ static void f_ospi_config_indir_protocol(struct f_ospi *ospi,
static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct spi_device *spi = mem->spi;
u32 irq_stat_en;
int ret;
@@ -343,7 +345,7 @@ static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem,
if (ret)
return ret;
- f_ospi_config_clk(ospi, spi->max_speed_hz);
+ f_ospi_config_clk(ospi, op->max_freq);
f_ospi_config_indir_protocol(ospi, mem, op);
@@ -577,6 +579,10 @@ static const struct spi_controller_mem_ops f_ospi_mem_ops = {
.exec_op = f_ospi_exec_op,
};
+static const struct spi_controller_mem_caps f_ospi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int f_ospi_init(struct f_ospi *ospi)
{
int ret;
@@ -614,6 +620,7 @@ static int f_ospi_probe(struct platform_device *pdev)
| SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL
| SPI_MODE_0 | SPI_MODE_1 | SPI_LSB_FIRST;
ctlr->mem_ops = &f_ospi_mem_ops;
+ ctlr->mem_caps = &f_ospi_mem_caps;
ctlr->bus_num = -1;
of_property_read_u32(dev->of_node, "num-cs", &num_cs);
if (num_cs > OSPI_NUM_CS) {
diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c
new file mode 100644
index 000000000000..7c1fa55fbc47
--- /dev/null
+++ b/drivers/spi/spi-stm32-ospi.c
@@ -0,0 +1,1067 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/types.h>
+
+#define OSPI_CR 0x00
+#define CR_EN BIT(0)
+#define CR_ABORT BIT(1)
+#define CR_DMAEN BIT(2)
+#define CR_FTHRES_SHIFT 8
+#define CR_TEIE BIT(16)
+#define CR_TCIE BIT(17)
+#define CR_SMIE BIT(19)
+#define CR_APMS BIT(22)
+#define CR_CSSEL BIT(24)
+#define CR_FMODE_MASK GENMASK(29, 28)
+#define CR_FMODE_INDW (0U)
+#define CR_FMODE_INDR (1U)
+#define CR_FMODE_APM (2U)
+#define CR_FMODE_MM (3U)
+
+#define OSPI_DCR1 0x08
+#define DCR1_DLYBYP BIT(3)
+#define DCR1_DEVSIZE_MASK GENMASK(20, 16)
+#define DCR1_MTYP_MASK GENMASK(26, 24)
+#define DCR1_MTYP_MX_MODE 1
+#define DCR1_MTYP_HP_MEMMODE 4
+
+#define OSPI_DCR2 0x0c
+#define DCR2_PRESC_MASK GENMASK(7, 0)
+
+#define OSPI_SR 0x20
+#define SR_TEF BIT(0)
+#define SR_TCF BIT(1)
+#define SR_FTF BIT(2)
+#define SR_SMF BIT(3)
+#define SR_BUSY BIT(5)
+
+#define OSPI_FCR 0x24
+#define FCR_CTEF BIT(0)
+#define FCR_CTCF BIT(1)
+#define FCR_CSMF BIT(3)
+
+#define OSPI_DLR 0x40
+#define OSPI_AR 0x48
+#define OSPI_DR 0x50
+#define OSPI_PSMKR 0x80
+#define OSPI_PSMAR 0x88
+
+#define OSPI_CCR 0x100
+#define CCR_IMODE_MASK GENMASK(2, 0)
+#define CCR_IDTR BIT(3)
+#define CCR_ISIZE_MASK GENMASK(5, 4)
+#define CCR_ADMODE_MASK GENMASK(10, 8)
+#define CCR_ADMODE_8LINES 4
+#define CCR_ADDTR BIT(11)
+#define CCR_ADSIZE_MASK GENMASK(13, 12)
+#define CCR_ADSIZE_32BITS 3
+#define CCR_DMODE_MASK GENMASK(26, 24)
+#define CCR_DMODE_8LINES 4
+#define CCR_DQSE BIT(29)
+#define CCR_DDTR BIT(27)
+#define CCR_BUSWIDTH_0 0x0
+#define CCR_BUSWIDTH_1 0x1
+#define CCR_BUSWIDTH_2 0x2
+#define CCR_BUSWIDTH_4 0x3
+#define CCR_BUSWIDTH_8 0x4
+
+#define OSPI_TCR 0x108
+#define TCR_DCYC_MASK GENMASK(4, 0)
+#define TCR_DHQC BIT(28)
+#define TCR_SSHIFT BIT(30)
+
+#define OSPI_IR 0x110
+
+#define STM32_OSPI_MAX_MMAP_SZ SZ_256M
+#define STM32_OSPI_MAX_NORCHIP 2
+
+#define STM32_FIFO_TIMEOUT_US 30000
+#define STM32_ABT_TIMEOUT_US 100000
+#define STM32_COMP_TIMEOUT_MS 5000
+#define STM32_BUSY_TIMEOUT_US 100000
+
+#define STM32_AUTOSUSPEND_DELAY -1
+
+struct stm32_ospi {
+ struct device *dev;
+ struct spi_controller *ctrl;
+ struct clk *clk;
+ struct reset_control *rstc;
+
+ struct completion data_completion;
+ struct completion match_completion;
+
+ struct dma_chan *dma_chtx;
+ struct dma_chan *dma_chrx;
+ struct completion dma_completion;
+
+ void __iomem *regs_base;
+ void __iomem *mm_base;
+ phys_addr_t regs_phys_base;
+ resource_size_t mm_size;
+ u32 clk_rate;
+ u32 fmode;
+ u32 cr_reg;
+ u32 dcr_reg;
+ u32 flash_presc[STM32_OSPI_MAX_NORCHIP];
+ int irq;
+ unsigned long status_timeout;
+
+ /*
+	 * Protects the device configuration, which may differ between
+	 * two flash accesses
+ */
+ struct mutex lock;
+};
+
+static void stm32_ospi_read_fifo(u8 *val, void __iomem *addr)
+{
+ *val = readb_relaxed(addr);
+}
+
+static void stm32_ospi_write_fifo(u8 *val, void __iomem *addr)
+{
+ writeb_relaxed(*val, addr);
+}
+
+static int stm32_ospi_abort(struct stm32_ospi *ospi)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr;
+ int timeout;
+
+ cr = readl_relaxed(regs_base + OSPI_CR) | CR_ABORT;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+
+	/* wait for the abort bit to be cleared by hardware */
+ timeout = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_CR,
+ cr, !(cr & CR_ABORT), 1,
+ STM32_ABT_TIMEOUT_US);
+
+ if (timeout)
+ dev_err(ospi->dev, "%s abort timeout:%d\n", __func__, timeout);
+
+ return timeout;
+}
+
+static int stm32_ospi_poll(struct stm32_ospi *ospi, u8 *buf, u32 len, bool read)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ void (*fifo)(u8 *val, void __iomem *addr);
+ u32 sr;
+ int ret;
+
+ if (read)
+ fifo = stm32_ospi_read_fifo;
+ else
+ fifo = stm32_ospi_write_fifo;
+
+ while (len--) {
+ ret = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_SR,
+ sr, sr & SR_FTF, 1,
+ STM32_FIFO_TIMEOUT_US);
+ if (ret) {
+ dev_err(ospi->dev, "fifo timeout (len:%d stat:%#x)\n",
+ len, sr);
+ return ret;
+ }
+ fifo(buf++, regs_base + OSPI_DR);
+ }
+
+ return 0;
+}
+
+static int stm32_ospi_wait_nobusy(struct stm32_ospi *ospi)
+{
+ u32 sr;
+
+ return readl_relaxed_poll_timeout_atomic(ospi->regs_base + OSPI_SR,
+ sr, !(sr & SR_BUSY), 1,
+ STM32_BUSY_TIMEOUT_US);
+}
+
+static int stm32_ospi_wait_cmd(struct stm32_ospi *ospi)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr, sr;
+ int err = 0;
+
+ if ((readl_relaxed(regs_base + OSPI_SR) & SR_TCF) ||
+ ospi->fmode == CR_FMODE_APM)
+ goto out;
+
+ reinit_completion(&ospi->data_completion);
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ writel_relaxed(cr | CR_TCIE | CR_TEIE, regs_base + OSPI_CR);
+
+ if (!wait_for_completion_timeout(&ospi->data_completion,
+ msecs_to_jiffies(STM32_COMP_TIMEOUT_MS)))
+ err = -ETIMEDOUT;
+
+ sr = readl_relaxed(regs_base + OSPI_SR);
+ if (sr & SR_TCF)
+ /* avoid false timeout */
+ err = 0;
+ if (sr & SR_TEF)
+ err = -EIO;
+
+out:
+ /* clear flags */
+ writel_relaxed(FCR_CTCF | FCR_CTEF, regs_base + OSPI_FCR);
+
+ if (!err)
+ err = stm32_ospi_wait_nobusy(ospi);
+
+ return err;
+}
+
+static void stm32_ospi_dma_callback(void *arg)
+{
+ struct completion *dma_completion = arg;
+
+ complete(dma_completion);
+}
+
+static irqreturn_t stm32_ospi_irq(int irq, void *dev_id)
+{
+ struct stm32_ospi *ospi = (struct stm32_ospi *)dev_id;
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr, sr;
+
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ sr = readl_relaxed(regs_base + OSPI_SR);
+
+ if (cr & CR_SMIE && sr & SR_SMF) {
+ /* disable irq */
+ cr &= ~CR_SMIE;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+ complete(&ospi->match_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ if (sr & (SR_TEF | SR_TCF)) {
+ /* disable irq */
+ cr &= ~CR_TCIE & ~CR_TEIE;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+ complete(&ospi->data_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_ospi_dma_setup(struct stm32_ospi *ospi,
+ struct dma_slave_config *dma_cfg)
+{
+ if (dma_cfg && ospi->dma_chrx) {
+ if (dmaengine_slave_config(ospi->dma_chrx, dma_cfg)) {
+ dev_err(ospi->dev, "dma rx config failed\n");
+ dma_release_channel(ospi->dma_chrx);
+ ospi->dma_chrx = NULL;
+ }
+ }
+
+ if (dma_cfg && ospi->dma_chtx) {
+ if (dmaengine_slave_config(ospi->dma_chtx, dma_cfg)) {
+ dev_err(ospi->dev, "dma tx config failed\n");
+ dma_release_channel(ospi->dma_chtx);
+ ospi->dma_chtx = NULL;
+ }
+ }
+
+ init_completion(&ospi->dma_completion);
+}
+
+static int stm32_ospi_tx_mm(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ memcpy_fromio(op->data.buf.in, ospi->mm_base + op->addr.val,
+ op->data.nbytes);
+ return 0;
+}
+
+static int stm32_ospi_tx_dma(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ struct dma_async_tx_descriptor *desc;
+ void __iomem *regs_base = ospi->regs_base;
+ enum dma_transfer_direction dma_dir;
+ struct dma_chan *dma_ch;
+ struct sg_table sgt;
+ dma_cookie_t cookie;
+ u32 cr, t_out;
+ int err;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_dir = DMA_DEV_TO_MEM;
+ dma_ch = ospi->dma_chrx;
+ } else {
+ dma_dir = DMA_MEM_TO_DEV;
+ dma_ch = ospi->dma_chtx;
+ }
+
+ /*
+	 * spi_controller_dma_map_mem_op_data() returns -EINVAL if the buffer
+	 * is not DMA-able (DMA-able: in vmalloc | kmap | virt_addr_valid)
+ */
+ err = spi_controller_dma_map_mem_op_data(ospi->ctrl, op, &sgt);
+ if (err)
+ return err;
+
+ desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
+ dma_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ cr = readl_relaxed(regs_base + OSPI_CR);
+
+ reinit_completion(&ospi->dma_completion);
+ desc->callback = stm32_ospi_dma_callback;
+ desc->callback_param = &ospi->dma_completion;
+ cookie = dmaengine_submit(desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ goto out;
+
+ dma_async_issue_pending(dma_ch);
+
+ writel_relaxed(cr | CR_DMAEN, regs_base + OSPI_CR);
+
+ t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
+ if (!wait_for_completion_timeout(&ospi->dma_completion,
+ msecs_to_jiffies(t_out)))
+ err = -ETIMEDOUT;
+
+ if (err)
+ dmaengine_terminate_all(dma_ch);
+
+out:
+ writel_relaxed(cr & ~CR_DMAEN, regs_base + OSPI_CR);
+out_unmap:
+ spi_controller_dma_unmap_mem_op_data(ospi->ctrl, op, &sgt);
+
+ return err;
+}
+
+static int stm32_ospi_xfer(struct stm32_ospi *ospi, const struct spi_mem_op *op)
+{
+ u8 *buf;
+
+ if (!op->data.nbytes)
+ return 0;
+
+ if (ospi->fmode == CR_FMODE_MM)
+ return stm32_ospi_tx_mm(ospi, op);
+ else if (((op->data.dir == SPI_MEM_DATA_IN && ospi->dma_chrx) ||
+ (op->data.dir == SPI_MEM_DATA_OUT && ospi->dma_chtx)) &&
+ op->data.nbytes > 8)
+ if (!stm32_ospi_tx_dma(ospi, op))
+ return 0;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ buf = op->data.buf.in;
+ else
+ buf = (u8 *)op->data.buf.out;
+
+ return stm32_ospi_poll(ospi, buf, op->data.nbytes,
+ op->data.dir == SPI_MEM_DATA_IN);
+}
+
+static int stm32_ospi_wait_poll_status(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr;
+
+ reinit_completion(&ospi->match_completion);
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ writel_relaxed(cr | CR_SMIE, regs_base + OSPI_CR);
+
+ if (!wait_for_completion_timeout(&ospi->match_completion,
+ msecs_to_jiffies(ospi->status_timeout))) {
+ u32 sr = readl_relaxed(regs_base + OSPI_SR);
+
+ /* Avoid false timeout */
+ if (!(sr & SR_SMF))
+ return -ETIMEDOUT;
+ }
+
+ writel_relaxed(FCR_CSMF, regs_base + OSPI_FCR);
+
+ return 0;
+}
+
+static int stm32_ospi_get_mode(u8 buswidth)
+{
+ switch (buswidth) {
+ case 8:
+ return CCR_BUSWIDTH_8;
+ case 4:
+ return CCR_BUSWIDTH_4;
+ default:
+ return buswidth;
+ }
+}
+
+static int stm32_ospi_send(struct spi_device *spi, const struct spi_mem_op *op)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(spi->controller);
+ void __iomem *regs_base = ospi->regs_base;
+ u32 ccr, cr, dcr2, tcr;
+ int timeout, err = 0, err_poll_status = 0;
+ u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+ dev_dbg(ospi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.val, op->data.nbytes);
+
+ cr = readl_relaxed(ospi->regs_base + OSPI_CR);
+ cr &= ~CR_CSSEL;
+ cr |= FIELD_PREP(CR_CSSEL, cs);
+ cr &= ~CR_FMODE_MASK;
+ cr |= FIELD_PREP(CR_FMODE_MASK, ospi->fmode);
+ writel_relaxed(cr, regs_base + OSPI_CR);
+
+ if (op->data.nbytes)
+ writel_relaxed(op->data.nbytes - 1, regs_base + OSPI_DLR);
+
+ /* set prescaler */
+ dcr2 = readl_relaxed(regs_base + OSPI_DCR2);
+ dcr2 |= FIELD_PREP(DCR2_PRESC_MASK, ospi->flash_presc[cs]);
+ writel_relaxed(dcr2, regs_base + OSPI_DCR2);
+
+ ccr = FIELD_PREP(CCR_IMODE_MASK, stm32_ospi_get_mode(op->cmd.buswidth));
+
+ if (op->addr.nbytes) {
+ ccr |= FIELD_PREP(CCR_ADMODE_MASK,
+ stm32_ospi_get_mode(op->addr.buswidth));
+ ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
+ }
+
+ tcr = TCR_SSHIFT;
+ if (op->dummy.buswidth && op->dummy.nbytes) {
+ tcr |= FIELD_PREP(TCR_DCYC_MASK,
+ op->dummy.nbytes * 8 / op->dummy.buswidth);
+ }
+ writel_relaxed(tcr, regs_base + OSPI_TCR);
+
+ if (op->data.nbytes) {
+ ccr |= FIELD_PREP(CCR_DMODE_MASK,
+ stm32_ospi_get_mode(op->data.buswidth));
+ }
+
+ writel_relaxed(ccr, regs_base + OSPI_CCR);
+
+	/* set the instruction; it must be written after the CCR register update */
+ writel_relaxed(op->cmd.opcode, regs_base + OSPI_IR);
+
+ if (op->addr.nbytes && ospi->fmode != CR_FMODE_MM)
+ writel_relaxed(op->addr.val, regs_base + OSPI_AR);
+
+ if (ospi->fmode == CR_FMODE_APM)
+ err_poll_status = stm32_ospi_wait_poll_status(ospi, op);
+
+ err = stm32_ospi_xfer(ospi, op);
+
+ /*
+ * Abort in:
+	 * - the error case
+	 * - memory-mapped read: prefetching must be stopped if we read the
+	 *   last byte of the device (device size - FIFO size); since the
+	 *   device size is not known, prefetching is always stopped.
+ */
+ if (err || err_poll_status || ospi->fmode == CR_FMODE_MM)
+ goto abort;
+
+	/* Wait for the end of the transfer in indirect mode */
+ err = stm32_ospi_wait_cmd(ospi);
+ if (err)
+ goto abort;
+
+ return 0;
+
+abort:
+ timeout = stm32_ospi_abort(ospi);
+ writel_relaxed(FCR_CTCF | FCR_CSMF, regs_base + OSPI_FCR);
+
+ if (err || err_poll_status || timeout)
+ dev_err(ospi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
+ __func__, err, err_poll_status, timeout);
+
+ return err;
+}
+
+static int stm32_ospi_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+
+ writel_relaxed(mask, regs_base + OSPI_PSMKR);
+ writel_relaxed(match, regs_base + OSPI_PSMAR);
+ ospi->fmode = CR_FMODE_APM;
+ ospi->status_timeout = timeout_ms;
+
+ ret = stm32_ospi_send(mem->spi, op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+ if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
+ ospi->fmode = CR_FMODE_INDR;
+ else
+ ospi->fmode = CR_FMODE_INDW;
+
+ ret = stm32_ospi_send(mem->spi, op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
+
+ if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
+ return -EOPNOTSUPP;
+
+	/* Should never happen: a NULL mm_base is a probe error exit condition */
+ if (!ospi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
+
+ if (!ospi->mm_size)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static ssize_t stm32_ospi_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
+ struct spi_mem_op op;
+ u32 addr_max;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+ /*
+	 * Make a local copy of desc->info.op_tmpl and complete the dirmap
+	 * spi_mem_op template with offs, len and *buf so that all needed
+	 * transfer information ends up in the struct spi_mem_op
+ */
+ memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
+ dev_dbg(ospi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);
+
+ op.data.nbytes = len;
+ op.addr.val = desc->info.offset + offs;
+ op.data.buf.in = buf;
+
+ addr_max = op.addr.val + op.data.nbytes + 1;
+ if (addr_max < ospi->mm_size && op.addr.buswidth)
+ ospi->fmode = CR_FMODE_MM;
+ else
+ ospi->fmode = CR_FMODE_INDR;
+
+ ret = stm32_ospi_send(desc->mem->spi, &op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret ?: len;
+}
+
+static int stm32_ospi_transfer_one_message(struct spi_controller *ctrl,
+ struct spi_message *msg)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
+ struct spi_transfer *transfer;
+ struct spi_device *spi = msg->spi;
+ struct spi_mem_op op;
+ struct gpio_desc *cs_gpiod = spi->cs_gpiod[ffs(spi->cs_index_mask) - 1];
+ int ret = 0;
+
+ if (!cs_gpiod)
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+
+ gpiod_set_value_cansleep(cs_gpiod, true);
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+ u8 dummy_bytes = 0;
+
+ memset(&op, 0, sizeof(op));
+
+ dev_dbg(ospi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
+ transfer->tx_buf, transfer->tx_nbits,
+ transfer->rx_buf, transfer->rx_nbits,
+ transfer->len, transfer->dummy_data);
+
+ /*
+		 * The OSPI hardware supports dummy-byte transfers.
+		 * If the current transfer carries dummy bytes, merge it with
+		 * the next transfer to satisfy the OSPI block constraints
+ */
+ if (transfer->dummy_data) {
+ op.dummy.buswidth = transfer->tx_nbits;
+ op.dummy.nbytes = transfer->len;
+ dummy_bytes = transfer->len;
+
+			/* If this happens, the message was not built correctly */
+ if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
+ ret = -EINVAL;
+ goto end_of_transfer;
+ }
+
+ transfer = list_next_entry(transfer, transfer_list);
+ }
+
+ op.data.nbytes = transfer->len;
+
+ if (transfer->rx_buf) {
+ ospi->fmode = CR_FMODE_INDR;
+ op.data.buswidth = transfer->rx_nbits;
+ op.data.dir = SPI_MEM_DATA_IN;
+ op.data.buf.in = transfer->rx_buf;
+ } else {
+ ospi->fmode = CR_FMODE_INDW;
+ op.data.buswidth = transfer->tx_nbits;
+ op.data.dir = SPI_MEM_DATA_OUT;
+ op.data.buf.out = transfer->tx_buf;
+ }
+
+ ret = stm32_ospi_send(spi, &op);
+ if (ret)
+ goto end_of_transfer;
+
+ msg->actual_length += transfer->len + dummy_bytes;
+ }
+
+end_of_transfer:
+ gpiod_set_value_cansleep(cs_gpiod, false);
+
+ mutex_unlock(&ospi->lock);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctrl);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_setup(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->controller;
+ struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+ u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+ if (ctrl->busy)
+ return -EBUSY;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ ospi->flash_presc[cs] = DIV_ROUND_UP(ospi->clk_rate, spi->max_speed_hz) - 1;
+
+ mutex_lock(&ospi->lock);
+
+ ospi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_EN;
+ writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
+
+	/* set the DCR1 device size to the maximum address */
+ ospi->dcr_reg = DCR1_DEVSIZE_MASK | DCR1_DLYBYP;
+ writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
+
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+}
+
+/*
+ * No special host constraint, so use default spi_mem_default_supports_op
+ * to check supported mode.
+ */
+static const struct spi_controller_mem_ops stm32_ospi_mem_ops = {
+ .exec_op = stm32_ospi_exec_op,
+ .dirmap_create = stm32_ospi_dirmap_create,
+ .dirmap_read = stm32_ospi_dirmap_read,
+ .poll_status = stm32_ospi_poll_status,
+};
+
+static int stm32_ospi_get_resources(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_ospi *ospi = platform_get_drvdata(pdev);
+ struct resource *res;
+ struct reserved_mem *rmem = NULL;
+ struct device_node *node;
+ int ret;
+
+ ospi->regs_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(ospi->regs_base))
+ return PTR_ERR(ospi->regs_base);
+
+ ospi->regs_phys_base = res->start;
+
+ ospi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ospi->clk))
+ return dev_err_probe(dev, PTR_ERR(ospi->clk),
+ "Can't get clock\n");
+
+ ospi->clk_rate = clk_get_rate(ospi->clk);
+ if (!ospi->clk_rate) {
+ dev_err(dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ ospi->irq = platform_get_irq(pdev, 0);
+ if (ospi->irq < 0)
+ return ospi->irq;
+
+ ret = devm_request_irq(dev, ospi->irq, stm32_ospi_irq, 0,
+ dev_name(dev), ospi);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+ return ret;
+ }
+
+ ospi->rstc = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(ospi->rstc))
+ return dev_err_probe(dev, PTR_ERR(ospi->rstc),
+ "Can't get reset\n");
+
+ ospi->dma_chrx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ospi->dma_chrx)) {
+ ret = PTR_ERR(ospi->dma_chrx);
+ ospi->dma_chrx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma;
+ }
+
+ ospi->dma_chtx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ospi->dma_chtx)) {
+ ret = PTR_ERR(ospi->dma_chtx);
+ ospi->dma_chtx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma;
+ }
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+
+ if (rmem) {
+ ospi->mm_size = rmem->size;
+ ospi->mm_base = devm_ioremap(dev, rmem->base, rmem->size);
+ if (!ospi->mm_base) {
+ dev_err(dev, "unable to map memory region: %pa+%pa\n",
+ &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+
+ if (ospi->mm_size > STM32_OSPI_MAX_MMAP_SZ) {
+ dev_err(dev, "Memory map size outsize bounds\n");
+ ret = -EINVAL;
+ goto err_dma;
+ }
+ } else {
+ dev_info(dev, "No memory-map region found\n");
+ }
+
+ init_completion(&ospi->data_completion);
+ init_completion(&ospi->match_completion);
+
+ return 0;
+
+err_dma:
+ dev_info(dev, "Can't get all resources (%d)\n", ret);
+
+ if (ospi->dma_chtx)
+ dma_release_channel(ospi->dma_chtx);
+ if (ospi->dma_chrx)
+ dma_release_channel(ospi->dma_chrx);
+
+ return ret;
+}
+
+static int stm32_ospi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct stm32_ospi *ospi;
+ struct dma_slave_config dma_cfg;
+ struct device_node *child;
+ int ret;
+ u8 spi_flash_count = 0;
+
+ /*
+ * Flash subnodes sanity check:
+ * 1 or 2 spi-nand/spi-nor flashes => supported
+ * All other flash node configuration => not supported
+ */
+ for_each_available_child_of_node(dev->of_node, child) {
+ if (of_device_is_compatible(child, "jedec,spi-nor") ||
+ of_device_is_compatible(child, "spi-nand"))
+ spi_flash_count++;
+ }
+
+ if (spi_flash_count == 0 || spi_flash_count > 2) {
+ dev_err(dev, "Incorrect DT flash node\n");
+ return -ENODEV;
+ }
+
+ ctrl = devm_spi_alloc_host(dev, sizeof(*ospi));
+ if (!ctrl)
+ return -ENOMEM;
+
+ ospi = spi_controller_get_devdata(ctrl);
+ ospi->ctrl = ctrl;
+
+ ospi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ospi);
+
+ ret = stm32_ospi_get_resources(pdev);
+ if (ret)
+ return ret;
+
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.src_addr = ospi->regs_phys_base + OSPI_DR;
+ dma_cfg.dst_addr = ospi->regs_phys_base + OSPI_DR;
+ dma_cfg.src_maxburst = 4;
+ dma_cfg.dst_maxburst = 4;
+ stm32_ospi_dma_setup(ospi, &dma_cfg);
+
+ mutex_init(&ospi->lock);
+
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_TX_OCTAL | SPI_RX_OCTAL;
+ ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ ctrl->setup = stm32_ospi_setup;
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &stm32_ospi_mem_ops;
+ ctrl->use_gpio_descriptors = true;
+ ctrl->transfer_one_message = stm32_ospi_transfer_one_message;
+ ctrl->num_chipselect = STM32_OSPI_MAX_NORCHIP;
+ ctrl->dev.of_node = dev->of_node;
+
+ pm_runtime_enable(ospi->dev);
+ pm_runtime_set_autosuspend_delay(ospi->dev, STM32_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(ospi->dev);
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ goto err_pm_enable;
+
+ if (ospi->rstc) {
+ reset_control_assert(ospi->rstc);
+ udelay(2);
+ reset_control_deassert(ospi->rstc);
+ }
+
+ ret = spi_register_controller(ctrl);
+ if (ret) {
+ /* Disable ospi */
+ writel_relaxed(0, ospi->regs_base + OSPI_CR);
+ goto err_pm_resume;
+ }
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+
+err_pm_resume:
+ pm_runtime_put_sync_suspend(ospi->dev);
+
+err_pm_enable:
+ pm_runtime_force_suspend(ospi->dev);
+ mutex_destroy(&ospi->lock);
+ if (ospi->dma_chtx)
+ dma_release_channel(ospi->dma_chtx);
+ if (ospi->dma_chrx)
+ dma_release_channel(ospi->dma_chrx);
+
+ return ret;
+}
+
+static void stm32_ospi_remove(struct platform_device *pdev)
+{
+ struct stm32_ospi *ospi = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return;
+
+ spi_unregister_controller(ospi->ctrl);
+ /* Disable ospi */
+ writel_relaxed(0, ospi->regs_base + OSPI_CR);
+ mutex_destroy(&ospi->lock);
+
+ if (ospi->dma_chtx)
+ dma_release_channel(ospi->dma_chtx);
+ if (ospi->dma_chrx)
+ dma_release_channel(ospi->dma_chrx);
+
+ pm_runtime_put_sync_suspend(ospi->dev);
+ pm_runtime_force_suspend(ospi->dev);
+}
+
+static int __maybe_unused stm32_ospi_suspend(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return pm_runtime_force_suspend(ospi->dev);
+}
+
+static int __maybe_unused stm32_ospi_resume(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+
+ ret = pm_runtime_force_resume(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
+ writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_ospi_runtime_suspend(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ospi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_ospi_runtime_resume(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(ospi->clk);
+}
+
+static const struct dev_pm_ops stm32_ospi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_ospi_suspend, stm32_ospi_resume)
+ SET_RUNTIME_PM_OPS(stm32_ospi_runtime_suspend,
+ stm32_ospi_runtime_resume, NULL)
+};
+
+static const struct of_device_id stm32_ospi_of_match[] = {
+ { .compatible = "st,stm32mp25-ospi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_ospi_of_match);
+
+static struct platform_driver stm32_ospi_driver = {
+ .probe = stm32_ospi_probe,
+ .remove = stm32_ospi_remove,
+ .driver = {
+ .name = "stm32-ospi",
+ .pm = &stm32_ospi_pm_ops,
+ .of_match_table = stm32_ospi_of_match,
+ },
+};
+module_platform_driver(stm32_ospi_driver);
+
+MODULE_DESCRIPTION("STMicroelectronics STM32 OCTO SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 540b6948b24d..9691197bbf5a 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -362,11 +362,6 @@ static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
u32 ccr, cr;
int timeout, err = 0, err_poll_status = 0;
- dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth,
- op->addr.val, op->data.nbytes);
-
cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_PRESC_MASK & ~CR_FSEL;
cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index fcbe864c9b7d..aa92fd5a35a9 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -264,6 +264,9 @@ static int sun4i_spi_transfer_one(struct spi_controller *host,
else
reg |= SUN4I_CTL_DHB;
+ /* Now that the settings are correct, enable the interface */
+ reg |= SUN4I_CTL_ENABLE;
+
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
/* Ensure that we have a parent clock fast enough */
@@ -404,7 +407,7 @@ static int sun4i_spi_runtime_resume(struct device *dev)
}
sun4i_spi_write(sspi, SUN4I_CTL_REG,
- SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP);
+ SUN4I_CTL_MASTER | SUN4I_CTL_TP);
return 0;
@@ -462,6 +465,7 @@ static int sun4i_spi_probe(struct platform_device *pdev)
sspi->host = host;
host->max_speed_hz = 100 * 1000 * 1000;
host->min_speed_hz = 3 * 1000;
+ host->use_gpio_descriptors = true;
host->set_cs = sun4i_spi_set_cs;
host->transfer_one = sun4i_spi_transfer_one;
host->num_chipselect = 4;
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 3822d7c8d8ed..795a8482c2c7 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
u32 inactive_cycles;
u8 cs_state;
- if (setup->unit != SPI_DELAY_UNIT_SCK ||
- hold->unit != SPI_DELAY_UNIT_SCK ||
- inactive->unit != SPI_DELAY_UNIT_SCK) {
+ if ((setup->value && setup->unit != SPI_DELAY_UNIT_SCK) ||
+ (hold->value && hold->unit != SPI_DELAY_UNIT_SCK) ||
+ (inactive->value && inactive->unit != SPI_DELAY_UNIT_SCK)) {
dev_err(&spi->dev,
"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
SPI_DELAY_UNIT_SCK);
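
The reworked check only insists on SPI_DELAY_UNIT_SCK when a delay value is actually set, so an unset (zero) delay no longer fails validation. A toy model of the predicate (the unit constants below are stand-ins, not the real spi.h values):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the spi.h delay-unit constants */
enum { DELAY_UNIT_USECS, DELAY_UNIT_NSECS, DELAY_UNIT_SCK };

struct delay { unsigned int value; unsigned int unit; };

/* New rule: only a non-zero delay must use the SCK unit */
static bool delay_ok(const struct delay *d)
{
	return !d->value || d->unit == DELAY_UNIT_SCK;
}

int main(void)
{
	struct delay unset = { 0, DELAY_UNIT_USECS };	/* previously rejected */
	struct delay two_sck = { 2, DELAY_UNIT_SCK };

	printf("unset:%d two_sck:%d\n", delay_ok(&unset), delay_ok(&two_sck));
	return 0;
}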
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 08e49a876894..3581757a269b 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -22,6 +22,7 @@
#include <linux/spi/spi.h>
#include <linux/acpi.h>
#include <linux/property.h>
+#include <linux/sizes.h>
#define QSPI_COMMAND1 0x000
#define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
@@ -110,6 +111,9 @@
#define QSPI_DMA_BLK 0x024
#define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)
+#define QSPI_DMA_MEM_ADDRESS 0x028
+#define QSPI_DMA_HI_ADDRESS 0x02c
+
#define QSPI_TX_FIFO 0x108
#define QSPI_RX_FIFO 0x188
@@ -134,7 +138,7 @@
#define QSPI_COMMAND_VALUE_SET(X) (((x) & 0xFF) << 0)
#define QSPI_CMB_SEQ_CMD_CFG 0x1a0
-#define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13)
+#define QSPI_COMMAND_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13)
#define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13)
#define QSPI_COMMAND_SDR_DDR BIT(12)
#define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0)
@@ -147,7 +151,7 @@
#define QSPI_ADDRESS_VALUE_SET(X) (((x) & 0xFFFF) << 0)
#define QSPI_CMB_SEQ_ADDR_CFG 0x1ac
-#define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13)
+#define QSPI_ADDRESS_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13)
#define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13)
#define QSPI_ADDRESS_SDR_DDR BIT(12)
#define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0)
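
The corrected macros encode the SPI bus width (1, 2 or 4 lines) into the two-bit register field by shifting right once — x1→0, x2→1, x4→2 — where the old mask-only version mis-encoded x4 as 0. A compilable check of the new encoding:

#include <stdio.h>

/* New encoding: bus width 1/2/4 -> field value 0/1/2, placed at bit 13 */
#define QSPI_COMMAND_X1_X2_X4(x)	((((x) >> 1) & 0x3) << 13)

int main(void)
{
	for (unsigned int width = 1; width <= 4; width <<= 1)
		printf("x%u -> reg field %u\n", width,
		       (QSPI_COMMAND_X1_X2_X4(width) >> 13) & 0x3);
	/* the old ((x) & 0x3) << 13 form would have encoded x4 as field 0 */
	return 0;
}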
@@ -156,15 +160,19 @@
#define DATA_DIR_RX BIT(1)
#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
-#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)
-#define CMD_TRANSFER 0
-#define ADDR_TRANSFER 1
-#define DATA_TRANSFER 2
+#define DEFAULT_QSPI_DMA_BUF_LEN SZ_64K
+
+enum tegra_qspi_transfer_type {
+ CMD_TRANSFER = 0,
+ ADDR_TRANSFER = 1,
+ DUMMY_TRANSFER = 2,
+ DATA_TRANSFER = 3
+};
struct tegra_qspi_soc_data {
- bool has_dma;
bool cmb_xfer_capable;
bool supports_tpm;
+ bool has_ext_dma;
unsigned int cs_count;
};
@@ -600,13 +608,16 @@ static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_trans
len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
- dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
- dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
+ if (t->tx_buf)
+ dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+ if (t->rx_buf)
+ dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}
static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
struct dma_slave_config dma_sconfig = { 0 };
+ dma_addr_t rx_dma_phys, tx_dma_phys;
unsigned int len;
u8 dma_burst;
int ret = 0;
@@ -629,60 +640,86 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct
len = tqspi->curr_dma_words * 4;
/* set attention level based on length of transfer */
- val = 0;
- if (len & 0xf) {
- val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
- dma_burst = 1;
- } else if (((len) >> 4) & 0x1) {
- val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
- dma_burst = 4;
- } else {
- val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
- dma_burst = 8;
+ if (tqspi->soc_data->has_ext_dma) {
+ val = 0;
+ if (len & 0xf) {
+ val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
+ dma_burst = 1;
+ } else if (((len) >> 4) & 0x1) {
+ val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
+ dma_burst = 4;
+ } else {
+ val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
+ dma_burst = 8;
+ }
+
+ tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
}
- tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
tqspi->dma_control_reg = val;
dma_sconfig.device_fc = true;
+
if (tqspi->cur_direction & DATA_DIR_TX) {
- dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
- dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.dst_maxburst = dma_burst;
- ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
- return ret;
- }
+ if (tqspi->tx_dma_chan) {
+ dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+ return ret;
+ }
- tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
- ret = tegra_qspi_start_tx_dma(tqspi, t, len);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
- return ret;
+ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+ ret = tegra_qspi_start_tx_dma(tqspi, t, len);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
+ return ret;
+ }
+ } else {
+ if (tqspi->is_packed)
+ tx_dma_phys = t->tx_dma;
+ else
+ tx_dma_phys = tqspi->tx_dma_phys;
+ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+ tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys),
+ QSPI_DMA_MEM_ADDRESS);
+ tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff),
+ QSPI_DMA_HI_ADDRESS);
}
}
if (tqspi->cur_direction & DATA_DIR_RX) {
- dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
- dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.src_maxburst = dma_burst;
- ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
- return ret;
- }
-
- dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
- tqspi->dma_buf_size,
- DMA_FROM_DEVICE);
+ if (tqspi->rx_dma_chan) {
+ dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+ return ret;
+ }
- ret = tegra_qspi_start_rx_dma(tqspi, t, len);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
- if (tqspi->cur_direction & DATA_DIR_TX)
- dmaengine_terminate_all(tqspi->tx_dma_chan);
- return ret;
+ dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
+ tqspi->dma_buf_size, DMA_FROM_DEVICE);
+ ret = tegra_qspi_start_rx_dma(tqspi, t, len);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ return ret;
+ }
+ } else {
+ if (tqspi->is_packed)
+ rx_dma_phys = t->rx_dma;
+ else
+ rx_dma_phys = tqspi->rx_dma_phys;
+
+ tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys),
+ QSPI_DMA_MEM_ADDRESS);
+ tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff),
+ QSPI_DMA_HI_ADDRESS);
}
}
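
The attention-level/burst choice above picks the largest burst that evenly divides the transfer length. A standalone sketch of that selection (lengths in bytes, bursts in 32-bit words):

#include <stdio.h>

/* Mirrors the trigger-level logic: bursts must evenly divide the length,
 * so fall back to smaller bursts for lengths that are not 32-byte aligned. */
static unsigned int pick_dma_burst(unsigned int len)
{
	if (len & 0xf)			/* not a multiple of 16 bytes */
		return 1;
	else if ((len >> 4) & 0x1)	/* multiple of 16 but not of 32 */
		return 4;
	else
		return 8;
}

int main(void)
{
	unsigned int lens[] = { 12, 16, 48, 64, 256 };

	for (unsigned int i = 0; i < 5; i++)
		printf("len=%u -> burst %u words\n", lens[i],
		       pick_dma_burst(lens[i]));
	return 0;
}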
@@ -721,9 +758,6 @@ static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct s
static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
- if (!tqspi->soc_data->has_dma)
- return;
-
if (tqspi->tx_dma_buf) {
dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
tqspi->tx_dma_buf, tqspi->tx_dma_phys);
@@ -754,16 +788,29 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
u32 *dma_buf;
int err;
- if (!tqspi->soc_data->has_dma)
- return 0;
+ if (tqspi->soc_data->has_ext_dma) {
+ dma_chan = dma_request_chan(tqspi->dev, "rx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
- dma_chan = dma_request_chan(tqspi->dev, "rx");
- if (IS_ERR(dma_chan)) {
- err = PTR_ERR(dma_chan);
- goto err_out;
- }
+ tqspi->rx_dma_chan = dma_chan;
- tqspi->rx_dma_chan = dma_chan;
+ dma_chan = dma_request_chan(tqspi->dev, "tx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
+
+ tqspi->tx_dma_chan = dma_chan;
+ } else {
+ if (!device_iommu_mapped(tqspi->dev)) {
+ dev_warn(tqspi->dev,
+ "IOMMU not enabled in device-tree, falling back to PIO mode\n");
+ return 0;
+ }
+ }
dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
if (!dma_buf) {
@@ -774,14 +821,6 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
tqspi->rx_dma_buf = dma_buf;
tqspi->rx_dma_phys = dma_phys;
- dma_chan = dma_request_chan(tqspi->dev, "tx");
- if (IS_ERR(dma_chan)) {
- err = PTR_ERR(dma_chan);
- goto err_out;
- }
-
- tqspi->tx_dma_chan = dma_chan;
-
dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
if (!dma_buf) {
err = -ENOMEM;
@@ -1036,10 +1075,6 @@ static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
{
u32 addr_config = 0;
- /* Extract Address configuration and value */
- is_ddr = 0; //Only SDR mode supported
- bus_width = 0; //X1 mode
-
if (is_ddr)
addr_config |= QSPI_ADDRESS_SDR_DDR;
else
@@ -1079,16 +1114,23 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
switch (transfer_phase) {
case CMD_TRANSFER:
/* X1 SDR mode */
- cmd_config = tegra_qspi_cmd_config(false, 0,
+ cmd_config = tegra_qspi_cmd_config(false, xfer->tx_nbits,
xfer->len);
cmd_value = *((const u8 *)(xfer->tx_buf));
break;
case ADDR_TRANSFER:
/* X1 SDR mode */
- addr_config = tegra_qspi_addr_config(false, 0,
+ addr_config = tegra_qspi_addr_config(false, xfer->tx_nbits,
xfer->len);
address_value = *((const u32 *)(xfer->tx_buf));
break;
+ case DUMMY_TRANSFER:
+ if (xfer->dummy_data) {
+ tqspi->dummy_cycles = xfer->len * 8 / xfer->tx_nbits;
+ break;
+ }
+ transfer_phase++;
+ fallthrough;
case DATA_TRANSFER:
/* Program Command, Address value in register */
tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
@@ -1117,18 +1159,17 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
(&tqspi->xfer_completion,
QSPI_DMA_TIMEOUT);
- if (WARN_ON(ret == 0)) {
- dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
- ret);
- if (tqspi->is_curr_dma_xfer &&
- (tqspi->cur_direction & DATA_DIR_TX))
- dmaengine_terminate_all
- (tqspi->tx_dma_chan);
-
- if (tqspi->is_curr_dma_xfer &&
- (tqspi->cur_direction & DATA_DIR_RX))
- dmaengine_terminate_all
- (tqspi->rx_dma_chan);
+ if (WARN_ON_ONCE(ret == 0)) {
+ dev_err_ratelimited(tqspi->dev,
+ "QSPI Transfer failed with timeout\n");
+ if (tqspi->is_curr_dma_xfer) {
+ if ((tqspi->cur_direction & DATA_DIR_TX) &&
+ tqspi->tx_dma_chan)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ if ((tqspi->cur_direction & DATA_DIR_RX) &&
+ tqspi->rx_dma_chan)
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ }
/* Abort transfer by resetting pio/dma bit */
if (!tqspi->is_curr_dma_xfer) {
@@ -1163,26 +1204,22 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
ret = -EIO;
goto exit;
}
- if (!xfer->cs_change) {
- tegra_qspi_transfer_end(spi);
- spi_transfer_delay_exec(xfer);
- }
break;
default:
ret = -EINVAL;
goto exit;
}
msg->actual_length += xfer->len;
+ if (!xfer->cs_change && transfer_phase == DATA_TRANSFER) {
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
transfer_phase++;
}
ret = 0;
exit:
msg->status = ret;
- if (ret < 0) {
- tegra_qspi_transfer_end(spi);
- spi_transfer_delay_exec(xfer);
- }
return ret;
}
@@ -1247,10 +1284,12 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tqspi->dev, "transfer timeout\n");
- if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
- dmaengine_terminate_all(tqspi->tx_dma_chan);
- if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
- dmaengine_terminate_all(tqspi->rx_dma_chan);
+ if (tqspi->is_curr_dma_xfer) {
+ if ((tqspi->cur_direction & DATA_DIR_TX) && tqspi->tx_dma_chan)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ if ((tqspi->cur_direction & DATA_DIR_RX) && tqspi->rx_dma_chan)
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ }
tegra_qspi_handle_error(tqspi);
ret = -EIO;
goto complete_xfer;
@@ -1300,7 +1339,9 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
transfer_count++;
}
- if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
+ if (!tqspi->soc_data->cmb_xfer_capable)
+ return false;
+ if (transfer_count > 4 || transfer_count < 3)
return false;
xfer = list_first_entry(&msg->transfers, typeof(*xfer),
transfer_list);
@@ -1310,7 +1351,14 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
if (xfer->len > 4 || xfer->len < 3)
return false;
xfer = list_next_entry(xfer, transfer_list);
- if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
+ if (transfer_count == 4) {
+ if (xfer->dummy_data != 1)
+ return false;
+ if ((xfer->len * 8 / xfer->tx_nbits) > QSPI_DUMMY_CYCLES_MAX)
+ return false;
+ xfer = list_next_entry(xfer, transfer_list);
+ }
+ if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
return false;
return true;
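
For the new 4-transfer combined sequence, the dummy transfer is only accepted when its cycle count fits the controller limit; cycles are derived the same way as elsewhere, len * 8 / tx_nbits. A sketch of the bound check (QSPI_DUMMY_CYCLES_MAX is assumed to be 64 here; the real value lives in the driver):

#include <stdbool.h>
#include <stdio.h>

#define QSPI_DUMMY_CYCLES_MAX	64U	/* assumed controller limit */

/* A dummy transfer of len bytes on tx_nbits lines costs len * 8 / nbits cycles */
static bool dummy_xfer_ok(unsigned int len, unsigned int tx_nbits)
{
	return (len * 8 / tx_nbits) <= QSPI_DUMMY_CYCLES_MAX;
}

int main(void)
{
	/* 4 dummy bytes on x4 -> 8 cycles: fine; 16 bytes on x1 -> 128: too many */
	printf("%d %d\n", dummy_xfer_ok(4, 4), dummy_xfer_ok(16, 1));
	return 0;
}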
@@ -1371,41 +1419,43 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
unsigned int total_fifo_words;
unsigned long flags;
long wait_status;
- int err = 0;
+ int num_errors = 0;
if (tqspi->cur_direction & DATA_DIR_TX) {
if (tqspi->tx_status) {
- dmaengine_terminate_all(tqspi->tx_dma_chan);
- err += 1;
- } else {
+ if (tqspi->tx_dma_chan)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ num_errors++;
+ } else if (tqspi->tx_dma_chan) {
wait_status = wait_for_completion_interruptible_timeout(
&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tqspi->tx_dma_chan);
dev_err(tqspi->dev, "failed TX DMA transfer\n");
- err += 1;
+ num_errors++;
}
}
}
if (tqspi->cur_direction & DATA_DIR_RX) {
if (tqspi->rx_status) {
- dmaengine_terminate_all(tqspi->rx_dma_chan);
- err += 2;
- } else {
+ if (tqspi->rx_dma_chan)
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ num_errors++;
+ } else if (tqspi->rx_dma_chan) {
wait_status = wait_for_completion_interruptible_timeout(
&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tqspi->rx_dma_chan);
dev_err(tqspi->dev, "failed RX DMA transfer\n");
- err += 2;
+ num_errors++;
}
}
}
spin_lock_irqsave(&tqspi->lock, flags);
- if (err) {
+ if (num_errors) {
tegra_qspi_dma_unmap_xfer(tqspi, t);
tegra_qspi_handle_error(tqspi);
complete(&tqspi->xfer_completion);
@@ -1431,9 +1481,9 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
/* continue transfer in current message */
total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
if (total_fifo_words > QSPI_FIFO_DEPTH)
- err = tegra_qspi_start_dma_based_transfer(tqspi, t);
+ num_errors = tegra_qspi_start_dma_based_transfer(tqspi, t);
else
- err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
+ num_errors = tegra_qspi_start_cpu_based_transfer(tqspi, t);
exit:
spin_unlock_irqrestore(&tqspi->lock, flags);
@@ -1461,28 +1511,28 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
}
static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
- .has_dma = true,
+ .has_ext_dma = true,
.cmb_xfer_capable = false,
.supports_tpm = false,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
- .has_dma = true,
+ .has_ext_dma = true,
.cmb_xfer_capable = true,
.supports_tpm = false,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
- .has_dma = false,
+ .has_ext_dma = false,
.cmb_xfer_capable = true,
.supports_tpm = true,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
- .has_dma = false,
+ .has_ext_dma = true,
.cmb_xfer_capable = true,
.supports_tpm = true,
.cs_count = 4,
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 9122350402b5..a284d2794586 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -623,7 +623,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
mutex_lock(&qspi->list_lock);
if (!qspi->mmap_enabled || qspi->current_cs != spi_get_chipselect(mem->spi, 0)) {
- ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz);
+ ti_qspi_setup_clk(qspi, op->max_freq);
ti_qspi_enable_memory_map(mem->spi);
}
ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
@@ -658,6 +658,10 @@ static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.adjust_op_size = ti_qspi_adjust_op_size,
};
+static const struct spi_controller_mem_caps ti_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int ti_qspi_start_transfer_one(struct spi_controller *host,
struct spi_message *m)
{
@@ -777,6 +781,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
host->mem_ops = &ti_qspi_mem_ops;
+ host->mem_caps = &ti_qspi_mem_caps;
if (!of_property_read_u32(np, "num-cs", &num_cs))
host->num_chipselect = num_cs;
@@ -826,20 +831,12 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (of_property_present(np, "syscon-chipselects")) {
qspi->ctrl_base =
- syscon_regmap_lookup_by_phandle(np,
- "syscon-chipselects");
+ syscon_regmap_lookup_by_phandle_args(np, "syscon-chipselects",
+ 1, &qspi->ctrl_reg);
if (IS_ERR(qspi->ctrl_base)) {
ret = PTR_ERR(qspi->ctrl_base);
goto free_host;
}
- ret = of_property_read_u32_index(np,
- "syscon-chipselects",
- 1, &qspi->ctrl_reg);
- if (ret) {
- dev_err(&pdev->dev,
- "couldn't get ctrl_mod reg index\n");
- goto free_host;
- }
}
qspi->fclk = devm_clk_get(&pdev->dev, "fck");
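
The regmap lookup and the separate of_property_read_u32_index() call collapse into one syscon_regmap_lookup_by_phandle_args() call that returns the phandle argument as well. A hedged sketch of the pattern, assuming a DT property like syscon-chipselects = <&scm_conf 0x558>; (the helper name and trimmed error handling are this sketch's own):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* lookup_ctrl_mod() is a hypothetical helper, not part of the driver */
static int lookup_ctrl_mod(struct device_node *np, struct regmap **base,
			   u32 *reg)
{
	*base = syscon_regmap_lookup_by_phandle_args(np, "syscon-chipselects",
						     1, reg);
	if (IS_ERR(*base))
		return PTR_ERR(*base);

	/* *reg now holds the single phandle argument, e.g. 0x558 */
	return 0;
}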
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 3bd0149d8f4e..1a40c4866ce1 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -44,8 +44,8 @@ struct spi_xcomm {
u8 buf[63];
};
-static void spi_xcomm_gpio_set_value(struct gpio_chip *chip,
- unsigned int offset, int val)
+static int spi_xcomm_gpio_set_value(struct gpio_chip *chip,
+ unsigned int offset, int val)
{
struct spi_xcomm *spi_xcomm = gpiochip_get_data(chip);
unsigned char buf[2];
@@ -53,7 +53,7 @@ static void spi_xcomm_gpio_set_value(struct gpio_chip *chip,
buf[0] = SPI_XCOMM_CMD_GPIO_SET;
buf[1] = !!val;
- i2c_master_send(spi_xcomm->i2c, buf, 2);
+ return i2c_master_send(spi_xcomm->i2c, buf, 2);
}
static int spi_xcomm_gpio_get_direction(struct gpio_chip *chip,
@@ -70,7 +70,7 @@ static int spi_xcomm_gpio_add(struct spi_xcomm *spi_xcomm)
return 0;
spi_xcomm->gc.get_direction = spi_xcomm_gpio_get_direction;
- spi_xcomm->gc.set = spi_xcomm_gpio_set_value;
+ spi_xcomm->gc.set_rv = spi_xcomm_gpio_set_value;
spi_xcomm->gc.can_sleep = 1;
spi_xcomm->gc.base = -1;
spi_xcomm->gc.ngpio = 1;
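
The old void .set callback silently dropped the result of i2c_master_send(); the .set_rv variant propagates it back to gpiolib. A toy model of the difference (stubbed I2C write; the opcode value and names are this sketch's inventions):

#include <stdio.h>

/* Stub: like i2c_master_send(), returns bytes written or a negative errno */
static int i2c_send_stub(const unsigned char *buf, int len)
{
	(void)buf;
	return len;
}

/* old-style callback: any I2C failure is silently dropped */
static void gpio_set(unsigned int offset, int val)
{
	unsigned char buf[2] = { 0x02 /* assumed GPIO_SET opcode */, !!val };

	(void)offset;
	i2c_send_stub(buf, 2);
}

/* set_rv-style callback: the transfer result reaches the caller */
static int gpio_set_rv(unsigned int offset, int val)
{
	unsigned char buf[2] = { 0x02, !!val };

	(void)offset;
	return i2c_send_stub(buf, 2);
}

int main(void)
{
	gpio_set(0, 1);
	printf("set_rv -> %d\n", gpio_set_rv(0, 1));
	return 0;
}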
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index dee9c339a35e..5232483c4a3a 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -318,6 +318,7 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
* zynq_qspi_config_op - Configure QSPI controller for specified transfer
* @xqspi: Pointer to the zynq_qspi structure
* @spi: Pointer to the spi_device structure
+ * @op: The memory operation to execute
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
@@ -331,7 +332,8 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
* controller the driver will set the highest or lowest frequency supported by
* controller.
*/
-static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
+static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
u32 config_reg, baud_rate_val = 0;
@@ -346,7 +348,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
*/
while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) &&
(clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
- spi->max_speed_hz)
+ op->max_freq)
baud_rate_val++;
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
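The divider loop above picks the smallest divisor whose output does not exceed the per-op limit; the effective bus clock is refclk / (2 << baud_rate_val), i.e. ref/2, ref/4, ref/8, ... as the field increments, capped by ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX. The selection, isolated as a standalone sketch:

static u32 demo_pick_baud_div(unsigned long ref_hz, u32 max_freq_hz)
{
	u32 baud = 0;

	/* Smallest divider whose output stays at or below the limit. */
	while (baud < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX &&
	       ref_hz / (2 << baud) > max_freq_hz)
		baud++;

	return baud;	/* bus clock = ref_hz / (2 << baud) */
}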
@@ -379,12 +381,21 @@ static int zynq_qspi_setup_op(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
+ int ret;
if (ctlr->busy)
return -EBUSY;
- clk_enable(qspi->refclk);
- clk_enable(qspi->pclk);
+ ret = clk_enable(qspi->refclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(qspi->pclk);
+ if (ret) {
+ clk_disable(qspi->refclk);
+ return ret;
+ }
+
zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
ZYNQ_QSPI_ENABLE_ENABLE_MASK);
@@ -529,12 +540,8 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
int err = 0, i;
u8 *tmpbuf;
- dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth);
-
zynq_qspi_chipselect(mem->spi, true);
- zynq_qspi_config_op(xqspi, mem->spi);
+ zynq_qspi_config_op(xqspi, mem->spi, op);
if (op->cmd.opcode) {
reinit_completion(&xqspi->data_completion);
@@ -620,6 +627,10 @@ static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
.exec_op = zynq_qspi_exec_mem_op,
};
+static const struct spi_controller_mem_caps zynq_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
/**
* zynq_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
@@ -706,6 +717,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->mem_ops = &zynq_qspi_mem_ops;
+ ctlr->mem_caps = &zynq_qspi_mem_caps;
ctlr->setup = zynq_qspi_setup_op;
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
ctlr->dev.of_node = np;
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 549a6e0c9654..595b6dc10845 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -82,7 +82,6 @@
#define GQSPI_GENFIFO_RX 0x00020000
#define GQSPI_GENFIFO_STRIPE 0x00040000
#define GQSPI_GENFIFO_POLL 0x00080000
-#define GQSPI_GENFIFO_EXP_START 0x00000100
#define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004
#define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002
#define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001
@@ -535,7 +534,7 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
* zynqmp_qspi_config_op - Configure QSPI controller for specified
* transfer
* @xqspi: Pointer to the zynqmp_qspi structure
- * @qspi: Pointer to the spi_device structure
+ * @op: The memory operation to execute
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
@@ -553,12 +552,12 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
* frequency supported by controller.
*/
static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
- struct spi_device *qspi)
+ const struct spi_mem_op *op)
{
ulong clk_rate;
u32 config_reg, req_speed_hz, baud_rate_val = 0;
- req_speed_hz = qspi->max_speed_hz;
+ req_speed_hz = op->max_freq;
if (xqspi->speed_hz != req_speed_hz) {
xqspi->speed_hz = req_speed_hz;
@@ -580,6 +579,8 @@ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
}
+
+ dev_dbg(xqspi->dev, "config speed %u\n", req_speed_hz);
return 0;
}
@@ -670,69 +671,77 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
u32 genfifoentry)
{
- u32 transfer_len = 0;
+ u32 transfer_len, tempcount, exponent;
+ u8 imm_data;
- if (xqspi->txbuf) {
- genfifoentry &= ~GQSPI_GENFIFO_RX;
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
- genfifoentry |= GQSPI_GENFIFO_TX;
- transfer_len = xqspi->bytes_to_transfer;
- } else if (xqspi->rxbuf) {
- genfifoentry &= ~GQSPI_GENFIFO_TX;
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ if (xqspi->rxbuf) {
genfifoentry |= GQSPI_GENFIFO_RX;
if (xqspi->mode == GQSPI_MODE_DMA)
transfer_len = xqspi->dma_rx_bytes;
else
transfer_len = xqspi->bytes_to_receive;
} else {
- /* Sending dummy circles here */
- genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
transfer_len = xqspi->bytes_to_transfer;
}
+
+ if (xqspi->txbuf)
+ genfifoentry |= GQSPI_GENFIFO_TX;
+
genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
xqspi->genfifoentry = genfifoentry;
-
- if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
- genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= transfer_len;
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
- } else {
- int tempcount = transfer_len;
- u32 exponent = 8; /* 2^8 = 256 */
- u8 imm_data = tempcount & 0xFF;
-
- tempcount &= ~(tempcount & 0xFF);
- /* Immediate entry */
- if (tempcount != 0) {
- /* Exponent entries */
- genfifoentry |= GQSPI_GENFIFO_EXP;
- while (tempcount != 0) {
- if (tempcount & GQSPI_GENFIFO_EXP_START) {
- genfifoentry &=
- ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= exponent;
- zynqmp_gqspi_write(xqspi,
- GQSPI_GEN_FIFO_OFST,
- genfifoentry);
- }
- tempcount = tempcount >> 1;
- exponent++;
- }
- }
- if (imm_data != 0) {
- genfifoentry &= ~GQSPI_GENFIFO_EXP;
- genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= (u8)(imm_data & 0xFF);
+ dev_dbg(xqspi->dev, "genfifo %05x transfer_len %u\n",
+ genfifoentry, transfer_len);
+
+ /* Exponent entries */
+ imm_data = transfer_len;
+ tempcount = transfer_len >> 8;
+ exponent = 8;
+ genfifoentry |= GQSPI_GENFIFO_EXP;
+ while (tempcount) {
+ if (tempcount & 1)
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
- genfifoentry);
- }
- }
- if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) {
- /* Dummy generic FIFO entry */
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
+ genfifoentry | exponent);
+ tempcount >>= 1;
+ exponent++;
}
+
+ /* Immediate entry */
+ genfifoentry &= ~GQSPI_GENFIFO_EXP;
+ if (imm_data)
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
+ genfifoentry | imm_data);
+
+ /* Dummy generic FIFO entry */
+ if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf)
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0);
+}
+
+/**
+ * zynqmp_qspi_disable_dma() - Disable DMA mode
+ * @xqspi: GQSPI instance
+ */
+static void zynqmp_qspi_disable_dma(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->mode = GQSPI_MODE_IO;
+}
+
+/**
+ * zynqmp_qspi_enable_dma() - Enable DMA mode
+ * @xqspi: GQSPI instance
+ */
+static void zynqmp_qspi_enable_dma(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->mode = GQSPI_MODE_DMA;
}
/**
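The fillgenfifo rewrite drops the GQSPI_GENFIFO_EXP_START bit-walk in favor of a plain shift loop, but the encoding is unchanged: a GENFIFO entry carries at most 8 bits of immediate length, so len % 256 goes out as one immediate entry and every set bit of len / 256 becomes one exponent entry transferring 2^n bytes. A self-contained sketch of the decomposition, with demo_push_entry() standing in for the GENFIFO register write:

static void demo_encode_len(u32 len)
{
	u8 imm = len;		/* low 8 bits -> one immediate entry */
	u32 rest = len >> 8;	/* upper bits -> exponent entries */
	u32 exp = 8;

	while (rest) {
		if (rest & 1)
			demo_push_entry(GQSPI_GENFIFO_EXP | exp); /* 2^exp bytes */
		rest >>= 1;
		exp++;
	}
	if (imm)
		demo_push_entry(imm);	/* EXP bit clear */
}

For example, len = 837 (0x345) yields exponent entries for 2^9 and 2^8 plus an immediate entry of 0x45 (512 + 256 + 69 = 837).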
@@ -744,7 +753,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
*/
static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
{
- u32 config_reg, genfifoentry;
+ u32 genfifoentry;
dma_unmap_single(xqspi->dev, xqspi->dma_addr,
xqspi->dma_rx_bytes, DMA_FROM_DEVICE);
@@ -758,9 +767,7 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
if (xqspi->bytes_to_receive > 0) {
/* Switch to IO mode, for remaining bytes to receive */

- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ zynqmp_qspi_disable_dma(xqspi);
/* Initiate the transfer of remaining bytes */
genfifoentry = xqspi->genfifoentry;
@@ -799,7 +806,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
{
struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id;
- irqreturn_t ret = IRQ_NONE;
u32 status, mask, dma_status = 0;
status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
@@ -814,27 +820,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
dma_status);
}
- if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
+ if (!mask && !dma_status)
+ return IRQ_NONE;
+
+ if (mask & GQSPI_ISR_TXNOT_FULL_MASK)
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL);
- ret = IRQ_HANDLED;
- }
- if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
+ if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK)
zynqmp_process_dma_irq(xqspi);
- ret = IRQ_HANDLED;
- } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
- (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
+ else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+ (mask & GQSPI_IER_GENFIFOEMPTY_MASK))
zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
- ret = IRQ_HANDLED;
- }
if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 &&
((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
complete(&xqspi->data_completion);
- ret = IRQ_HANDLED;
}
- return ret;
+ return IRQ_HANDLED;
}
/**
@@ -845,17 +848,14 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
*/
static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
{
- u32 rx_bytes, rx_rem, config_reg;
+ u32 rx_bytes, rx_rem;
dma_addr_t addr;
u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
if (xqspi->bytes_to_receive < 8 ||
((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
/* Setting to IO mode */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
- xqspi->mode = GQSPI_MODE_IO;
+ zynqmp_qspi_disable_dma(xqspi);
xqspi->dma_rx_bytes = 0;
return 0;
}
@@ -878,14 +878,7 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST,
((u32)addr) & 0xfff);
- /* Enabling the DMA mode */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
-
- /* Switch to DMA mode */
- xqspi->mode = GQSPI_MODE_DMA;
+ zynqmp_qspi_enable_dma(xqspi);
/* Write the number of bytes to transfer */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
@@ -905,18 +898,10 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
u32 genfifoentry)
{
- u32 config_reg;
-
zynqmp_qspi_fillgenfifo(xqspi, tx_nbits, genfifoentry);
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH);
- if (xqspi->mode == GQSPI_MODE_DMA) {
- config_reg = zynqmp_gqspi_read(xqspi,
- GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
- config_reg);
- xqspi->mode = GQSPI_MODE_IO;
- }
+ if (xqspi->mode == GQSPI_MODE_DMA)
+ zynqmp_qspi_disable_dma(xqspi);
}
/**
@@ -1059,20 +1044,16 @@ static unsigned long zynqmp_qspi_timeout(struct zynqmp_qspi *xqspi, u8 bits,
static int zynqmp_qspi_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct zynqmp_qspi *xqspi = spi_controller_get_devdata
- (mem->spi->controller);
+ struct zynqmp_qspi *xqspi =
+ spi_controller_get_devdata(mem->spi->controller);
unsigned long timeout;
int err = 0, i;
u32 genfifoentry = 0;
u16 opcode = op->cmd.opcode;
u64 opaddr;
- dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth);
-
mutex_lock(&xqspi->op_lock);
- zynqmp_qspi_config_op(xqspi, mem->spi);
+ zynqmp_qspi_config_op(xqspi, op);
zynqmp_qspi_chipselect(mem->spi, false);
genfifoentry |= xqspi->genfifocs;
genfifoentry |= xqspi->genfifobus;
@@ -1224,6 +1205,10 @@ static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
.exec_op = zynqmp_qspi_exec_op,
};
+static const struct spi_controller_mem_caps zynqmp_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
/**
* zynqmp_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
@@ -1333,6 +1318,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mem_ops = &zynqmp_qspi_mem_ops;
+ ctlr->mem_caps = &zynqmp_qspi_mem_caps;
ctlr->setup = zynqmp_qspi_setup_op;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->dev.of_node = np;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ff1add2ecb91..1bc0fdbb1bd7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -31,6 +31,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
+#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>
@@ -42,7 +43,7 @@ EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
-static DEFINE_IDR(spi_master_idr);
+static DEFINE_IDR(spi_controller_idr);
static void spidev_release(struct device *dev)
{
@@ -305,7 +306,7 @@ static const struct attribute_group spi_controller_statistics_group = {
.attrs = spi_controller_statistics_attrs,
};
-static const struct attribute_group *spi_master_groups[] = {
+static const struct attribute_group *spi_controller_groups[] = {
&spi_controller_statistics_group,
NULL,
};
@@ -410,29 +411,21 @@ static int spi_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
struct spi_device *spi = to_spi_device(dev);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
int ret;
ret = of_clk_set_defaults(dev->of_node, false);
if (ret)
return ret;
- if (dev->of_node) {
+ if (is_of_node(fwnode))
spi->irq = of_irq_get(dev->of_node, 0);
- if (spi->irq == -EPROBE_DEFER)
- return dev_err_probe(dev, -EPROBE_DEFER, "Failed to get irq\n");
- if (spi->irq < 0)
- spi->irq = 0;
- }
-
- if (has_acpi_companion(dev) && spi->irq < 0) {
- struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
-
- spi->irq = acpi_dev_gpio_irq_get(adev, 0);
- if (spi->irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (spi->irq < 0)
- spi->irq = 0;
- }
+ else if (is_acpi_device_node(fwnode) && spi->irq < 0)
+ spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
+ if (spi->irq == -EPROBE_DEFER)
+ return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
+ if (spi->irq < 0)
+ spi->irq = 0;
ret = dev_pm_domain_attach(dev, true);
if (ret)
@@ -874,15 +867,18 @@ EXPORT_SYMBOL_GPL(spi_new_device);
*/
void spi_unregister_device(struct spi_device *spi)
{
+ struct fwnode_handle *fwnode;
+
if (!spi)
return;
- if (spi->dev.of_node) {
- of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
- of_node_put(spi->dev.of_node);
+ fwnode = dev_fwnode(&spi->dev);
+ if (is_of_node(fwnode)) {
+ of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
+ of_node_put(to_of_node(fwnode));
+ } else if (is_acpi_device_node(fwnode)) {
+ acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
}
- if (ACPI_COMPANION(&spi->dev))
- acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_remove_software_node(&spi->dev);
device_del(&spi->dev);
spi_cleanup(spi);
@@ -1059,7 +1055,7 @@ static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool
* ambiguity. That's why we use enable, that takes SPI_CS_HIGH
* into account.
*/
- if (has_acpi_companion(&spi->dev))
+ if (is_acpi_device_node(dev_fwnode(&spi->dev)))
gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
else
/* Polarity handled by GPIO library */
@@ -1080,10 +1076,8 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* Avoid calling into the driver (or doing delays) if the chip select
* isn't actually changing from the last time this was called.
*/
- if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
- spi_is_last_cs(spi)) ||
- (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
- !spi_is_last_cs(spi))) &&
+ if (!force && (enable == spi_is_last_cs(spi)) &&
+ (spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
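The chip-select short-circuit collapse is a pure boolean identity. Writing same = (last_cs_index_mask == cs_index_mask) and last = spi_is_last_cs(spi), the old test

	(enable && same && last) || (!enable && same && !last)

factors to same && ((enable && last) || (!enable && !last)), and for booleans (a && b) || (!a && !b) is exactly a == b, which gives the new same && (enable == last) with no behavior change.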
@@ -1092,9 +1086,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi->controller->last_cs_index_mask = spi->cs_index_mask;
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
- spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
- if (spi->mode & SPI_CS_HIGH)
+ spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
+ if (spi->controller->last_cs_mode_high)
enable = !enable;
/*
@@ -1111,7 +1105,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi_toggle_csgpiod(spi, idx, enable, activate);
}
}
- /* Some SPI masters need both GPIO CS & slave_select */
+ /* Some SPI controllers need both GPIO CS & ->set_cs() */
if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
@@ -1500,10 +1494,7 @@ static void _spi_transfer_delay_ns(u32 ns)
} else {
u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
- if (us <= 10)
- udelay(us);
- else
- usleep_range(us, us + DIV_ROUND_UP(us, 10));
+ fsleep(us);
}
}
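fsleep() already embodies the short/long split the driver open-coded, choosing a backend by duration so callers don't have to. A paraphrase of the helper's policy (from the kernel's delay documentation; exact thresholds may drift between releases):

/* Roughly what fsleep(usecs) does: */
if (usecs <= 10)
	udelay(usecs);				/* busy-wait tiny delays */
else if (usecs <= 20000)
	usleep_range(usecs, 2 * usecs);		/* hrtimer sleep with slack */
else
	msleep(DIV_ROUND_UP(usecs, 1000));	/* jiffies-based sleep */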
@@ -2060,7 +2051,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
ctlr->busy = false;
ctlr->queue_empty = true;
- ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+ ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
if (IS_ERR(ctlr->kworker)) {
dev_err(&ctlr->dev, "failed to create message pump kworker\n");
return PTR_ERR(ctlr->kworker);
@@ -2539,7 +2530,7 @@ err_out:
* @ctlr: Pointer to spi_controller device
*
* Registers an spi_device for each child node of controller node which
- * represents a valid SPI slave.
+ * represents a valid SPI target device.
*/
static void of_register_spi_devices(struct spi_controller *ctlr)
{
@@ -2824,7 +2815,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
if (!lookup.max_speed_hz &&
ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
- /* Apple does not use _CRS but nested devices for SPI slaves */
+ /* Apple does not use _CRS but nested devices for SPI target devices */
acpi_spi_parse_apple_properties(adev, &lookup);
}
@@ -2916,7 +2907,7 @@ static void acpi_register_spi_devices(struct spi_controller *ctlr)
SPI_ACPI_ENUMERATE_MAX_DEPTH,
acpi_spi_add_device, NULL, ctlr, NULL);
if (ACPI_FAILURE(status))
- dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
+ dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
@@ -2930,16 +2921,15 @@ static void spi_controller_release(struct device *dev)
kfree(ctlr);
}
-static const struct class spi_master_class = {
+static const struct class spi_controller_class = {
.name = "spi_master",
.dev_release = spi_controller_release,
- .dev_groups = spi_master_groups,
+ .dev_groups = spi_controller_groups,
};
#ifdef CONFIG_SPI_SLAVE
/**
- * spi_target_abort - abort the ongoing transfer request on an SPI slave
- * controller
+ * spi_target_abort - abort the ongoing transfer request on an SPI target controller
* @spi: device used for the current transfer
*/
int spi_target_abort(struct spi_device *spi)
@@ -2959,9 +2949,13 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
struct device *child;
+ int ret;
child = device_find_any_child(&ctlr->dev);
- return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ put_device(child);
+
+ return ret;
}
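This plugs a device reference leak: device_find_any_child() returns its result with a reference held, and the old one-liner never dropped it. The balanced pattern, as applied above (put_device(NULL) is a harmless no-op, so no NULL check is needed):

child = device_find_any_child(&ctlr->dev);	/* takes a reference */
ret = sysfs_emit(buf, "%s\n",
		 child ? to_spi_device(child)->modalias : NULL);
put_device(child);
return ret;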
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
@@ -2980,13 +2974,13 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
child = device_find_any_child(&ctlr->dev);
if (child) {
- /* Remove registered slave */
+ /* Remove registered target device */
device_unregister(child);
put_device(child);
}
if (strcmp(name, "(null)")) {
- /* Register new slave */
+ /* Register new target device */
spi = spi_alloc_device(ctlr);
if (!spi)
return -ENOMEM;
@@ -3005,40 +2999,40 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(slave);
-static struct attribute *spi_slave_attrs[] = {
+static struct attribute *spi_target_attrs[] = {
&dev_attr_slave.attr,
NULL,
};
-static const struct attribute_group spi_slave_group = {
- .attrs = spi_slave_attrs,
+static const struct attribute_group spi_target_group = {
+ .attrs = spi_target_attrs,
};
-static const struct attribute_group *spi_slave_groups[] = {
+static const struct attribute_group *spi_target_groups[] = {
&spi_controller_statistics_group,
- &spi_slave_group,
+ &spi_target_group,
NULL,
};
-static const struct class spi_slave_class = {
+static const struct class spi_target_class = {
.name = "spi_slave",
.dev_release = spi_controller_release,
- .dev_groups = spi_slave_groups,
+ .dev_groups = spi_target_groups,
};
#else
-extern struct class spi_slave_class; /* dummy */
+extern struct class spi_target_class; /* dummy */
#endif
/**
- * __spi_alloc_controller - allocate an SPI master or slave controller
+ * __spi_alloc_controller - allocate an SPI host or target controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
* memory is in the driver_data field of the returned device, accessible
* with spi_controller_get_devdata(); the memory is cacheline aligned;
* drivers granting DMA access to portions of their private data need to
* round up @size using ALIGN(size, dma_get_cache_alignment()).
- * @slave: flag indicating whether to allocate an SPI master (false) or SPI
- * slave (true) controller
+ * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
+ * controller
* Context: can sleep
*
* This call is used only by SPI controller drivers, which are the
@@ -3055,7 +3049,7 @@ extern struct class spi_slave_class; /* dummy */
* Return: the SPI controller structure on success, else NULL.
*/
struct spi_controller *__spi_alloc_controller(struct device *dev,
- unsigned int size, bool slave)
+ unsigned int size, bool target)
{
struct spi_controller *ctlr;
size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
@@ -3076,11 +3070,11 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
mutex_init(&ctlr->add_lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = 1;
- ctlr->slave = slave;
- if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
- ctlr->dev.class = &spi_slave_class;
+ ctlr->target = target;
+ if (IS_ENABLED(CONFIG_SPI_SLAVE) && target)
+ ctlr->dev.class = &spi_target_class;
else
- ctlr->dev.class = &spi_master_class;
+ ctlr->dev.class = &spi_controller_class;
ctlr->dev.parent = dev;
pm_suspend_ignore_children(&ctlr->dev, true);
spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
@@ -3098,7 +3092,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr)
* __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
* @dev: physical device of SPI controller
* @size: how much zeroed driver-private data to allocate
- * @slave: whether to allocate an SPI master (false) or SPI slave (true)
+ * @target: whether to allocate an SPI host (false) or SPI target (true) controller
* Context: can sleep
*
* Allocate an SPI controller and automatically release a reference on it
@@ -3111,7 +3105,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr)
*/
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
unsigned int size,
- bool slave)
+ bool target)
{
struct spi_controller **ptr, *ctlr;
@@ -3120,7 +3114,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
if (!ptr)
return NULL;
- ctlr = __spi_alloc_controller(dev, size, slave);
+ ctlr = __spi_alloc_controller(dev, size, target);
if (ctlr) {
ctlr->devm_allocated = true;
*ptr = ctlr;
@@ -3134,8 +3128,8 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
/**
- * spi_get_gpio_descs() - grab chip select GPIOs for the master
- * @ctlr: The SPI master to grab GPIO descriptors for
+ * spi_get_gpio_descs() - grab chip select GPIOs for the controller
+ * @ctlr: The SPI controller to grab GPIO descriptors for
*/
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
@@ -3233,7 +3227,7 @@ static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int e
int id;
mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
+ id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
return id == -ENOSPC ? -EBUSY : id;
@@ -3382,7 +3376,7 @@ destroy_queue:
spi_destroy_queue(ctlr);
free_bus_id:
mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
+ idr_remove(&spi_controller_idr, ctlr->bus_num);
mutex_unlock(&board_lock);
return status;
}
@@ -3394,8 +3388,7 @@ static void devm_spi_unregister(struct device *dev, void *res)
}
/**
- * devm_spi_register_controller - register managed SPI host or target
- * controller
+ * devm_spi_register_controller - register managed SPI host or target controller
* @dev: device managing SPI controller
* @ctlr: initialized controller, originally from spi_alloc_host() or
* spi_alloc_target()
@@ -3435,7 +3428,7 @@ static int __unregister(struct device *dev, void *null)
}
/**
- * spi_unregister_controller - unregister SPI master or slave controller
+ * spi_unregister_controller - unregister SPI host or target controller
* @ctlr: the controller being unregistered
* Context: can sleep
*
@@ -3459,7 +3452,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
- found = idr_find(&spi_master_idr, id);
+ found = idr_find(&spi_controller_idr, id);
mutex_unlock(&board_lock);
if (ctlr->queued) {
if (spi_destroy_queue(ctlr))
@@ -3474,7 +3467,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
/* Free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
- idr_remove(&spi_master_idr, id);
+ idr_remove(&spi_controller_idr, id);
mutex_unlock(&board_lock);
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
@@ -3807,7 +3800,7 @@ int spi_split_transfers_maxwords(struct spi_controller *ctlr,
size_t maxsize;
int ret;
- maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
+ maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word);
if (xfer->len > maxsize) {
ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
maxsize);
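spi_bpw_to_bytes() gives a name to the conversion that was previously open-coded; assuming it mirrors the expression it replaces, it rounds the per-word byte count up to a power of two so words stay naturally sized in memory:

/* Assumed equivalent of the helper (mirrors the removed expression): */
static u32 demo_bpw_to_bytes(u32 bpw)
{
	return roundup_pow_of_two(BITS_TO_BYTES(bpw));
}

/* e.g. 8 bits -> 1 byte, 12 bits -> 2, 17 bits -> 4, 32 bits -> 4 */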
@@ -4101,6 +4094,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
return -EINVAL;
+ /*
+ * DDR (DTR) transfers are supported only when the controller sets
+ * dtr_caps = true; otherwise SDR mode is assumed by default.
+ * Note: this check currently applies only to QSPI controllers.
+ */
+ if (xfer->dtr_mode && !ctlr->dtr_caps)
+ return -EINVAL;
+
/*
* SPI transfer length should be multiple of SPI word size
* where SPI word size should be power-of-two multiple.
@@ -4163,6 +4163,15 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (_spi_xfer_word_delay_update(xfer, spi))
return -EINVAL;
+
+ /* Make sure controller supports required offload features. */
+ if (xfer->offload_flags) {
+ if (!message->offload)
+ return -EINVAL;
+
+ if (xfer->offload_flags & ~message->offload->xfer_flags)
+ return -EINVAL;
+ }
}
message->status = -EINPROGRESS;
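The offload check treats xfer->offload_flags as a capability subset test: a transfer may only request features the message's offload provider advertised, and flags & ~supported isolates exactly the unsupported bits. A small worked example with hypothetical flag names:

#define DEMO_OFFLOAD_TX		BIT(0)
#define DEMO_OFFLOAD_RX		BIT(1)

u32 requested = DEMO_OFFLOAD_TX | DEMO_OFFLOAD_RX;	/* 0b11 */
u32 supported = DEMO_OFFLOAD_TX;			/* 0b01 */

/* requested & ~supported == 0b10 -> nonzero -> -EINVAL (RX missing) */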
@@ -4618,7 +4627,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
* spi_bus_lock - obtain a lock for exclusive SPI bus usage
- * @ctlr: SPI bus master that should be locked for exclusive bus access
+ * @ctlr: SPI bus controller that should be locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -4649,7 +4658,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
* spi_bus_unlock - release the lock for exclusive SPI bus usage
- * @ctlr: SPI bus master that was locked for exclusive bus access
+ * @ctlr: SPI bus controller that was locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -4766,9 +4775,9 @@ static struct spi_controller *of_find_spi_controller_by_node(struct device_node
{
struct device *dev;
- dev = class_find_device_by_of_node(&spi_master_class, node);
+ dev = class_find_device_by_of_node(&spi_controller_class, node);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device_by_of_node(&spi_slave_class, node);
+ dev = class_find_device_by_of_node(&spi_target_class, node);
if (!dev)
return NULL;
@@ -4841,17 +4850,17 @@ extern struct notifier_block spi_of_notifier;
#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
- return ACPI_COMPANION(dev->parent) == data;
+ return device_match_acpi_dev(dev->parent, data);
}
struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
struct device *dev;
- dev = class_find_device(&spi_master_class, NULL, adev,
+ dev = class_find_device(&spi_controller_class, NULL, adev,
spi_acpi_controller_match);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device(&spi_slave_class, NULL, adev,
+ dev = class_find_device(&spi_target_class, NULL, adev,
spi_acpi_controller_match);
if (!dev)
return NULL;
@@ -4921,12 +4930,12 @@ static int __init spi_init(void)
if (status < 0)
goto err1;
- status = class_register(&spi_master_class);
+ status = class_register(&spi_controller_class);
if (status < 0)
goto err2;
if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
- status = class_register(&spi_slave_class);
+ status = class_register(&spi_target_class);
if (status < 0)
goto err3;
}
@@ -4939,7 +4948,7 @@ static int __init spi_init(void)
return 0;
err3:
- class_unregister(&spi_master_class);
+ class_unregister(&spi_controller_class);
err2:
bus_unregister(&spi_bus_type);
err1:
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 653f82984216..6108959c28d9 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -698,19 +698,25 @@ static const struct class spidev_class = {
.name = "spidev",
};
+/*
+ * The spi device ids below are expected to match the device names in the
+ * spidev_dt_ids array further down. Both arrays are kept in the same order.
+ */
static const struct spi_device_id spidev_spi_ids[] = {
- { .name = "bh2228fv" },
- { .name = "dh2228fv" },
- { .name = "jg10309-01" },
- { .name = "ltc2488" },
- { .name = "sx1301" },
- { .name = "bk4" },
- { .name = "dhcom-board" },
- { .name = "m53cpld" },
- { .name = "spi-petra" },
- { .name = "spi-authenta" },
- { .name = "em3581" },
- { .name = "si3210" },
+ { .name = /* cisco */ "spi-petra" },
+ { .name = /* dh */ "dhcom-board" },
+ { .name = /* elgin */ "jg10309-01" },
+ { .name = /* gocontroll */ "moduline-module-slot"},
+ { .name = /* lineartechnology */ "ltc2488" },
+ { .name = /* lwn */ "bk4" },
+ { .name = /* lwn */ "bk4-spi" },
+ { .name = /* menlo */ "m53cpld" },
+ { .name = /* micron */ "spi-authenta" },
+ { .name = /* rohm */ "bh2228fv" },
+ { .name = /* rohm */ "dh2228fv" },
+ { .name = /* semtech */ "sx1301" },
+ { .name = /* silabs */ "em3581" },
+ { .name = /* silabs */ "si3210" },
{},
};
MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
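Keeping the two tables in lockstep matters because they feed different match paths: the of_device_id table matches the full "vendor,device" compatible at probe time, while spi_device_id matching (and module autoload via modalias) uses only the part after the comma, since the SPI core strips the vendor prefix when building the modalias. A hypothetical pairing for one entry:

static const struct spi_device_id demo_spi_ids[] = {
	{ .name = "bk4-spi" },	/* matches "lwn,bk4-spi" minus the vendor */
	{ }
};

static const struct of_device_id demo_dt_ids[] = {
	{ .compatible = "lwn,bk4-spi", .data = &spidev_of_check },
	{ }
};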
@@ -732,8 +738,10 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
{ .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
+ { .compatible = "gocontroll,moduline-module-slot", .data = &spidev_of_check},
{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
{ .compatible = "lwn,bk4", .data = &spidev_of_check },
+ { .compatible = "lwn,bk4-spi", .data = &spidev_of_check },
{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
{ .compatible = "micron,spi-authenta", .data = &spidev_of_check },
{ .compatible = "rohm,bh2228fv", .data = &spidev_of_check },